diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 062e520..c5ea780 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1945,7 +1945,9 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { " none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" + " column: implies column names can contain any character." ), - + HIVE_SUPPORT_SQL11_KEYWORDS("hive.support.sql11.keywords", true, + "Whether to reserve SQL 2011 keywords such as ARRAY, USER, DATE, TIMESTAMP and INT so that they\n" + + "cannot be used as identifiers. Set to false for backward compatibility with queries that use them as identifiers."), // role names are case-insensitive USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false, "Comma separated list of users who are in admin role for bootstrapping.\n" + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g index b72ee5d..957c094 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g @@ -35,6 +35,9 @@ k=3; RecognitionException e) { gParent.errors.add(new ParseError(gParent, e, tokenNames)); } + protected boolean useSQL11KeywordsForIdentifier() { + return gParent.useSQL11KeywordsForIdentifier(); + } } @rulecatch { @@ -177,7 +180,12 @@ tableSample tableSource @init { gParent.pushMsg("table source", state); } @after { gParent.popMsg(state); } - : tabname=tableName (props=tableProperties)? (ts=tableSample)? (KW_AS? alias=Identifier)? + : tabname=tableName + ((tableProperties) => props=tableProperties)? + ((tableSample) => ts=tableSample)? + ((KW_AS) => (KW_AS alias=Identifier) + | + (Identifier) => (alias=Identifier))? -> ^(TOK_TABREF $tabname $props? $ts? $alias?) ; @@ -232,11 +240,11 @@ partitionedTableFunction @init { gParent.pushMsg("ptf clause", state); } @after { gParent.popMsg(state); } : - name=Identifier - LPAREN KW_ON ptfsrc=partitionTableFunctionSource partitioningSpec? - ((Identifier LPAREN expression RPAREN ) => Identifier LPAREN expression RPAREN ( COMMA Identifier LPAREN expression RPAREN)*)? - RPAREN alias=Identifier? - -> ^(TOK_PTBLFUNCTION $name $alias? partitionTableFunctionSource partitioningSpec? expression*) + name=Identifier LPAREN KW_ON + ((partitionTableFunctionSource) => (ptfsrc=partitionTableFunctionSource spec=partitioningSpec?)) + ((Identifier LPAREN expression RPAREN ) => Identifier LPAREN expression RPAREN ( COMMA Identifier LPAREN expression RPAREN)*)? + ((RPAREN) => (RPAREN)) ((Identifier) => alias=Identifier)? + -> ^(TOK_PTBLFUNCTION $name $alias? $ptfsrc $spec?
expression*) ; //----------------------- Rules for parsing whereClause ----------------------------- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index 20c73cd..1afa26f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -42,7 +42,6 @@ KW_TRUE : 'TRUE'; KW_FALSE : 'FALSE'; KW_ALL : 'ALL'; KW_NONE: 'NONE'; -KW_DEFAULT : 'DEFAULT'; KW_AND : 'AND'; KW_OR : 'OR'; KW_NOT : 'NOT' | '!'; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 149b788..5330a00 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -353,6 +353,8 @@ package org.apache.hadoop.hive.ql.parse; import java.util.Collection; import java.util.HashMap; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; } @@ -369,7 +371,6 @@ import java.util.HashMap; xlateMap.put("KW_FALSE", "FALSE"); xlateMap.put("KW_ALL", "ALL"); xlateMap.put("KW_NONE", "NONE"); - xlateMap.put("KW_DEFAULT", "DEFAULT"); xlateMap.put("KW_AND", "AND"); xlateMap.put("KW_OR", "OR"); xlateMap.put("KW_NOT", "NOT"); @@ -619,6 +620,13 @@ import java.util.HashMap; private CommonTree throwSetOpException() throws RecognitionException { throw new FailedPredicateException(input, "orderByClause clusterByClause distributeByClause sortByClause limitClause can only be applied to the whole union.", ""); } + private Configuration hiveConf; + public void setHiveConf(Configuration hiveConf) { + this.hiveConf = hiveConf; + } + protected boolean useSQL11KeywordsForIdentifier() { + return !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_SUPPORT_SQL11_KEYWORDS); + } } @rulecatch { @@ -710,8 +718,8 @@ ddlStatement | unlockDatabase | createRoleStatement | dropRoleStatement - | grantPrivileges - | revokePrivileges + | (grantPrivileges) => grantPrivileges + | (revokePrivileges) => revokePrivileges | showGrants | showRoleGrants | showRolePrincipals @@ -954,7 +962,6 @@ alterTableStatementSuffix @init { pushMsg("alter table statement", state); } @after { popMsg(state); } : alterStatementSuffixRename[true] - | alterStatementSuffixUpdateStatsCol | alterStatementSuffixDropPartitions[true] | alterStatementSuffixAddPartitions[true] | alterStatementSuffixTouch @@ -1295,15 +1302,21 @@ fileFormat tabTypeExpr @init { pushMsg("specifying table types", state); } @after { popMsg(state); } - - : identifier (DOT^ (KW_ELEM_TYPE | KW_KEY_TYPE | KW_VALUE_TYPE | identifier))* + : identifier (DOT^ + ( + (KW_ELEM_TYPE) => KW_ELEM_TYPE + | + (KW_KEY_TYPE) => KW_KEY_TYPE + | + (KW_VALUE_TYPE) => KW_VALUE_TYPE + | identifier + ))* ; descTabTypeExpr @init { pushMsg("specifying describe table types", state); } @after { popMsg(state); } - - : identifier (DOT^ (KW_ELEM_TYPE | KW_KEY_TYPE | KW_VALUE_TYPE | identifier))* identifier? + : tabTypeExpr identifier? ; partTypeExpr @@ -1321,12 +1334,19 @@ descPartTypeExpr descStatement @init { pushMsg("describe statement", state); } @after { popMsg(state); } - : (KW_DESCRIBE|KW_DESC) (KW_DATABASE|KW_SCHEMA) KW_EXTENDED? (dbName=identifier) -> ^(TOK_DESCDATABASE $dbName KW_EXTENDED?) - | (KW_DESCRIBE|KW_DESC) (descOptions=KW_FORMATTED|descOptions=KW_EXTENDED|descOptions=KW_PRETTY)? (parttype=descPartTypeExpr) -> ^(TOK_DESCTABLE $parttype $descOptions?) - | (KW_DESCRIBE|KW_DESC) KW_FUNCTION KW_EXTENDED? 
(name=descFuncNames) -> ^(TOK_DESCFUNCTION $name KW_EXTENDED?) + : + (KW_DESCRIBE|KW_DESC) + ( + (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) KW_EXTENDED? (dbName=identifier) -> ^(TOK_DESCDATABASE $dbName KW_EXTENDED?) + | + (KW_FUNCTION) => KW_FUNCTION KW_EXTENDED? (name=descFuncNames) -> ^(TOK_DESCFUNCTION $name KW_EXTENDED?) + | + (KW_FORMATTED|KW_EXTENDED|KW_PRETTY) => ((descOptions=KW_FORMATTED|descOptions=KW_EXTENDED|descOptions=KW_PRETTY) parttype=descPartTypeExpr) -> ^(TOK_DESCTABLE $parttype $descOptions) + | + parttype=descPartTypeExpr -> ^(TOK_DESCTABLE $parttype) + ) ; - analyzeStatement @init { pushMsg("analyze statement", state); } @after { popMsg(state); } @@ -1348,8 +1368,12 @@ showStatement | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=identifier)? KW_LIKE showStmtIdentifier partitionSpec? -> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?) | KW_SHOW KW_TBLPROPERTIES tableName (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES tableName $prptyName?) - | KW_SHOW KW_LOCKS (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?) - | KW_SHOW KW_LOCKS (KW_DATABASE|KW_SCHEMA) (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?) + | KW_SHOW KW_LOCKS + ( + (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?) + | + (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?) + ) | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)? -> ^(TOK_SHOWINDEXES showStmtIdentifier $showOptions? $db_name?) | KW_SHOW KW_COMPACTIONS -> ^(TOK_SHOW_COMPACTIONS) @@ -1457,8 +1481,12 @@ showCurrentRole setRole @init {pushMsg("set role", state);} @after {popMsg(state);} - : KW_SET KW_ROLE roleName=identifier - -> ^(TOK_SHOW_SET_ROLE $roleName) + : KW_SET KW_ROLE + ( + KW_ALL -> ^(TOK_SHOW_SET_ROLE KW_ALL) + | + identifier -> ^(TOK_SHOW_SET_ROLE identifier) + ) ; showGrants @@ -1713,7 +1741,7 @@ tableSkewed @init { pushMsg("table skewed specification", state); } @after { popMsg(state); } : - KW_SKEWED KW_BY LPAREN skewedCols=columnNameList RPAREN KW_ON LPAREN (skewedValues=skewedValueElement) RPAREN (storedAsDirs)? + KW_SKEWED KW_BY LPAREN skewedCols=columnNameList RPAREN KW_ON LPAREN (skewedValues=skewedValueElement) RPAREN ((storedAsDirs) => storedAsDirs)? -> ^(TOK_TABLESKEWED $skewedCols $skewedValues storedAsDirs?) ; @@ -1844,7 +1872,7 @@ tableFileFormat @init { pushMsg("table file format specification", state); } @after { popMsg(state); } : - KW_STORED KW_AS KW_INPUTFORMAT inFmt=StringLiteral KW_OUTPUTFORMAT outFmt=StringLiteral (KW_INPUTDRIVER inDriver=StringLiteral KW_OUTPUTDRIVER outDriver=StringLiteral)? + (KW_STORED KW_AS KW_INPUTFORMAT) => KW_STORED KW_AS KW_INPUTFORMAT inFmt=StringLiteral KW_OUTPUTFORMAT outFmt=StringLiteral (KW_INPUTDRIVER inDriver=StringLiteral KW_OUTPUTDRIVER outDriver=StringLiteral)? -> ^(TOK_TABLEFILEFORMAT $inFmt $outFmt $inDriver? $outDriver?) | KW_STORED KW_BY storageHandler=StringLiteral (KW_WITH KW_SERDEPROPERTIES serdeprops=tableProperties)? @@ -2224,7 +2252,7 @@ simpleSelectStatement whereClause? groupByClause? havingClause? - window_clause? + ((window_clause) => window_clause)? -> ^(TOK_QUERY fromClause? ^(TOK_INSERT ^(TOK_DESTINATION ^(TOK_DIR TOK_TMP_FILE)) selectClause whereClause? groupByClause? havingClause? 
window_clause?)) ; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g index d37f49f..b1229f7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g @@ -35,6 +35,9 @@ k=3; RecognitionException e) { gParent.errors.add(new ParseError(gParent, e, tokenNames)); } + protected boolean useSQL11KeywordsForIdentifier() { + return gParent.useSQL11KeywordsForIdentifier(); + } } @rulecatch { @@ -51,40 +54,41 @@ groupByClause @after { gParent.popMsg(state); } : KW_GROUP KW_BY - groupByExpression - ( COMMA groupByExpression )* + expression + ( COMMA expression)* ((rollup=KW_WITH KW_ROLLUP) | (cube=KW_WITH KW_CUBE)) ? (sets=KW_GROUPING KW_SETS LPAREN groupingSetExpression ( COMMA groupingSetExpression)* RPAREN ) ? - -> {rollup != null}? ^(TOK_ROLLUP_GROUPBY groupByExpression+) - -> {cube != null}? ^(TOK_CUBE_GROUPBY groupByExpression+) - -> {sets != null}? ^(TOK_GROUPING_SETS groupByExpression+ groupingSetExpression+) - -> ^(TOK_GROUPBY groupByExpression+) + -> {rollup != null}? ^(TOK_ROLLUP_GROUPBY expression+) + -> {cube != null}? ^(TOK_CUBE_GROUPBY expression+) + -> {sets != null}? ^(TOK_GROUPING_SETS expression+ groupingSetExpression+) + -> ^(TOK_GROUPBY expression+) ; groupingSetExpression @init {gParent.pushMsg("grouping set expression", state); } @after {gParent.popMsg(state); } : - groupByExpression - -> ^(TOK_GROUPING_SETS_EXPRESSION groupByExpression) + (LPAREN) => groupingSetExpressionMultiple | + groupingExpressionSingle + ; + +groupingSetExpressionMultiple +@init {gParent.pushMsg("grouping set part expression", state); } +@after {gParent.popMsg(state); } + : LPAREN - groupByExpression (COMMA groupByExpression)* - RPAREN - -> ^(TOK_GROUPING_SETS_EXPRESSION groupByExpression+) - | - LPAREN + expression? 
(COMMA expression)* RPAREN - -> ^(TOK_GROUPING_SETS_EXPRESSION) + -> ^(TOK_GROUPING_SETS_EXPRESSION expression*) ; - -groupByExpression -@init { gParent.pushMsg("group by expression", state); } +groupingExpressionSingle +@init { gParent.pushMsg("groupingExpression expression", state); } @after { gParent.popMsg(state); } : - expression + expression -> ^(TOK_GROUPING_SETS_EXPRESSION expression) ; havingClause @@ -101,6 +105,26 @@ havingCondition expression ; +expressionsInParenthese + : + LPAREN expression (COMMA expression)* RPAREN -> expression+ + ; + +expressionsNotInParenthese + : + expression (COMMA expression)* -> expression+ + ; + +columnRefOrderInParenthese + : + LPAREN columnRefOrder (COMMA columnRefOrder)* RPAREN -> columnRefOrder+ + ; + +columnRefOrderNotInParenthese + : + columnRefOrder (COMMA columnRefOrder)* -> columnRefOrder+ + ; + // order by a,b orderByClause @init { gParent.pushMsg("order by clause", state); } @@ -108,17 +132,17 @@ orderByClause : KW_ORDER KW_BY columnRefOrder ( COMMA columnRefOrder)* -> ^(TOK_ORDERBY columnRefOrder+) ; - + clusterByClause @init { gParent.pushMsg("cluster by clause", state); } @after { gParent.popMsg(state); } : KW_CLUSTER KW_BY - LPAREN expression (COMMA expression)* RPAREN -> ^(TOK_CLUSTERBY expression+) + ( + (LPAREN) => expressionsInParenthese -> ^(TOK_CLUSTERBY expressionsInParenthese) | - KW_CLUSTER KW_BY - expression - ( (COMMA)=>COMMA expression )* -> ^(TOK_CLUSTERBY expression+) + expressionsNotInParenthese -> ^(TOK_CLUSTERBY expressionsNotInParenthese) + ) ; partitionByClause @@ -126,10 +150,11 @@ partitionByClause @after { gParent.popMsg(state); } : KW_PARTITION KW_BY - LPAREN expression (COMMA expression)* RPAREN -> ^(TOK_DISTRIBUTEBY expression+) + ( + (LPAREN) => expressionsInParenthese -> ^(TOK_DISTRIBUTEBY expressionsInParenthese) | - KW_PARTITION KW_BY - expression ((COMMA)=> COMMA expression)* -> ^(TOK_DISTRIBUTEBY expression+) + expressionsNotInParenthese -> ^(TOK_DISTRIBUTEBY expressionsNotInParenthese) + ) ; distributeByClause @@ -137,10 +162,11 @@ distributeByClause @after { gParent.popMsg(state); } : KW_DISTRIBUTE KW_BY - LPAREN expression (COMMA expression)* RPAREN -> ^(TOK_DISTRIBUTEBY expression+) + ( + (LPAREN) => expressionsInParenthese -> ^(TOK_DISTRIBUTEBY expressionsInParenthese) | - KW_DISTRIBUTE KW_BY - expression ((COMMA)=> COMMA expression)* -> ^(TOK_DISTRIBUTEBY expression+) + expressionsNotInParenthese -> ^(TOK_DISTRIBUTEBY expressionsNotInParenthese) + ) ; sortByClause @@ -148,12 +174,11 @@ sortByClause @after { gParent.popMsg(state); } : KW_SORT KW_BY - LPAREN columnRefOrder - ( COMMA columnRefOrder)* RPAREN -> ^(TOK_SORTBY columnRefOrder+) + ( + (LPAREN) => columnRefOrderInParenthese -> ^(TOK_SORTBY columnRefOrderInParenthese) | - KW_SORT KW_BY - columnRefOrder - ( (COMMA)=> COMMA columnRefOrder)* -> ^(TOK_SORTBY columnRefOrder+) + columnRefOrderNotInParenthese -> ^(TOK_SORTBY columnRefOrderNotInParenthese) + ) ; // fun(par1, par2, par3) @@ -164,7 +189,7 @@ function functionName LPAREN ( - (star=STAR) + (STAR) => (star=STAR) | (dist=KW_DISTINCT)? (selectExpression (COMMA selectExpression)*)? ) RPAREN (KW_OVER ws=window_specification)? 
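NOTE: the two queries below are a sketch, not part of the patch; they follow the pattern exercised by ambiguitycheck.q against the standard src(key, value) test table. With the refactored clusterByClause above, the (LPAREN) => predicate routes the first form through expressionsInParenthese and the second through expressionsNotInParenthese, and both rewrite to the same ^(TOK_CLUSTERBY expression+) tree; distributeByClause, partitionByClause and sortByClause follow the same shape.

SELECT * FROM src x WHERE x.key = 20 CLUSTER BY (key, value);
SELECT * FROM src x WHERE x.key = 20 CLUSTER BY key, value;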
@@ -192,7 +217,9 @@ functionName @init { gParent.pushMsg("function name", state); } @after { gParent.popMsg(state); } : // Keyword IF is also a function name - KW_IF | KW_ARRAY | KW_MAP | KW_STRUCT | KW_UNIONTYPE | functionIdentifier + (KW_IF | KW_ARRAY | KW_MAP | KW_STRUCT | KW_UNIONTYPE) => (KW_IF | KW_ARRAY | KW_MAP | KW_STRUCT | KW_UNIONTYPE) + | + functionIdentifier | // This allows current_timestamp() to work as well as current_timestamp nonParenthesizedFunctionName @@ -403,7 +430,7 @@ precedenceEqualExpression ( (KW_NOT precedenceEqualNegatableOperator notExpr=precedenceBitwiseOrExpression) -> ^(KW_NOT ^(precedenceEqualNegatableOperator $precedenceEqualExpression $notExpr)) - | (precedenceEqualOperator equalExpr=precedenceBitwiseOrExpression) + | (precedenceEqualOperator) => (precedenceEqualOperator equalExpr=precedenceBitwiseOrExpression) -> ^(precedenceEqualOperator $precedenceEqualExpression $equalExpr) | (KW_NOT KW_IN LPAREN KW_SELECT)=> (KW_NOT KW_IN subQueryExpression) -> ^(KW_NOT ^(TOK_SUBQUERY_EXPR ^(TOK_SUBQUERY_OP KW_IN) subQueryExpression $precedenceEqualExpression)) @@ -543,7 +570,7 @@ sysFuncNames descFuncNames : - sysFuncNames + (sysFuncNames) => sysFuncNames | StringLiteral | functionIdentifier ; @@ -552,6 +579,9 @@ identifier : Identifier | nonReserved -> Identifier[$nonReserved.text] + // If Hive decides to support SQL11 keywords, i.e., useSQL11KeywordsForIdentifier()=false, this alternative + // is disabled and the sql11keywords used as identifiers in existing q tests should NOT be added back. + | {useSQL11KeywordsForIdentifier()}? sql11keywords -> Identifier[$sql11keywords.text] ; functionIdentifier @@ -572,5 +602,37 @@ principalIdentifier nonReserved : - KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE |
KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION | KW_VALUES | KW_URI | KW_SERVER + KW_ADD | KW_ADMIN | KW_AFTER | KW_ANALYZE | KW_ARCHIVE | KW_ASC | KW_BEFORE | KW_BUCKET | KW_BUCKETS + | KW_CASCADE | KW_CHANGE | KW_CLUSTER | KW_CLUSTERED | KW_CLUSTERSTATUS | KW_COLLECTION | KW_COLUMNS + | KW_COMMENT | KW_COMPACT | KW_COMPACTIONS | KW_COMPUTE | KW_CONCATENATE | KW_CONTINUE | KW_DATA + | KW_DATABASES | KW_DATETIME | KW_DBPROPERTIES | KW_DEFERRED | KW_DEFINED | KW_DELIMITED | KW_DEPENDENCY + | KW_DESC | KW_DIRECTORIES | KW_DIRECTORY | KW_DISABLE | KW_DISTRIBUTE | KW_ELEM_TYPE | KW_ENABLE + | KW_ESCAPED | KW_EXCLUSIVE | KW_EXPLAIN | KW_EXPORT | KW_FIELDS | KW_FILE | KW_FILEFORMAT + | KW_FIRST | KW_FORMAT | KW_FORMATTED | KW_FUNCTIONS | KW_HOLD_DDLTIME | KW_IDXPROPERTIES | KW_IGNORE + | KW_INDEX | KW_INDEXES | KW_INNER | KW_INPATH | KW_INPUTDRIVER | KW_INPUTFORMAT | KW_ITEMS | KW_JAR + | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_LOAD | KW_LOCATION | KW_LOCK | KW_LOCKS | KW_LOGICAL | KW_LONG + | KW_MAPJOIN | KW_MATERIALIZED | KW_MINUS | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_OFFLINE | KW_OPTION + | KW_OUTPUTDRIVER | KW_OUTPUTFORMAT | KW_OVERWRITE | KW_OWNER | KW_PLUS | KW_PRETTY | KW_PRINCIPALS + | KW_PROTECTION | KW_PURGE | KW_READ | KW_READONLY | KW_REBUILD | KW_RECORDREADER | KW_RECORDWRITER + | KW_REGEXP | KW_RENAME | KW_REPAIR | KW_REPLACE | KW_RESTRICT | KW_REWRITE | KW_RLIKE | KW_ROLE | KW_ROLES + | KW_SCHEMA | KW_SCHEMAS | KW_SEMI | KW_SERDE | KW_SERDEPROPERTIES | KW_SERVER | KW_SETS | KW_SHARED + | KW_SHOW | KW_SHOW_DATABASE | KW_SKEWED | KW_SORT | KW_SORTED | KW_SSL | KW_STATISTICS | KW_STORED + | KW_STREAMTABLE | KW_STRING | KW_STRUCT | KW_TABLES | KW_TBLPROPERTIES | KW_TEMPORARY | KW_TERMINATED + | KW_TINYINT | KW_TOUCH | KW_TRANSACTIONS | KW_UNARCHIVE | KW_UNDO | KW_UNIONTYPE | KW_UNLOCK | KW_UNSET + | KW_UNSIGNED | KW_URI | KW_USE | KW_UTC | KW_UTCTIMESTAMP | KW_VALUE_TYPE | KW_VIEW | KW_WHILE + ; + +//The following SQL2011 keywords are used as identifiers in existing q tests; they may be added back for backward compatibility.
+sql11keywords + : + //array_map_access_nonconstant.q + KW_ARRAY + //keyword_1.q,serde_regex.q,ppd_field_garbage.q + | KW_USER + //date_1.q,date_udf.q,varchar_cast.q,vectorized_casts.q,char_cast.q + | KW_TIMESTAMP + //date_1.q,vectorized_dynamic_partition_pruning.q,dynamic_partition_pruning.q,tez_union_group_by.q,vectorized_date_funcs.q + | KW_DATE + //orc_vectorization_ppd.q,windowing_navfn.q + | KW_INT ; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java index a24cad9..debd5ac 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java @@ -193,6 +193,9 @@ public ASTNode parse(String command, Context ctx, boolean setTokenRewriteStream) lexer.setHiveConf(ctx.getConf()); } HiveParser parser = new HiveParser(tokens); + if (ctx != null) { + parser.setHiveConf(ctx.getConf()); + } parser.setTreeAdaptor(adaptor); HiveParser.statement_return r = null; try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g index eba3689..f2d8e1b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g @@ -35,6 +35,9 @@ k=3; RecognitionException e) { gParent.errors.add(new ParseError(gParent, e, tokenNames)); } + protected boolean useSQL11KeywordsForIdentifier() { + return gParent.useSQL11KeywordsForIdentifier(); + } } @rulecatch { @@ -125,10 +128,11 @@ selectItem @init { gParent.pushMsg("selection target", state); } @after { gParent.popMsg(state); } : + (tableAllColumns) => tableAllColumns -> ^(TOK_SELEXPR tableAllColumns) + | ( expression ((KW_AS? identifier) | (KW_AS LPAREN identifier (COMMA identifier)* RPAREN))? 
) -> ^(TOK_SELEXPR expression identifier*) - | tableAllColumns -> ^(TOK_SELEXPR tableAllColumns) ; trfmClause @@ -148,7 +152,9 @@ selectExpression @init { gParent.pushMsg("select expression", state); } @after { gParent.popMsg(state); } : - expression | tableAllColumns + (tableAllColumns) => tableAllColumns + | + expression ; selectExpressionList diff --git a/ql/src/test/queries/clientnegative/serde_regex.q b/ql/src/test/queries/clientnegative/serde_regex.q index 13b3f16..d266861 100644 --- a/ql/src/test/queries/clientnegative/serde_regex.q +++ b/ql/src/test/queries/clientnegative/serde_regex.q @@ -3,7 +3,7 @@ USE default; CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + users STRING, time TIMESTAMP, request STRING, status INT, diff --git a/ql/src/test/queries/clientnegative/serde_regex2.q b/ql/src/test/queries/clientnegative/serde_regex2.q index d523d03..0d7ee26 100644 --- a/ql/src/test/queries/clientnegative/serde_regex2.q +++ b/ql/src/test/queries/clientnegative/serde_regex2.q @@ -3,7 +3,7 @@ USE default; CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + users STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/queries/clientnegative/serde_regex3.q b/ql/src/test/queries/clientnegative/serde_regex3.q index 5a0295c..fdea90d 100644 --- a/ql/src/test/queries/clientnegative/serde_regex3.q +++ b/ql/src/test/queries/clientnegative/serde_regex3.q @@ -3,7 +3,7 @@ USE default; CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + users STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/queries/clientpositive/ambiguitycheck.q b/ql/src/test/queries/clientpositive/ambiguitycheck.q new file mode 100644 index 0000000..1cf490e --- /dev/null +++ b/ql/src/test/queries/clientpositive/ambiguitycheck.q @@ -0,0 +1,64 @@ +set hive.cbo.enable=false; +set hive.support.sql11.keywords=false; + +-- check cluster/distribute/partitionBy +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) ; + +SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) ; + +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) ; + +SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) ; + +SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value)))); + +-- HIVE-6950 +SELECT tab1.key, + tab1.value, + SUM(1) +FROM src as tab1 +GROUP BY tab1.key, + tab1.value +GROUPING SETS ((tab1.key, tab1.value)); + +SELECT key, + src.value, + SUM(1) +FROM src +GROUP BY key, + src.value +GROUPING SETS ((key, src.value)); + +explain extended select int(1) from src limit 1; + +select int(1) from src limit 1; + +drop table if exists date_udf_flight; + +CREATE TABLE date_udf_flight ( + origin_city_name STRING, + dest_city_name STRING, + fl_date DATE, + arr_delay FLOAT, + fl_num INT +); +LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight; + +drop table if exists date_udf_flight_orc; + +CREATE TABLE date_udf_flight_orc ( + fl_date DATE, + fl_time TIMESTAMP +) STORED AS ORC; + +INSERT INTO TABLE date_udf_flight_orc SELECT fl_date, to_utc_timestamp(fl_date, 'America/Los_Angeles') FROM date_udf_flight; + +SELECT * FROM date_udf_flight_orc; + +EXPLAIN extended SELECT + year(fl_time), + month(fl_time), + day(fl_time), + date(fl_time) + datediff(fl_time, "2000-01-01") +FROM date_udf_flight_orc; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q b/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q 
index 49c1f54..423afb7 100644 --- a/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q +++ b/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q @@ -1,4 +1,6 @@ set hive.fetch.task.conversion=more; +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'array' is a keyword in SQL2011 create table array_table (array array<string>, index int ); insert into table array_table select array('first', 'second', 'third'), key%3 from src tablesample (4 rows); diff --git a/ql/src/test/queries/clientpositive/char_cast.q b/ql/src/test/queries/clientpositive/char_cast.q index 7f44d4d..688bbbe 100644 --- a/ql/src/test/queries/clientpositive/char_cast.q +++ b/ql/src/test/queries/clientpositive/char_cast.q @@ -1,3 +1,5 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 -- Cast from char to other data types select diff --git a/ql/src/test/queries/clientpositive/date_1.q b/ql/src/test/queries/clientpositive/date_1.q index 7d89ac9..f22303c 100644 --- a/ql/src/test/queries/clientpositive/date_1.q +++ b/ql/src/test/queries/clientpositive/date_1.q @@ -1,4 +1,6 @@ set hive.fetch.task.conversion=more; +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 drop table date_1; diff --git a/ql/src/test/queries/clientpositive/date_udf.q b/ql/src/test/queries/clientpositive/date_udf.q index c55b9f9..ddf37fe 100644 --- a/ql/src/test/queries/clientpositive/date_udf.q +++ b/ql/src/test/queries/clientpositive/date_udf.q @@ -1,3 +1,6 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + drop table date_udf; drop table date_udf_string; drop table date_udf_flight; diff --git a/ql/src/test/queries/clientpositive/decimal_10_0.q b/ql/src/test/queries/clientpositive/decimal_10_0.q index 02b547c..5bf15ca 100644 --- a/ql/src/test/queries/clientpositive/decimal_10_0.q +++ b/ql/src/test/queries/clientpositive/decimal_10_0.q @@ -1,9 +1,9 @@ -DROP TABLE IF EXISTS DECIMAL; +DROP TABLE IF EXISTS DECIMAL_TABLE; -CREATE TABLE DECIMAL (dec decimal); +CREATE TABLE DECIMAL_TABLE (dec decimal); -LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL; +LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL_TABLE; -SELECT dec FROM DECIMAL; +SELECT dec FROM DECIMAL_TABLE; -DROP TABLE DECIMAL; \ No newline at end of file +DROP TABLE DECIMAL_TABLE; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q index f12b2c5..52b97b6 100644 --- a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q +++ b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q @@ -3,7 +3,8 @@ set hive.ppd.remove.duplicatefilters=true; set hive.tez.dynamic.partition.pruning=true; set hive.optimize.metadataonly=false; set hive.optimize.index.filter=true; - +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 select distinct ds from srcpart; select distinct hr from srcpart; diff --git a/ql/src/test/queries/clientpositive/keyword_1.q b/ql/src/test/queries/clientpositive/keyword_1.q index 2e996af..c96529a 100644 --- a/ql/src/test/queries/clientpositive/keyword_1.q +++
b/ql/src/test/queries/clientpositive/keyword_1.q @@ -1,4 +1,7 @@ +set hive.support.sql11.keywords=false; + -- SORT_BEFORE_DIFF +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 create table test_user (user string, `group` string); grant select on table test_user to user hive_test; diff --git a/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q b/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q deleted file mode 100644 index e33b4bf..0000000 --- a/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q +++ /dev/null @@ -1,12 +0,0 @@ -CREATE TABLE table(string string) STORED AS TEXTFILE; - -LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table; - -SELECT table, count(1) -FROM -( - FROM table - SELECT TRANSFORM (table.string) - USING 'java -cp ../util/target/classes/ org.apache.hadoop.hive.scripts.extracturl' AS (table, count) -) subq -GROUP BY table; diff --git a/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q b/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q deleted file mode 100644 index 144cfee..0000000 --- a/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q +++ /dev/null @@ -1,26 +0,0 @@ -DROP TABLE insert; - -CREATE TABLE insert (key INT, as STRING); - -EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -SELECT SUM(HASH(hash)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM insert -) t; - -EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -SELECT SUM(HASH(sum)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM insert -) t; - -SELECT COUNT(*) FROM insert; - -EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10; -INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10; -SELECT SUM(HASH(add)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert -) t; - - -DROP TABLE insert; diff --git a/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q b/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q index 9bdad86..ba0720a 100644 --- a/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q +++ b/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q @@ -1,3 +1,6 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011 + -- create table with 1000 rows create table srcorc(key string, value string) stored as textfile; insert overwrite table srcorc select * from src; diff --git a/ql/src/test/queries/clientpositive/ppd_field_garbage.q b/ql/src/test/queries/clientpositive/ppd_field_garbage.q index 23e0778..173d2c7 100644 --- a/ql/src/test/queries/clientpositive/ppd_field_garbage.q +++ b/ql/src/test/queries/clientpositive/ppd_field_garbage.q @@ -1,3 +1,5 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 -- ppd leaves invalid expr in field expr CREATE TABLE test_issue (fileid int, infos ARRAY<STRUCT<user:INT>>, test_c STRUCT<user_c:STRUCT<age:INT>>); CREATE VIEW v_test_issue AS SELECT fileid, i.user, test_c.user_c.age FROM test_issue LATERAL VIEW explode(infos) info AS i; diff --git a/ql/src/test/queries/clientpositive/serde_regex.q b/ql/src/test/queries/clientpositive/serde_regex.q index accdb54..5eee853 100644 --- a/ql/src/test/queries/clientpositive/serde_regex.q +++
b/ql/src/test/queries/clientpositive/serde_regex.q @@ -1,3 +1,6 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 + EXPLAIN CREATE TABLE serde_regex( host STRING, diff --git a/ql/src/test/queries/clientpositive/tez_union_group_by.q b/ql/src/test/queries/clientpositive/tez_union_group_by.q index 56e8583..d629af2 100644 --- a/ql/src/test/queries/clientpositive/tez_union_group_by.q +++ b/ql/src/test/queries/clientpositive/tez_union_group_by.q @@ -1,3 +1,6 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + CREATE TABLE x ( u bigint, diff --git a/ql/src/test/queries/clientpositive/union_remove_1.q b/ql/src/test/queries/clientpositive/union_remove_1.q index 0db1743..bbadd74 100644 --- a/ql/src/test/queries/clientpositive/union_remove_1.q +++ b/ql/src/test/queries/clientpositive/union_remove_1.q @@ -18,7 +18,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -26,20 +26,20 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_10.q b/ql/src/test/queries/clientpositive/union_remove_10.q index 1e9c201..de3a06e 100644 --- a/ql/src/test/queries/clientpositive/union_remove_10.q +++ b/ql/src/test/queries/clientpositive/union_remove_10.q @@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as rcfile; +create table outputTbl1(key string, vals bigint) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -31,28 +31,28 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b; insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 
2 as vals from inputTbl1 ) a )b; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_11.q b/ql/src/test/queries/clientpositive/union_remove_11.q index 7052c69..202f34a 100644 --- a/ql/src/test/queries/clientpositive/union_remove_11.q +++ b/ql/src/test/queries/clientpositive/union_remove_11.q @@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as rcfile; +create table outputTbl1(key string, vals bigint) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -31,28 +31,28 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 values from inputTbl1 + SELECT key, 2 vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b; insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_12.q b/ql/src/test/queries/clientpositive/union_remove_12.q index 67a1829..14816f4 100644 --- a/ql/src/test/queries/clientpositive/union_remove_12.q +++ b/ql/src/test/queries/clientpositive/union_remove_12.q @@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true; -- on create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as rcfile; +create table outputTbl1(key string, vals bigint) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -31,22 +31,22 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c; insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_13.q b/ql/src/test/queries/clientpositive/union_remove_13.q index 29c164a..4e9228d 100644 --- a/ql/src/test/queries/clientpositive/union_remove_13.q +++ b/ql/src/test/queries/clientpositive/union_remove_13.q @@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true; -- on create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as rcfile; 
+create table outputTbl1(key string, vals bigint) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -31,22 +31,22 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c; insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_14.q b/ql/src/test/queries/clientpositive/union_remove_14.q index ca2f5e5..61fbcde 100644 --- a/ql/src/test/queries/clientpositive/union_remove_14.q +++ b/ql/src/test/queries/clientpositive/union_remove_14.q @@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true; -- on create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as rcfile; +create table outputTbl1(key string, vals bigint) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -32,22 +32,22 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c; insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_15.q b/ql/src/test/queries/clientpositive/union_remove_15.q index 72ced75..5579402 100644 --- a/ql/src/test/queries/clientpositive/union_remove_15.q +++ b/ql/src/test/queries/clientpositive/union_remove_15.q @@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile; +create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -32,17 +32,17 @@ explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a; insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, 
count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a; desc formatted outputTbl1; @@ -50,5 +50,5 @@ desc formatted outputTbl1; show partitions outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 where ds = '1' order by key, values; -select * from outputTbl1 where ds = '2' order by key, values; +select * from outputTbl1 where ds = '1' order by key, vals; +select * from outputTbl1 where ds = '2' order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_16.q b/ql/src/test/queries/clientpositive/union_remove_16.q index 72e6cb1..e65699c 100644 --- a/ql/src/test/queries/clientpositive/union_remove_16.q +++ b/ql/src/test/queries/clientpositive/union_remove_16.q @@ -24,7 +24,7 @@ set hive.exec.dynamic.partition=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile ; +create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile ; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -32,22 +32,22 @@ explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a; insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a; desc formatted outputTbl1; show partitions outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 where ds = '1' order by key, values; -select * from outputTbl1 where ds = '2' order by key, values; +select * from outputTbl1 where ds = '1' order by key, vals; +select * from outputTbl1 where ds = '2' order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_17.q b/ql/src/test/queries/clientpositive/union_remove_17.q index fa68755..e81333b 100644 --- a/ql/src/test/queries/clientpositive/union_remove_17.q +++ b/ql/src/test/queries/clientpositive/union_remove_17.q @@ -21,7 +21,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile; +create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -29,22 +29,22 @@ explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a; insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds 
from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a; desc formatted outputTbl1; show partitions outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 where ds = '1' order by key, values; -select * from outputTbl1 where ds = '2' order by key, values; +select * from outputTbl1 where ds = '1' order by key, vals; +select * from outputTbl1 where ds = '2' order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_18.q b/ql/src/test/queries/clientpositive/union_remove_18.q index 6d2d331..d0ffb78 100644 --- a/ql/src/test/queries/clientpositive/union_remove_18.q +++ b/ql/src/test/queries/clientpositive/union_remove_18.q @@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, ds string) stored as textfile; -create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile; +create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -31,17 +31,17 @@ explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a; insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a; desc formatted outputTbl1; @@ -49,6 +49,6 @@ desc formatted outputTbl1; show partitions outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 where ds = '11' order by key, values; -select * from outputTbl1 where ds = '18' order by key, values; -select * from outputTbl1 where ds is not null order by key, values, ds; +select * from outputTbl1 where ds = '11' order by key, vals; +select * from outputTbl1 where ds = '18' order by key, vals; +select * from outputTbl1 where ds is not null order by key, vals, ds; diff --git a/ql/src/test/queries/clientpositive/union_remove_19.q b/ql/src/test/queries/clientpositive/union_remove_19.q index 17b8a0f..6ba5b3f 100644 --- a/ql/src/test/queries/clientpositive/union_remove_19.q +++ b/ql/src/test/queries/clientpositive/union_remove_19.q @@ -20,25 +20,25 @@ set mapred.input.dir.recursive=true; -- SORT_QUERY_RESULTS create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; insert overwrite table outputTbl1 
-SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; desc formatted outputTbl1; @@ -48,19 +48,19 @@ select * from outputTbl1; -- filter should be fine explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7; insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7; select * from outputTbl1; @@ -68,26 +68,26 @@ select * from outputTbl1; -- filters and sub-queries should be fine explain insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7; insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_2.q b/ql/src/test/queries/clientpositive/union_remove_2.q index 0142325..465e7a7 100644 --- a/ql/src/test/queries/clientpositive/union_remove_2.q +++ b/ql/src/test/queries/clientpositive/union_remove_2.q @@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -27,25 +27,25 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a; insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 
1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_20.q b/ql/src/test/queries/clientpositive/union_remove_20.q index 1c59ef2..05bdecf 100644 --- a/ql/src/test/queries/clientpositive/union_remove_20.q +++ b/ql/src/test/queries/clientpositive/union_remove_20.q @@ -19,28 +19,28 @@ set mapred.input.dir.recursive=true; -- columns being selected) is pushed above the union. create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(values bigint, key string) stored as textfile; +create table outputTbl1(vals bigint, key string) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; explain insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_21.q b/ql/src/test/queries/clientpositive/union_remove_21.q index cbaa08b..6076d9e 100644 --- a/ql/src/test/queries/clientpositive/union_remove_21.q +++ b/ql/src/test/queries/clientpositive/union_remove_21.q @@ -27,17 +27,17 @@ explain insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; desc formatted outputTbl1; diff --git a/ql/src/test/queries/clientpositive/union_remove_22.q b/ql/src/test/queries/clientpositive/union_remove_22.q index 982912b..8ca39b4 100644 --- a/ql/src/test/queries/clientpositive/union_remove_22.q +++ b/ql/src/test/queries/clientpositive/union_remove_22.q @@ -18,25 +18,25 @@ set mapred.input.dir.recursive=true; -- both the sub-qeuries of the union) is pushed above the union. 
create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint, values2 bigint) stored as textfile; +create table outputTbl1(key string, vals bigint, vals2 bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; explain insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.vals, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.vals, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; desc formatted outputTbl1; @@ -45,20 +45,20 @@ select * from outputTbl1; explain insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.vals, a.vals), concat(a.vals, a.vals) FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.vals, a.vals), concat(a.vals, a.vals) FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_23.q b/ql/src/test/queries/clientpositive/union_remove_23.q index 63e4418..2fd4114 100644 --- a/ql/src/test/queries/clientpositive/union_remove_23.q +++ b/ql/src/test/queries/clientpositive/union_remove_23.q @@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -27,22 +27,22 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as vals from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) subq2; insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as vals from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) subq2; desc formatted outputTbl1; set 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_24.q b/ql/src/test/queries/clientpositive/union_remove_24.q index 88c378d..fe68a2a 100644 --- a/ql/src/test/queries/clientpositive/union_remove_24.q +++ b/ql/src/test/queries/clientpositive/union_remove_24.q @@ -17,7 +17,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key double, values bigint) stored as textfile; +create table outputTbl1(key double, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -25,20 +25,20 @@ EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a; INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_25.q b/ql/src/test/queries/clientpositive/union_remove_25.q index 27d9ebe..d7c709e 100644 --- a/ql/src/test/queries/clientpositive/union_remove_25.q +++ b/ql/src/test/queries/clientpositive/union_remove_25.q @@ -19,9 +19,9 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile; -create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile; -create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile; +create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile; +create table outputTbl2(key string, vals bigint) partitioned by (ds string) stored as textfile; +create table outputTbl3(key string, vals bigint) partitioned by (ds string,hr string) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -29,23 +29,23 @@ explain insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, 
count(1) as vals from inputTbl1 group by key ) a; desc formatted outputTbl1 partition(ds='2004'); set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; explain insert overwrite table outputTbl2 partition(ds) diff --git a/ql/src/test/queries/clientpositive/union_remove_3.q b/ql/src/test/queries/clientpositive/union_remove_3.q index 7e1b113..63b5183 100644 --- a/ql/src/test/queries/clientpositive/union_remove_3.q +++ b/ql/src/test/queries/clientpositive/union_remove_3.q @@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -27,25 +27,25 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a; insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_4.q b/ql/src/test/queries/clientpositive/union_remove_4.q index 44b31d6..2cb86ed 100644 --- a/ql/src/test/queries/clientpositive/union_remove_4.q +++ b/ql/src/test/queries/clientpositive/union_remove_4.q @@ -19,7 +19,7 @@ set hive.merge.smallfiles.avgsize=1; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -27,20 +27,20 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_5.q b/ql/src/test/queries/clientpositive/union_remove_5.q index c5c0b7f..bf9b88e 100644 --- a/ql/src/test/queries/clientpositive/union_remove_5.q +++ b/ql/src/test/queries/clientpositive/union_remove_5.q @@ -21,7 +21,7 @@ set 
mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -29,24 +29,24 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a; insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_6.q b/ql/src/test/queries/clientpositive/union_remove_6.q index 6990ed2..80af0f1 100644 --- a/ql/src/test/queries/clientpositive/union_remove_6.q +++ b/ql/src/test/queries/clientpositive/union_remove_6.q @@ -15,28 +15,28 @@ set mapred.input.dir.recursive=true; -- merging is turned off create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; -create table outputTbl2(key string, values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; +create table outputTbl2(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; explain FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select *; FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select *; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; -select * from outputTbl2 order by key, values;; +select * from outputTbl1 order by key, vals; +select * from outputTbl2 order by key, vals;; diff --git a/ql/src/test/queries/clientpositive/union_remove_6_subq.q b/ql/src/test/queries/clientpositive/union_remove_6_subq.q index 8bcac6f..5994f3c 100644 --- a/ql/src/test/queries/clientpositive/union_remove_6_subq.q +++ b/ql/src/test/queries/clientpositive/union_remove_6_subq.q @@ -14,17 +14,17 @@ set mapred.input.dir.recursive=true; -- merging is turned off create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; -create table outputTbl2(key string, 
values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; +create table outputTbl2(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; explain FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * @@ -32,17 +32,17 @@ insert overwrite table outputTbl2 select *; FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select *; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; -select * from outputTbl2 order by key, values; +select * from outputTbl1 order by key, vals; +select * from outputTbl2 order by key, vals; -- The following queries guarantee the correctness. explain diff --git a/ql/src/test/queries/clientpositive/union_remove_7.q b/ql/src/test/queries/clientpositive/union_remove_7.q index c254aba..ddf3a7c 100644 --- a/ql/src/test/queries/clientpositive/union_remove_7.q +++ b/ql/src/test/queries/clientpositive/union_remove_7.q @@ -20,7 +20,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as rcfile; +create table outputTbl1(key string, vals bigint) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -28,20 +28,20 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_8.q b/ql/src/test/queries/clientpositive/union_remove_8.q index 8dfb8e8..86ff035 100644 --- a/ql/src/test/queries/clientpositive/union_remove_8.q +++ b/ql/src/test/queries/clientpositive/union_remove_8.q @@ -21,7 +21,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as rcfile; +create table outputTbl1(key string, vals bigint) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -29,24 +29,24 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group 
by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a; insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/union_remove_9.q b/ql/src/test/queries/clientpositive/union_remove_9.q index c9a4dc3..5e3d3ab 100644 --- a/ql/src/test/queries/clientpositive/union_remove_9.q +++ b/ql/src/test/queries/clientpositive/union_remove_9.q @@ -21,7 +21,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as rcfile; +create table outputTbl1(key string, vals bigint) stored as rcfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -29,28 +29,28 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b; insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/varchar_cast.q b/ql/src/test/queries/clientpositive/varchar_cast.q index c356b1d..f428eab 100644 --- a/ql/src/test/queries/clientpositive/varchar_cast.q +++ b/ql/src/test/queries/clientpositive/varchar_cast.q @@ -1,4 +1,6 @@ set hive.fetch.task.conversion=more; +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 -- Cast from varchar to other data types select diff --git a/ql/src/test/queries/clientpositive/vector_decimal_10_0.q b/ql/src/test/queries/clientpositive/vector_decimal_10_0.q index ae93058..ad55451 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_10_0.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_10_0.q @@ -2,18 +2,18 @@ SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; DROP TABLE IF EXISTS decimal_txt; -DROP TABLE IF EXISTS decimal; +DROP TABLE IF EXISTS DECIMAL_TABLE; CREATE TABLE decimal_txt (dec decimal); LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE decimal_txt; -CREATE TABLE DECIMAL STORED AS ORC AS 
SELECT * FROM decimal_txt; +CREATE TABLE DECIMAL_TABLE STORED AS ORC AS SELECT * FROM decimal_txt; EXPLAIN -SELECT dec FROM DECIMAL order by dec; +SELECT dec FROM DECIMAL_TABLE order by dec; -SELECT dec FROM DECIMAL order by dec; +SELECT dec FROM DECIMAL_TABLE order by dec; DROP TABLE DECIMAL_txt; -DROP TABLE DECIMAL; \ No newline at end of file +DROP TABLE DECIMAL_TABLE; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/vectorized_casts.q b/ql/src/test/queries/clientpositive/vectorized_casts.q index 3f818b1..f5618f3 100644 --- a/ql/src/test/queries/clientpositive/vectorized_casts.q +++ b/ql/src/test/queries/clientpositive/vectorized_casts.q @@ -1,4 +1,6 @@ SET hive.vectorized.execution.enabled = true; +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 -- Test type casting in vectorized mode to verify end-to-end functionality. diff --git a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q index 1fb0dac..570fca2 100644 --- a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q +++ b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q @@ -1,4 +1,6 @@ SET hive.vectorized.execution.enabled = true; +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. diff --git a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q index 1197f7d..1393faa 100644 --- a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q +++ b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q @@ -4,6 +4,8 @@ set hive.tez.dynamic.partition.pruning=true; set hive.optimize.metadataonly=false; set hive.optimize.index.filter=true; set hive.vectorized.execution.enabled=true; +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 select distinct ds from srcpart; diff --git a/ql/src/test/queries/clientpositive/windowing_navfn.q b/ql/src/test/queries/clientpositive/windowing_navfn.q index e275975..2b6f2d5 100644 --- a/ql/src/test/queries/clientpositive/windowing_navfn.q +++ b/ql/src/test/queries/clientpositive/windowing_navfn.q @@ -1,3 +1,6 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011 + drop table over10k; create table over10k( diff --git a/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out b/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out index 4808433..618cba6 100644 --- a/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out +++ b/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out @@ -2,6 +2,4 @@ PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES -PREHOOK: query: create role None -PREHOOK: type: CREATEROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
Role name cannot be one of the reserved roles: [ALL, DEFAULT, NONE] +FAILED: ParseException line 2:12 cannot recognize input near 'None' '' '' in create role diff --git a/ql/src/test/results/clientnegative/serde_regex.q.out b/ql/src/test/results/clientnegative/serde_regex.q.out index fc29724..086f77e 100644 --- a/ql/src/test/results/clientnegative/serde_regex.q.out +++ b/ql/src/test/results/clientnegative/serde_regex.q.out @@ -8,7 +8,7 @@ PREHOOK: query: -- This should fail because Regex SerDe doesn't support STRUCT CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + users STRING, time TIMESTAMP, request STRING, status INT, diff --git a/ql/src/test/results/clientnegative/serde_regex2.q.out b/ql/src/test/results/clientnegative/serde_regex2.q.out index 198f79b..324d33c 100644 --- a/ql/src/test/results/clientnegative/serde_regex2.q.out +++ b/ql/src/test/results/clientnegative/serde_regex2.q.out @@ -8,7 +8,7 @@ PREHOOK: query: -- Mismatch between the number of matching groups and columns, t CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + users STRING, time STRING, request STRING, status STRING, @@ -27,7 +27,7 @@ POSTHOOK: query: -- Mismatch between the number of matching groups and columns, CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + users STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/results/clientnegative/serde_regex3.q.out b/ql/src/test/results/clientnegative/serde_regex3.q.out index 1df4cd6..29be4ca 100644 --- a/ql/src/test/results/clientnegative/serde_regex3.q.out +++ b/ql/src/test/results/clientnegative/serde_regex3.q.out @@ -8,7 +8,7 @@ PREHOOK: query: -- null input.regex, raise an exception CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + users STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/results/clientpositive/ambiguitycheck.q.out b/ql/src/test/results/clientpositive/ambiguitycheck.q.out new file mode 100644 index 0000000..614e6ba --- /dev/null +++ b/ql/src/test/results/clientpositive/ambiguitycheck.q.out @@ -0,0 +1,707 @@ +PREHOOK: query: -- check cluster/distribute/partitionBy +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: -- check cluster/distribute/partitionBy +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src 
+#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value)))) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value)))) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: -- HIVE-6950 +SELECT tab1.key, + tab1.value, + SUM(1) +FROM src as tab1 +GROUP BY tab1.key, + tab1.value +GROUPING SETS ((tab1.key, tab1.value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: -- HIVE-6950 +SELECT tab1.key, + tab1.value, + SUM(1) +FROM src as tab1 +GROUP BY tab1.key, + tab1.value +GROUPING SETS ((tab1.key, tab1.value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 3 +10 val_10 1 +100 val_100 2 +103 val_103 2 +104 val_104 2 +105 val_105 1 +11 val_11 1 +111 val_111 1 +113 val_113 2 +114 val_114 1 +116 val_116 1 +118 val_118 2 +119 val_119 3 +12 val_12 2 +120 val_120 2 +125 val_125 2 +126 val_126 1 +128 val_128 3 +129 val_129 2 +131 val_131 1 +133 val_133 1 +134 val_134 2 +136 val_136 1 +137 val_137 2 +138 val_138 4 +143 val_143 1 +145 val_145 1 +146 val_146 2 +149 val_149 2 +15 val_15 2 +150 val_150 1 +152 val_152 2 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 2 +165 val_165 2 +166 val_166 1 +167 val_167 3 +168 val_168 1 +169 val_169 4 +17 val_17 1 +170 val_170 1 +172 val_172 2 +174 val_174 2 +175 val_175 2 +176 val_176 2 +177 val_177 1 +178 val_178 1 +179 val_179 2 +18 val_18 2 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 3 +189 val_189 1 +19 val_19 1 +190 val_190 1 +191 val_191 2 +192 val_192 1 +193 val_193 3 +194 val_194 1 +195 val_195 2 +196 val_196 1 +197 val_197 2 +199 val_199 3 +2 val_2 1 +20 val_20 1 +200 val_200 2 +201 val_201 1 +202 val_202 1 +203 val_203 2 +205 val_205 2 +207 val_207 2 +208 val_208 3 +209 val_209 2 +213 val_213 2 +214 val_214 1 +216 val_216 2 +217 val_217 2 +218 val_218 1 +219 val_219 2 +221 val_221 2 +222 val_222 1 +223 val_223 2 +224 val_224 2 +226 val_226 1 +228 val_228 1 +229 val_229 2 +230 val_230 5 +233 val_233 2 +235 val_235 1 +237 val_237 2 +238 val_238 2 +239 val_239 2 +24 val_24 2 +241 val_241 1 +242 val_242 2 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 2 +256 val_256 2 +257 val_257 1 +258 val_258 1 +26 val_26 2 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 2 +266 val_266 1 +27 val_27 1 +272 val_272 2 +273 val_273 3 +274 val_274 1 +275 val_275 1 +277 val_277 4 +278 val_278 2 +28 val_28 1 +280 val_280 2 +281 val_281 2 +282 val_282 2 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 2 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 3 +30 val_30 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 2 +308 val_308 1 +309 val_309 2 +310 val_310 1 +311 val_311 3 +315 val_315 1 +316 val_316 3 +317 val_317 2 +318 val_318 3 +321 val_321 2 +322 val_322 2 +323 val_323 1 +325 val_325 2 +327 val_327 3 +33 val_33 1 +331 val_331 2 +332 val_332 1 +333 val_333 2 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +34 val_34 1 +341 val_341 1 +342 val_342 2 +344 val_344 2 +345 val_345 1 +348 val_348 5 +35 val_35 3 +351 val_351 1 +353 val_353 2 +356 val_356 1 +360 val_360 1 +362 val_362 
1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 2 +368 val_368 1 +369 val_369 3 +37 val_37 2 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 2 +384 val_384 3 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 2 +396 val_396 3 +397 val_397 2 +399 val_399 2 +4 val_4 1 +400 val_400 1 +401 val_401 5 +402 val_402 1 +403 val_403 3 +404 val_404 2 +406 val_406 4 +407 val_407 1 +409 val_409 3 +41 val_41 1 +411 val_411 1 +413 val_413 2 +414 val_414 2 +417 val_417 3 +418 val_418 1 +419 val_419 1 +42 val_42 2 +421 val_421 1 +424 val_424 2 +427 val_427 1 +429 val_429 2 +43 val_43 1 +430 val_430 3 +431 val_431 3 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 3 +439 val_439 2 +44 val_44 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 3 +455 val_455 1 +457 val_457 1 +458 val_458 2 +459 val_459 2 +460 val_460 1 +462 val_462 2 +463 val_463 2 +466 val_466 3 +467 val_467 1 +468 val_468 4 +469 val_469 5 +47 val_47 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 2 +479 val_479 1 +480 val_480 3 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 4 +490 val_490 1 +491 val_491 1 +492 val_492 2 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 3 +5 val_5 3 +51 val_51 2 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 2 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 2 +69 val_69 1 +70 val_70 3 +72 val_72 2 +74 val_74 1 +76 val_76 2 +77 val_77 1 +78 val_78 1 +8 val_8 1 +80 val_80 1 +82 val_82 1 +83 val_83 2 +84 val_84 2 +85 val_85 1 +86 val_86 1 +87 val_87 1 +9 val_9 1 +90 val_90 3 +92 val_92 1 +95 val_95 2 +96 val_96 1 +97 val_97 2 +98 val_98 2 +PREHOOK: query: SELECT key, + src.value, + SUM(1) +FROM src +GROUP BY key, + src.value +GROUPING SETS ((key, src.value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, + src.value, + SUM(1) +FROM src +GROUP BY key, + src.value +GROUPING SETS ((key, src.value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 3 +10 val_10 1 +100 val_100 2 +103 val_103 2 +104 val_104 2 +105 val_105 1 +11 val_11 1 +111 val_111 1 +113 val_113 2 +114 val_114 1 +116 val_116 1 +118 val_118 2 +119 val_119 3 +12 val_12 2 +120 val_120 2 +125 val_125 2 +126 val_126 1 +128 val_128 3 +129 val_129 2 +131 val_131 1 +133 val_133 1 +134 val_134 2 +136 val_136 1 +137 val_137 2 +138 val_138 4 +143 val_143 1 +145 val_145 1 +146 val_146 2 +149 val_149 2 +15 val_15 2 +150 val_150 1 +152 val_152 2 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 2 +165 val_165 2 +166 val_166 1 +167 val_167 3 +168 val_168 1 +169 val_169 4 +17 val_17 1 +170 val_170 1 +172 val_172 2 +174 val_174 2 +175 val_175 2 +176 val_176 2 +177 val_177 1 +178 val_178 1 +179 val_179 2 +18 val_18 2 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 3 +189 val_189 1 +19 val_19 1 +190 val_190 1 +191 val_191 2 +192 val_192 1 +193 val_193 3 +194 val_194 1 +195 val_195 2 +196 val_196 1 +197 val_197 2 +199 val_199 3 +2 val_2 1 +20 val_20 1 +200 val_200 2 +201 val_201 1 +202 val_202 1 +203 val_203 2 +205 val_205 2 +207 val_207 2 +208 val_208 3 +209 val_209 2 +213 val_213 2 +214 val_214 1 +216 val_216 2 +217 val_217 2 
+218 val_218 1 +219 val_219 2 +221 val_221 2 +222 val_222 1 +223 val_223 2 +224 val_224 2 +226 val_226 1 +228 val_228 1 +229 val_229 2 +230 val_230 5 +233 val_233 2 +235 val_235 1 +237 val_237 2 +238 val_238 2 +239 val_239 2 +24 val_24 2 +241 val_241 1 +242 val_242 2 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 2 +256 val_256 2 +257 val_257 1 +258 val_258 1 +26 val_26 2 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 2 +266 val_266 1 +27 val_27 1 +272 val_272 2 +273 val_273 3 +274 val_274 1 +275 val_275 1 +277 val_277 4 +278 val_278 2 +28 val_28 1 +280 val_280 2 +281 val_281 2 +282 val_282 2 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 2 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 3 +30 val_30 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 2 +308 val_308 1 +309 val_309 2 +310 val_310 1 +311 val_311 3 +315 val_315 1 +316 val_316 3 +317 val_317 2 +318 val_318 3 +321 val_321 2 +322 val_322 2 +323 val_323 1 +325 val_325 2 +327 val_327 3 +33 val_33 1 +331 val_331 2 +332 val_332 1 +333 val_333 2 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +34 val_34 1 +341 val_341 1 +342 val_342 2 +344 val_344 2 +345 val_345 1 +348 val_348 5 +35 val_35 3 +351 val_351 1 +353 val_353 2 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 2 +368 val_368 1 +369 val_369 3 +37 val_37 2 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 2 +384 val_384 3 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 2 +396 val_396 3 +397 val_397 2 +399 val_399 2 +4 val_4 1 +400 val_400 1 +401 val_401 5 +402 val_402 1 +403 val_403 3 +404 val_404 2 +406 val_406 4 +407 val_407 1 +409 val_409 3 +41 val_41 1 +411 val_411 1 +413 val_413 2 +414 val_414 2 +417 val_417 3 +418 val_418 1 +419 val_419 1 +42 val_42 2 +421 val_421 1 +424 val_424 2 +427 val_427 1 +429 val_429 2 +43 val_43 1 +430 val_430 3 +431 val_431 3 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 3 +439 val_439 2 +44 val_44 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 3 +455 val_455 1 +457 val_457 1 +458 val_458 2 +459 val_459 2 +460 val_460 1 +462 val_462 2 +463 val_463 2 +466 val_466 3 +467 val_467 1 +468 val_468 4 +469 val_469 5 +47 val_47 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 2 +479 val_479 1 +480 val_480 3 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 4 +490 val_490 1 +491 val_491 1 +492 val_492 2 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 3 +5 val_5 3 +51 val_51 2 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 2 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 2 +69 val_69 1 +70 val_70 3 +72 val_72 2 +74 val_74 1 +76 val_76 2 +77 val_77 1 +78 val_78 1 +8 val_8 1 +80 val_80 1 +82 val_82 1 +83 val_83 2 +84 val_84 2 +85 val_85 1 +86 val_86 1 +87 val_87 1 +9 val_9 1 +90 val_90 3 +92 val_92 1 +95 val_95 2 +96 val_96 1 +97 val_97 2 +98 val_98 2 diff --git a/ql/src/test/results/clientpositive/char_cast.q.out b/ql/src/test/results/clientpositive/char_cast.q.out index 025fedb..f225ced 100644 --- a/ql/src/test/results/clientpositive/char_cast.q.out +++ b/ql/src/test/results/clientpositive/char_cast.q.out @@ -1,4 +1,6 @@ -PREHOOK: query: -- Cast from char 
to other data types +PREHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +-- Cast from char to other data types select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), @@ -11,7 +13,9 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Cast from char to other data types +POSTHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +-- Cast from char to other data types select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), diff --git a/ql/src/test/results/clientpositive/date_1.q.out b/ql/src/test/results/clientpositive/date_1.q.out index df9fc47..51fc29c 100644 --- a/ql/src/test/results/clientpositive/date_1.q.out +++ b/ql/src/test/results/clientpositive/date_1.q.out @@ -1,6 +1,10 @@ -PREHOOK: query: drop table date_1 +PREHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +drop table date_1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table date_1 +POSTHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +drop table date_1 POSTHOOK: type: DROPTABLE PREHOOK: query: create table date_1 (d date) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/date_udf.q.out b/ql/src/test/results/clientpositive/date_udf.q.out index 9b37da6..99c650a 100644 --- a/ql/src/test/results/clientpositive/date_udf.q.out +++ b/ql/src/test/results/clientpositive/date_udf.q.out @@ -1,6 +1,10 @@ -PREHOOK: query: drop table date_udf +PREHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +drop table date_udf PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table date_udf +POSTHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +drop table date_udf POSTHOOK: type: DROPTABLE PREHOOK: query: drop table date_udf_string PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/decimal_10_0.q.out b/ql/src/test/results/clientpositive/decimal_10_0.q.out index ae3426c..ca313dd 100644 --- a/ql/src/test/results/clientpositive/decimal_10_0.q.out +++ b/ql/src/test/results/clientpositive/decimal_10_0.q.out @@ -1,38 +1,38 @@ -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_TABLE PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_TABLE POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE DECIMAL (dec decimal) +PREHOOK: query: CREATE TABLE DECIMAL_TABLE (dec decimal) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL -POSTHOOK: query: CREATE TABLE DECIMAL (dec decimal) +PREHOOK: Output: default@DECIMAL_TABLE +POSTHOOK: query: CREATE TABLE DECIMAL_TABLE (dec decimal) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL +POSTHOOK: Output: default@DECIMAL_TABLE +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL_TABLE PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@decimal -POSTHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL +PREHOOK: Output: default@decimal_table +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL_TABLE POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@decimal -PREHOOK: query: SELECT dec FROM DECIMAL +POSTHOOK: Output: default@decimal_table +PREHOOK: query: SELECT dec FROM DECIMAL_TABLE PREHOOK: type: QUERY -PREHOOK: Input: default@decimal +PREHOOK: Input: default@decimal_table #### A masked pattern was here #### -POSTHOOK: query: SELECT dec FROM DECIMAL +POSTHOOK: query: SELECT dec FROM DECIMAL_TABLE POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal +POSTHOOK: Input: default@decimal_table #### A masked pattern was here #### 1000000000 NULL -PREHOOK: query: DROP TABLE DECIMAL +PREHOOK: query: DROP TABLE DECIMAL_TABLE PREHOOK: type: DROPTABLE -PREHOOK: Input: default@decimal -PREHOOK: Output: default@decimal -POSTHOOK: query: DROP TABLE DECIMAL +PREHOOK: Input: default@decimal_table +PREHOOK: Output: default@decimal_table +POSTHOOK: query: DROP TABLE DECIMAL_TABLE POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@decimal -POSTHOOK: Output: default@decimal +POSTHOOK: Input: default@decimal_table +POSTHOOK: Output: default@decimal_table diff --git a/ql/src/test/results/clientpositive/keyword_1.q.out b/ql/src/test/results/clientpositive/keyword_1.q.out index 135d8e5..7e80c32 100644 --- a/ql/src/test/results/clientpositive/keyword_1.q.out +++ b/ql/src/test/results/clientpositive/keyword_1.q.out @@ -1,10 +1,12 @@ PREHOOK: query: -- SORT_BEFORE_DIFF +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 create table test_user (user string, `group` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_user POSTHOOK: query: -- SORT_BEFORE_DIFF +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 create table test_user (user string, `group` string) POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out b/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out deleted file mode 100644 index 819da22..0000000 --- a/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out +++ /dev/null @@ -1,40 +0,0 @@ -PREHOOK: query: CREATE TABLE table(string string) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@table -POSTHOOK: query: CREATE TABLE table(string string) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@table -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@table -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@table -PREHOOK: query: SELECT table, count(1) -FROM -( - FROM table - SELECT TRANSFORM (table.string) -#### A masked pattern was here #### -) subq -GROUP BY table -PREHOOK: type: QUERY -PREHOOK: Input: default@table -#### A masked pattern was here #### -POSTHOOK: query: SELECT table, count(1) -FROM -( - FROM table - SELECT TRANSFORM (table.string) -#### A masked pattern was here #### -) subq -GROUP BY table -POSTHOOK: type: QUERY -POSTHOOK: Input: default@table -#### A 
masked pattern was here #### -1uauniajqtunlsvadmxhlxvngxpqjuzbpzvdiwmzphmbaicduzkgxgtdeiunduosu.html 4 -4uzsbtwvdypfitqfqdjosynqp.html 4 diff --git a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out b/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out deleted file mode 100644 index 9f075f1..0000000 --- a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out +++ /dev/null @@ -1,281 +0,0 @@ -PREHOOK: query: DROP TABLE insert -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE insert -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE insert (key INT, as STRING) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@insert -POSTHOOK: query: CREATE TABLE insert (key INT, as STRING) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@insert -PREHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.insert - - Stage: Stage-0 - Move Operator - tables: - replace: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.insert - - Stage: Stage-2 - Stats-Aggr Operator - -PREHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@insert -POSTHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@insert -POSTHOOK: Lineage: insert.as SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 
-POSTHOOK: Lineage: insert.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT SUM(HASH(hash)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM insert -) t -PREHOOK: type: QUERY -PREHOOK: Input: default@insert -#### A masked pattern was here #### -POSTHOOK: query: SELECT SUM(HASH(hash)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM insert -) t -POSTHOOK: type: QUERY -POSTHOOK: Input: default@insert -#### A masked pattern was here #### -10226524244 -PREHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.insert - - Stage: Stage-0 - Move Operator - tables: - replace: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.insert - - Stage: Stage-2 - Stats-Aggr Operator - -PREHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@insert -POSTHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@insert -POSTHOOK: Lineage: insert.as SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: insert.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT SUM(HASH(sum)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM insert -) t -PREHOOK: type: QUERY -PREHOOK: Input: default@insert -#### A masked pattern was here #### -POSTHOOK: query: SELECT SUM(HASH(sum)) FROM ( - SELECT TRANSFORM(*) 
USING 'tr \t _' AS (sum) FROM insert -) t -POSTHOOK: type: QUERY -POSTHOOK: Input: default@insert -#### A masked pattern was here #### -20453048488 -PREHOOK: query: SELECT COUNT(*) FROM insert -PREHOOK: type: QUERY -PREHOOK: Input: default@insert -#### A masked pattern was here #### -POSTHOOK: query: SELECT COUNT(*) FROM insert -POSTHOOK: type: QUERY -POSTHOOK: Input: default@insert -#### A masked pattern was here #### -200 -PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.insert - - Stage: Stage-0 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.insert - - Stage: Stage-2 - Stats-Aggr Operator - -PREHOOK: query: INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@insert -POSTHOOK: query: INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@insert -POSTHOOK: Lineage: insert.as SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: insert.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT SUM(HASH(add)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert -) t -PREHOOK: type: QUERY -PREHOOK: Input: default@insert -#### A masked pattern was here #### -POSTHOOK: query: SELECT SUM(HASH(add)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert -) t -POSTHOOK: type: QUERY -POSTHOOK: Input: default@insert 
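[Reviewer note] The two nonreserved_keywords golden files are removed outright rather than regenerated: statements such as CREATE TABLE insert (key INT, as STRING) or CREATE TABLE table(string string) cannot be expressed at all once INSERT, AS, and TABLE are reserved. Where such names must be kept, the backtick pattern that keyword_1 already applies to `group` is the remaining escape hatch; a sketch only, assuming hive.support.quoted.identifiers=column so that quoted reserved words still lex as plain identifiers:

create table test_user_demo (`user` string, `group` string);  -- reserved words survive when quoted
select `user`, `group` from test_user_demo;                   -- hypothetical demo table, not part of this patch
drop table test_user_demo;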
-#### A masked pattern was here #### --826625916 -PREHOOK: query: DROP TABLE insert -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@insert -PREHOOK: Output: default@insert -POSTHOOK: query: DROP TABLE insert -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@insert -POSTHOOK: Output: default@insert diff --git a/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out b/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out index 738abc4..4b958cc 100644 --- a/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out +++ b/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out @@ -1,9 +1,13 @@ -PREHOOK: query: -- create table with 1000 rows +PREHOOK: query: -- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011 + +-- create table with 1000 rows create table srcorc(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcorc -POSTHOOK: query: -- create table with 1000 rows +POSTHOOK: query: -- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011 + +-- create table with 1000 rows create table srcorc(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/ppd_field_garbage.q.out b/ql/src/test/results/clientpositive/ppd_field_garbage.q.out index 86eca5b..1ce7a39 100644 --- a/ql/src/test/results/clientpositive/ppd_field_garbage.q.out +++ b/ql/src/test/results/clientpositive/ppd_field_garbage.q.out @@ -1,9 +1,11 @@ -PREHOOK: query: -- ppd leaves invalid expr in field expr +PREHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 +-- ppd leaves invalid expr in field expr CREATE TABLE test_issue (fileid int, infos ARRAY>, test_c STRUCT>) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_issue -POSTHOOK: query: -- ppd leaves invalid expr in field expr +POSTHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 +-- ppd leaves invalid expr in field expr CREATE TABLE test_issue (fileid int, infos ARRAY>, test_c STRUCT>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/serde_regex.q.out b/ql/src/test/results/clientpositive/serde_regex.q.out index 19187ba..d807ab3 100644 --- a/ql/src/test/results/clientpositive/serde_regex.q.out +++ b/ql/src/test/results/clientpositive/serde_regex.q.out @@ -1,4 +1,6 @@ -PREHOOK: query: EXPLAIN +PREHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 + +EXPLAIN CREATE TABLE serde_regex( host STRING, identity STRING, @@ -15,7 +17,9 @@ WITH SERDEPROPERTIES ( ) STORED AS TEXTFILE PREHOOK: type: CREATETABLE -POSTHOOK: query: EXPLAIN +POSTHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 + +EXPLAIN CREATE TABLE serde_regex( host STRING, identity STRING, diff --git a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out index 581d305..7398a77 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: 
Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -156,9 +156,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -166,15 +166,15 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -184,7 +184,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -211,11 +211,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out 
b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out index 629f7ba..31ea581 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out @@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -54,12 +54,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -67,12 +67,12 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -206,12 +206,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -220,19 +220,19 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -242,7 +242,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -269,11 +269,11 @@ Bucket Columns: [] Sort Columns: 
[] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out index 257d2ff..e2fdecf 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out @@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -54,12 +54,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 values from inputTbl1 + SELECT key, 2 vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -67,12 +67,12 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 values from inputTbl1 + SELECT key, 2 vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -192,12 +192,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -206,19 +206,19 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [] +POSTHOOK: Lineage: outputtbl1.vals 
EXPRESSION [] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -228,7 +228,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -255,11 +255,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out new file mode 100644 index 0000000..9fb5a20 --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out @@ -0,0 +1,281 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a map-only query, and the +-- other one is a map-join query), followed by select star and a file sink. +-- The union optimization is applied, and the union is removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a map-only query, and the +-- other one is a map-join query), followed by select star and a file sink. +-- The union optimization is applied, and the union is removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. 
In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as vals from inputTbl1 +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as vals from inputTbl1 +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-7 is a root stage + Stage-1 depends on stages: Stage-7 + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4 + Stage-3 + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5 + Stage-2 + Stage-4 + Stage-5 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-7 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 key (type: string) + 1 key (type: string) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Select Operator + expressions: key (type: string), UDFToString(1) (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Map 2 + Map Operator Tree: + TableScan + alias: a + Filter Operator + predicate: key is not null (type: boolean) + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col6 + input vertices: + 1 Map 3 + Select Operator + expressions: _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Local Work: + Map Reduce Local Work + + Stage: Stage-6 + Conditional Operator + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-4 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as vals from inputTbl1 +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as vals from inputTbl1 +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +vals bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 2 + numRows -1 + rawDataSize -1 + totalSize 194 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe +InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from outputTbl1 order by key, vals +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, vals +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 11 +2 1 +2 12 +3 1 +3 13 +7 1 +7 17 +8 1 +8 1 +8 18 +8 18 +8 28 +8 28 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out new file mode 100644 
index 0000000..a21d91d --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out @@ -0,0 +1,306 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a mapred query, and the +-- other one is a map-join query), followed by select star and a file sink. +-- The union selectstar optimization should be performed, and the union should be removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a mapred query, and the +-- other one is a map-join query), followed by select star and a file sink. +-- The union selectstar optimization should be performed, and the union should be removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, count(1) as vals from inputTbl1 group by key +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, count(1) as vals from inputTbl1 group by key +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-7 is a root stage + Stage-1 depends on stages: Stage-7 + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4 + Stage-3 + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5 + Stage-2 + Stage-4 + Stage-5 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-7 + Spark +#### A masked pattern was here #### + Vertices: + Map 4 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 
Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 key (type: string) + 1 key (type: string) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Filter Operator + predicate: key is not null (type: boolean) + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col6 + input vertices: + 1 Map 4 + Select Operator + expressions: _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToString(_col1) (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + + Stage: Stage-6 + Conditional Operator + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-4 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge 
level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, count(1) as vals from inputTbl1 group by key +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, count(1) as vals from inputTbl1 group by key +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +vals bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 3 + numRows -1 + rawDataSize -1 + totalSize 271 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe +InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from outputTbl1 order by key, vals +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, vals +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 11 +2 1 +2 12 +3 1 +3 13 +7 1 +7 17 +8 2 +8 18 +8 18 +8 28 +8 28 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out new file mode 100644 index 0000000..130b221 --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out @@ -0,0 +1,283 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a map-only query, and the +-- other one contains a join, which should be performed as a map-join query at runtime), +-- followed by select star and a file sink. +-- The union selectstar optimization should be performed, and the union should be removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. 
In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a map-only query, and the +-- other one contains a join, which should be performed as a map-join query at runtime), +-- followed by select star and a file sink. +-- The union selectstar optimization should be performed, and the union should be removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as vals from inputTbl1 +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as vals from inputTbl1 +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-7 is a root stage + Stage-1 depends on stages: Stage-7 + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4 + Stage-3 + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5 + Stage-2 + Stage-4 + Stage-5 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-7 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 key (type: string) + 1 key (type: string) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Select Operator + expressions: key (type: string), UDFToString(1) (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input 
format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Map 2 + Map Operator Tree: + TableScan + alias: a + Filter Operator + predicate: key is not null (type: boolean) + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col6 + input vertices: + 1 Map 3 + Select Operator + expressions: _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Local Work: + Map Reduce Local Work + + Stage: Stage-6 + Conditional Operator + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-4 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as vals from inputTbl1 +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as vals from inputTbl1 +union all +select a.key as key, b.val as vals +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +vals bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 2 + numRows -1 + rawDataSize -1 + totalSize 194 +#### A 
masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe +InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from outputTbl1 order by key, vals +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, vals +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 11 +2 1 +2 12 +3 1 +3 13 +7 1 +7 17 +8 1 +8 1 +8 18 +8 18 +8 28 +8 28 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out index 09cd5d3..2996286 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out @@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -52,18 +52,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -170,9 +170,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -180,18 +180,18 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as 
ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1@ds=1 POSTHOOK: Output: default@outputtbl1@ds=2 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -201,7 +201,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -236,12 +236,12 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@outputtbl1 ds=1 ds=2 -PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=1 @@ -251,12 +251,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1 3 1 1 7 1 1 8 2 1 -PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out index 0db279f..dcf883a 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, 
values bigint) partitioned by (ds string) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -210,9 +210,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -220,18 +220,18 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1@ds=1 POSTHOOK: Output: default@outputtbl1@ds=2 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -241,7 +241,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -276,12 +276,12 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@outputtbl1 ds=1 ds=2 -PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +PREHOOK: query: select * 
from outputTbl1 where ds = '1' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=1 @@ -291,12 +291,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1 3 1 1 7 1 1 8 2 1 -PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out index 8f317de..87d7bb3 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -119,9 +119,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -129,18 +129,18 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as 
values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1@ds=1 POSTHOOK: Output: default@outputtbl1@ds=2 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).values EXPRESSION [] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).vals EXPRESSION [] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).values EXPRESSION [] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).vals EXPRESSION [] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -150,7 +150,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -185,12 +185,12 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@outputtbl1 ds=1 ds=2 -PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=1 @@ -201,12 +201,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1 7 1 1 8 1 1 8 1 1 -PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out index 96c8c25..ebe9d51 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: 
database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -168,9 +168,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -178,9 +178,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -191,17 +191,17 @@ POSTHOOK: Output: default@outputtbl1@ds=17 POSTHOOK: Output: default@outputtbl1@ds=18 POSTHOOK: Output: default@outputtbl1@ds=28 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=11).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=11).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=11).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=12).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=12).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=12).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=13).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=13).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=13).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=17).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: 
outputtbl1 PARTITION(ds=17).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=17).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=18).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=18).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=18).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=28).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=28).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=28).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -211,7 +211,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -250,31 +250,31 @@ ds=13 ds=17 ds=18 ds=28 -PREHOOK: query: select * from outputTbl1 where ds = '11' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '11' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=11 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '11' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '11' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=11 #### A masked pattern was here #### 1 1 11 1 1 11 -PREHOOK: query: select * from outputTbl1 where ds = '18' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '18' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=18 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '18' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '18' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=18 #### A masked pattern was here #### 8 1 18 8 1 18 -PREHOOK: query: select * from outputTbl1 where ds is not null order by key, values, ds +PREHOOK: query: select * from outputTbl1 where ds is not null order by key, vals, ds PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=11 @@ -284,7 +284,7 @@ PREHOOK: Input: default@outputtbl1@ds=17 PREHOOK: Input: default@outputtbl1@ds=18 PREHOOK: Input: default@outputtbl1@ds=28 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds is not null order by key, values, ds +POSTHOOK: query: select * from outputTbl1 where ds is not null order by key, vals, ds POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=11 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out index 7049a91..f6cc765 100644 --- 
a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,20 +48,20 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -158,27 +158,27 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -188,7 +188,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -236,21 +236,21 @@ POSTHOOK: Input: default@outputtbl1 PREHOOK: query: -- filter should be fine explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals 
FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7 PREHOOK: type: QUERY POSTHOOK: query: -- filter should be fine explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -365,27 +365,27 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7 PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: select * from outputTbl1 PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 @@ -399,26 +399,26 @@ POSTHOOK: Input: default@outputtbl1 PREHOOK: query: -- filters and sub-queries should be fine explain insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7 PREHOOK: type: QUERY POSTHOOK: query: -- filters and sub-queries should be fine explain insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7 POSTHOOK: type: QUERY @@ -526,38 +526,38 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values 
+SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7 PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out index 298929d..f307b5d 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,22 +48,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT 
key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -154,11 +154,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -166,17 +166,17 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -186,7 +186,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -213,11 +213,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out index 328b1ac..ce38f72 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(values bigint, key string) stored as textfile +PREHOOK: query: create table outputTbl1(vals bigint, key string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(values bigint, key string) stored as textfile +POSTHOOK: query: create table outputTbl1(vals bigint, key string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,20 +46,20 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - 
SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -162,27 +162,27 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -191,7 +191,7 @@ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@outputtbl1 # col_name data_type comment -values bigint +vals bigint key string # Detailed Table Information @@ -219,11 +219,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out index b160397..cb9d92f 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out @@ -48,18 +48,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: 
query: explain insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -170,9 +170,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -180,9 +180,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out new file mode 100644 index 0000000..9ba2c9d --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out @@ -0,0 +1,397 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 map-reduce subqueries is performed followed by select and a file sink +-- However, some columns are repeated. So, union cannot be removed. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- off +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23. The union is removed, the select (which selects columns from +-- both the sub-queries of the union) is pushed above the union. + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 map-reduce subqueries is performed followed by select and a file sink +-- However, some columns are repeated. So, union cannot be removed. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- off +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23. The union is removed, the select (which selects columns from +-- both the sub-queries of the union) is pushed above the union. 
+ +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, vals bigint, vals2 bigint) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, vals bigint, vals2 bigint) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT a.key, a.vals, a.vals +FROM ( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) a +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT a.key, a.vals, a.vals +FROM ( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) a +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 4 <- Map 3 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint), _col1 (type: bigint) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: 
+ input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint), _col1 (type: bigint) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT a.key, a.vals, a.vals +FROM ( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT a.key, a.vals, a.vals +FROM ( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals2 EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +vals bigint +vals2 bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 4 + numRows -1 + rawDataSize -1 + totalSize 60 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from outputTbl1 +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +8 2 2 +2 1 1 +1 1 1 +3 1 1 +7 1 1 +8 2 2 +2 1 1 +1 1 1 +3 1 1 +7 1 1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT a.key, concat(a.vals, a.vals), 
concat(a.vals, a.vals) +FROM ( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) a +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT a.key, concat(a.vals, a.vals), concat(a.vals, a.vals) +FROM ( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) a +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 4 <- Map 3 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(concat(_col1, _col1)) (type: bigint), UDFToLong(concat(_col1, _col1)) (type: bigint) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(concat(_col1, _col1)) (type: bigint), UDFToLong(concat(_col1, _col1)) (type: bigint) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + + Stage: Stage-0 + Move Operator + tables: + replace: 
true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT a.key, concat(a.vals, a.vals), concat(a.vals, a.vals) +FROM ( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT a.key, concat(a.vals, a.vals), concat(a.vals, a.vals) +FROM ( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals2 EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, vals +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, vals +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 11 11 +1 11 11 +2 11 11 +2 11 11 +3 11 11 +3 11 11 +7 11 11 +7 11 11 +8 22 22 +8 22 22 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_23.q.out b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out new file mode 100644 index 0000000..a7e7c96 --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out @@ -0,0 +1,265 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 map-reduce subqueries is performed followed by select star and a file sink +-- There is no need to write the temporary results of the sub-queries, and then read them +-- again to process the union. The union can be removed completely. One of the sub-queries +-- would have multiple map-reduce jobs. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- off +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 map-reduce subqueries is performed followed by select star and a file sink +-- There is no need to write the temporary results of the sub-queries, and then read them +-- again to process the union. The union can be removed completely. One of the sub-queries +-- would have multiple map-reduce jobs. +-- It does not matter, whether the output is merged or not. 
In this case, merging is turned +-- off +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * +FROM ( + SELECT key, count(1) as vals from + (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) subq2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * +FROM ( + SELECT key, count(1) as vals from + (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) subq2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 6 <- Map 5 (GROUP, 2) + Reducer 3 <- Reducer 2 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key 
expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + Reducer 6 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT * +FROM ( + SELECT key, count(1) as vals from + (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) subq2 +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT * +FROM ( + SELECT key, count(1) as vals from + (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key +) subq2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)a.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)a.null, (inputtbl1)a.null, (inputtbl1)b.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +vals bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None 
+Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 4 + numRows -1 + rawDataSize -1 + totalSize 40 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from outputTbl1 order by key, vals +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, vals +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 1 +2 1 +2 1 +3 1 +3 1 +7 1 +7 1 +8 2 +8 4 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out index 8bc748d..b50671c 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out @@ -24,11 +24,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key double, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key double, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key double, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key double, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -44,18 +44,18 @@ PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -160,9 +160,9 @@ STAGE PLANS: PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -170,15 +170,15 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, 
count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -188,7 +188,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key double -values bigint +vals bigint # Detailed Table Information Database: default @@ -215,11 +215,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out index 396a43d..ce77e11 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out @@ -26,27 +26,27 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl2(key string, vals bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, vals bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile +PREHOOK: query: 
create table outputTbl3(key string, vals bigint) partitioned by (ds string,hr string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl3 -POSTHOOK: query: create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile +POSTHOOK: query: create table outputTbl3(key string, vals bigint) partitioned by (ds string,hr string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl3 @@ -62,18 +62,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -174,9 +174,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -184,15 +184,15 @@ PREHOOK: Output: default@outputtbl1@ds=2004 POSTHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1@ds=2004 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2004).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2004).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2004).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 partition(ds='2004') PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -202,7 +202,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -234,12 +234,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2004 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: 
default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2004 @@ -388,7 +388,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@outputtbl2@ds=2008-04-08 POSTHOOK: Lineage: outputtbl2 PARTITION(ds=2008-04-08).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl2 PARTITION(ds=2008-04-08).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl2 PARTITION(ds=2008-04-08).vals EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: show partitions outputTbl2 PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@outputtbl2 @@ -405,7 +405,7 @@ POSTHOOK: Input: default@outputtbl2 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -571,9 +571,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@outputtbl3@ds=2008-04-08/hr=11 POSTHOOK: Output: default@outputtbl3@ds=2008-04-08/hr=12 POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=11).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=11).vals EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=12).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=12).vals EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: show partitions outputTbl3 PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@outputtbl3 @@ -591,7 +591,7 @@ POSTHOOK: Input: default@outputtbl3 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out index bac5441..9e28dbd 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) 
stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,22 +48,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -140,11 +140,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -152,17 +152,17 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -172,7 +172,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -199,11 +199,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out index e83788a..60dae3d 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out +++ 
b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -206,9 +206,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -216,15 +216,15 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -234,7 +234,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -261,11 +261,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY 
POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out index 0deb03d..6312f30 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,22 +50,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -206,11 +206,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -218,17 +218,17 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -238,7 +238,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values 
bigint +vals bigint # Detailed Table Information Database: default @@ -265,11 +265,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6.q.out index 64c252d..ae011f5 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_6.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_6.q.out @@ -20,19 +20,19 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl2(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 @@ -46,18 +46,18 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -181,9 +181,9 @@ STAGE PLANS: name: default.outputtbl2 PREHOOK: query: FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -192,9 +192,9 @@ PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 PREHOOK: Output: 
default@outputtbl2 POSTHOOK: query: FROM ( - SELECT key, count(1) as values from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -203,14 +203,14 @@ POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Output: default@outputtbl2 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: Lineage: outputtbl2.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### @@ -224,11 +224,11 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: select * from outputTbl2 order by key, values +PREHOOK: query: select * from outputTbl2 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl2 order by key, values +POSTHOOK: query: select * from outputTbl2 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out new file mode 100644 index 0000000..d0faa72 --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out @@ -0,0 +1,1159 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (all of which are mapred queries) +-- followed by select star and a file sink in 2 output tables. +-- The optimization does not take effect since it is a multi-table insert. +-- It does not matter whether the output is merged or not. In this case, +-- merging is turned off + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (all of which are mapred queries) +-- followed by select star and a file sink in 2 output tables. +-- The optimization does not take effect since it is a multi-table insert. +-- It does not matter whether the output is merged or not.
In this case, +-- merging is turned off + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: create table outputTbl2(key string, vals bigint) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl2 +POSTHOOK: query: create table outputTbl2(key string, vals bigint) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl2 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +FROM ( + select * from( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key + )subq +) a +insert overwrite table outputTbl1 select * +insert overwrite table outputTbl2 select * +PREHOOK: type: QUERY +POSTHOOK: query: explain +FROM ( + select * from( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key + )subq +) a +insert overwrite table outputTbl1 select * +insert overwrite table outputTbl2 select * +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-0 depends on stages: Stage-2 + Stage-1 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-2 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 4 <- Map 3 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce 
partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl2 + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl2 + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + + Stage: Stage-1 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl2 + +PREHOOK: query: FROM ( + select * from( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key + )subq +) a +insert overwrite table outputTbl1 select * +insert overwrite table outputTbl2 select * +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +PREHOOK: Output: default@outputtbl2 +POSTHOOK: query: FROM ( + select * from( + SELECT key, count(1) as vals from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as vals from inputTbl1 group by key + )subq +) a +insert overwrite table outputTbl1 select * +insert overwrite table outputTbl2 select * +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Output: default@outputtbl2 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl2.vals EXPRESSION 
[(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, vals +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, vals +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 1 +2 1 +2 1 +3 1 +3 1 +7 1 +7 1 +8 2 +8 2 +PREHOOK: query: select * from outputTbl2 order by key, vals +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl2 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl2 order by key, vals +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl2 +#### A masked pattern was here #### +1 1 +1 1 +2 1 +2 1 +3 1 +3 1 +7 1 +7 1 +8 2 +8 2 +PREHOOK: query: -- The following queries guarantee the correctness. +explain +select avg(c) from( + SELECT count(1)-200 as c from src + UNION ALL + SELECT count(1) as c from src +)subq +PREHOOK: type: QUERY +POSTHOOK: query: -- The following queries guarantee the correctness. +explain +select avg(c) from( + SELECT count(1)-200 as c from src + UNION ALL + SELECT count(1) as c from src +)subq +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) + Reducer 5 <- Map 4 (GROUP, 1) + Reducer 3 <- Reducer 2 (GROUP, 1), Reducer 5 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Map 4 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: (_col0 - 200) (type: bigint) + outputColumnNames: _col0 + Group By Operator + aggregations: avg(_col0) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: struct) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Group By Operator + aggregations: avg(_col0) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: struct) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select avg(c) from( + SELECT count(1)-200 as c from src + UNION ALL + SELECT count(1) as c from src +)subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select avg(c) from( + SELECT count(1)-200 as c from src + UNION ALL + SELECT count(1) as c from src +)subq +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +400.0 +PREHOOK: query: explain +select key, avg(c) over w from( + SELECT key, count(1)*2 as c from src group by key + UNION ALL + SELECT key, count(1) as c from src group by key +)subq group by key, c +WINDOW w AS (PARTITION BY key ORDER BY c ROWS UNBOUNDED PRECEDING) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select key, avg(c) over w from( + SELECT key, count(1)*2 as c from src group by key + UNION ALL + SELECT key, count(1) as c from src group by key +)subq group by key, c +WINDOW w AS (PARTITION BY key ORDER BY c ROWS UNBOUNDED PRECEDING) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 6 <- Map 5 (GROUP, 2) + Reducer 3 <- Reducer 2 (GROUP, 2), Reducer 6 (GROUP, 2) + Reducer 4 <- Reducer 3 (PARTITION-LEVEL SORT, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 5 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + 
outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), (_col1 * 2) (type: bigint) + outputColumnNames: _col0, _col1 + Group By Operator + keys: _col0 (type: string), _col1 (type: bigint) + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reducer 4 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + PTF Operator + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _wcol0 (type: double) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 6 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Group By Operator + keys: _col0 (type: string), _col1 (type: bigint) + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select key, avg(c) over w from( + SELECT key, count(1)*2 as c from src group by key + UNION ALL + SELECT key, count(1) as c from src group by key +)subq group by key, c +WINDOW w AS (PARTITION BY key ORDER BY c ROWS UNBOUNDED PRECEDING) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select key, avg(c) over w from( + SELECT key, count(1)*2 as c from src group by key + UNION ALL + SELECT key, count(1) as c from src group by key +)subq group by key, c +WINDOW w AS (PARTITION BY key ORDER BY c ROWS UNBOUNDED PRECEDING) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 3.0 +0 4.5 +103 2.0 +103 3.0 +105 1.0 +105 1.5 +11 1.0 +11 1.5 +114 1.0 +114 1.5 +116 1.0 +116 1.5 +118 2.0 +118 3.0 +125 2.0 +125 3.0 +129 2.0 +129 3.0 +134 2.0 +134 3.0 +136 1.0 +136 1.5 +138 4.0 +138 6.0 +143 1.0 +143 1.5 +145 1.0 +145 1.5 +149 2.0 +149 3.0 +15 2.0 +15 3.0 +150 1.0 +150 1.5 +152 2.0 +152 3.0 +156 1.0 +156 1.5 +158 1.0 +158 1.5 +163 1.0 +163 1.5 +165 2.0 +165 3.0 +167 3.0 +167 4.5 +169 4.0 +169 6.0 +17 1.0 +17 1.5 +170 1.0 +170 1.5 +172 2.0 +172 3.0 +174 2.0 
+174 3.0 +176 2.0 +176 3.0 +178 1.0 +178 1.5 +181 1.0 +181 1.5 +183 1.0 +183 1.5 +187 3.0 +187 4.5 +189 1.0 +189 1.5 +19 1.0 +19 1.5 +190 1.0 +190 1.5 +192 1.0 +192 1.5 +194 1.0 +194 1.5 +196 1.0 +196 1.5 +2 1.0 +2 1.5 +20 1.0 +20 1.5 +200 2.0 +200 3.0 +202 1.0 +202 1.5 +208 3.0 +208 4.5 +213 2.0 +213 3.0 +217 2.0 +217 3.0 +219 2.0 +219 3.0 +222 1.0 +222 1.5 +224 2.0 +224 3.0 +226 1.0 +226 1.5 +228 1.0 +228 1.5 +233 2.0 +233 3.0 +235 1.0 +235 1.5 +237 2.0 +237 3.0 +239 2.0 +239 3.0 +24 2.0 +24 3.0 +242 2.0 +242 3.0 +244 1.0 +244 1.5 +248 1.0 +248 1.5 +255 2.0 +255 3.0 +257 1.0 +257 1.5 +26 2.0 +26 3.0 +260 1.0 +260 1.5 +262 1.0 +262 1.5 +266 1.0 +266 1.5 +273 3.0 +273 4.5 +275 1.0 +275 1.5 +277 4.0 +277 6.0 +28 1.0 +28 1.5 +280 2.0 +280 3.0 +282 2.0 +282 3.0 +284 1.0 +284 1.5 +286 1.0 +286 1.5 +288 2.0 +288 3.0 +291 1.0 +291 1.5 +305 1.0 +305 1.5 +307 2.0 +307 3.0 +309 2.0 +309 3.0 +310 1.0 +310 1.5 +316 3.0 +316 4.5 +318 3.0 +318 4.5 +321 2.0 +321 3.0 +323 1.0 +323 1.5 +325 2.0 +325 3.0 +327 3.0 +327 4.5 +33 1.0 +33 1.5 +332 1.0 +332 1.5 +336 1.0 +336 1.5 +338 1.0 +338 1.5 +341 1.0 +341 1.5 +345 1.0 +345 1.5 +35 3.0 +35 4.5 +356 1.0 +356 1.5 +365 1.0 +365 1.5 +367 2.0 +367 3.0 +369 3.0 +369 4.5 +37 2.0 +37 3.0 +374 1.0 +374 1.5 +378 1.0 +378 1.5 +389 1.0 +389 1.5 +392 1.0 +392 1.5 +394 1.0 +394 1.5 +396 3.0 +396 4.5 +4 1.0 +4 1.5 +400 1.0 +400 1.5 +402 1.0 +402 1.5 +404 2.0 +404 3.0 +406 4.0 +406 6.0 +411 1.0 +411 1.5 +413 2.0 +413 3.0 +417 3.0 +417 4.5 +419 1.0 +419 1.5 +42 2.0 +42 3.0 +424 2.0 +424 3.0 +431 3.0 +431 4.5 +435 1.0 +435 1.5 +437 1.0 +437 1.5 +439 2.0 +439 3.0 +44 1.0 +44 1.5 +444 1.0 +444 1.5 +446 1.0 +446 1.5 +448 1.0 +448 1.5 +453 1.0 +453 1.5 +455 1.0 +455 1.5 +457 1.0 +457 1.5 +459 2.0 +459 3.0 +460 1.0 +460 1.5 +462 2.0 +462 3.0 +466 3.0 +466 4.5 +468 4.0 +468 6.0 +475 1.0 +475 1.5 +477 1.0 +477 1.5 +479 1.0 +479 1.5 +480 3.0 +480 4.5 +482 1.0 +482 1.5 +484 1.0 +484 1.5 +491 1.0 +491 1.5 +493 1.0 +493 1.5 +495 1.0 +495 1.5 +497 1.0 +497 1.5 +51 2.0 +51 3.0 +53 1.0 +53 1.5 +57 1.0 +57 1.5 +64 1.0 +64 1.5 +66 1.0 +66 1.5 +77 1.0 +77 1.5 +8 1.0 +8 1.5 +80 1.0 +80 1.5 +82 1.0 +82 1.5 +84 2.0 +84 3.0 +86 1.0 +86 1.5 +95 2.0 +95 3.0 +97 2.0 +97 3.0 +10 1.0 +10 1.5 +100 2.0 +100 3.0 +104 2.0 +104 3.0 +111 1.0 +111 1.5 +113 2.0 +113 3.0 +119 3.0 +119 4.5 +12 2.0 +12 3.0 +120 2.0 +120 3.0 +126 1.0 +126 1.5 +128 3.0 +128 4.5 +131 1.0 +131 1.5 +133 1.0 +133 1.5 +137 2.0 +137 3.0 +146 2.0 +146 3.0 +153 1.0 +153 1.5 +155 1.0 +155 1.5 +157 1.0 +157 1.5 +160 1.0 +160 1.5 +162 1.0 +162 1.5 +164 2.0 +164 3.0 +166 1.0 +166 1.5 +168 1.0 +168 1.5 +175 2.0 +175 3.0 +177 1.0 +177 1.5 +179 2.0 +179 3.0 +18 2.0 +18 3.0 +180 1.0 +180 1.5 +186 1.0 +186 1.5 +191 2.0 +191 3.0 +193 3.0 +193 4.5 +195 2.0 +195 3.0 +197 2.0 +197 3.0 +199 3.0 +199 4.5 +201 1.0 +201 1.5 +203 2.0 +203 3.0 +205 2.0 +205 3.0 +207 2.0 +207 3.0 +209 2.0 +209 3.0 +214 1.0 +214 1.5 +216 2.0 +216 3.0 +218 1.0 +218 1.5 +221 2.0 +221 3.0 +223 2.0 +223 3.0 +229 2.0 +229 3.0 +230 5.0 +230 7.5 +238 2.0 +238 3.0 +241 1.0 +241 1.5 +247 1.0 +247 1.5 +249 1.0 +249 1.5 +252 1.0 +252 1.5 +256 2.0 +256 3.0 +258 1.0 +258 1.5 +263 1.0 +263 1.5 +265 2.0 +265 3.0 +27 1.0 +27 1.5 +272 2.0 +272 3.0 +274 1.0 +274 1.5 +278 2.0 +278 3.0 +281 2.0 +281 3.0 +283 1.0 +283 1.5 +285 1.0 +285 1.5 +287 1.0 +287 1.5 +289 1.0 +289 1.5 +292 1.0 +292 1.5 +296 1.0 +296 1.5 +298 3.0 +298 4.5 +30 1.0 +30 1.5 +302 1.0 +302 1.5 +306 1.0 +306 1.5 +308 1.0 +308 1.5 +311 3.0 +311 4.5 +315 1.0 +315 1.5 +317 2.0 +317 3.0 +322 2.0 +322 3.0 +331 2.0 +331 3.0 +333 2.0 
+333 3.0 +335 1.0 +335 1.5 +339 1.0 +339 1.5 +34 1.0 +34 1.5 +342 2.0 +342 3.0 +344 2.0 +344 3.0 +348 5.0 +348 7.5 +351 1.0 +351 1.5 +353 2.0 +353 3.0 +360 1.0 +360 1.5 +362 1.0 +362 1.5 +364 1.0 +364 1.5 +366 1.0 +366 1.5 +368 1.0 +368 1.5 +373 1.0 +373 1.5 +375 1.0 +375 1.5 +377 1.0 +377 1.5 +379 1.0 +379 1.5 +382 2.0 +382 3.0 +384 3.0 +384 4.5 +386 1.0 +386 1.5 +393 1.0 +393 1.5 +395 2.0 +395 3.0 +397 2.0 +397 3.0 +399 2.0 +399 3.0 +401 5.0 +401 7.5 +403 3.0 +403 4.5 +407 1.0 +407 1.5 +409 3.0 +409 4.5 +41 1.0 +41 1.5 +414 2.0 +414 3.0 +418 1.0 +418 1.5 +421 1.0 +421 1.5 +427 1.0 +427 1.5 +429 2.0 +429 3.0 +43 1.0 +43 1.5 +430 3.0 +430 4.5 +432 1.0 +432 1.5 +436 1.0 +436 1.5 +438 3.0 +438 4.5 +443 1.0 +443 1.5 +449 1.0 +449 1.5 +452 1.0 +452 1.5 +454 3.0 +454 4.5 +458 2.0 +458 3.0 +463 2.0 +463 3.0 +467 1.0 +467 1.5 +469 5.0 +469 7.5 +47 1.0 +47 1.5 +470 1.0 +470 1.5 +472 1.0 +472 1.5 +478 2.0 +478 3.0 +481 1.0 +481 1.5 +483 1.0 +483 1.5 +485 1.0 +485 1.5 +487 1.0 +487 1.5 +489 4.0 +489 6.0 +490 1.0 +490 1.5 +492 2.0 +492 3.0 +494 1.0 +494 1.5 +496 1.0 +496 1.5 +498 3.0 +498 4.5 +5 3.0 +5 4.5 +54 1.0 +54 1.5 +58 2.0 +58 3.0 +65 1.0 +65 1.5 +67 2.0 +67 3.0 +69 1.0 +69 1.5 +70 3.0 +70 4.5 +72 2.0 +72 3.0 +74 1.0 +74 1.5 +76 2.0 +76 3.0 +78 1.0 +78 1.5 +83 2.0 +83 3.0 +85 1.0 +85 1.5 +87 1.0 +87 1.5 +9 1.0 +9 1.5 +90 3.0 +90 4.5 +92 1.0 +92 1.5 +96 1.0 +96 1.5 +98 2.0 +98 3.0 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out index 2d350a3..b4261af 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -160,9 +160,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: 
default@inputtbl1 @@ -170,15 +170,15 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -188,7 +188,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -215,11 +215,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out index e6ab825..d2609ce 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out @@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -52,22 +52,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY STAGE 
DEPENDENCIES: @@ -158,11 +158,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -170,17 +170,17 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -190,7 +190,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -217,11 +217,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out index 1eef57a..eb345e8 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,12 +50,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals 
from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -63,12 +63,12 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -208,12 +208,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -222,19 +222,19 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -244,7 +244,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -271,11 +271,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out index 21e0876..d021cd9 100644 --- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out @@ -1,4 +1,6 @@ -PREHOOK: query: select distinct ds from srcpart +PREHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +select distinct ds from srcpart PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -6,7 +8,9 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: 
default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select distinct ds from srcpart +POSTHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +select distinct ds from srcpart POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out b/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out index 738abc4..4b958cc 100644 --- a/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out @@ -1,9 +1,13 @@ -PREHOOK: query: -- create table with 1000 rows +PREHOOK: query: -- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011 + +-- create table with 1000 rows create table srcorc(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcorc -POSTHOOK: query: -- create table with 1000 rows +POSTHOOK: query: -- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011 + +-- create table with 1000 rows create table srcorc(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out index 32c961d..8fc7e36 100644 --- a/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out @@ -1,4 +1,6 @@ -PREHOOK: query: CREATE TABLE x +PREHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +CREATE TABLE x ( u bigint, t string, @@ -10,7 +12,9 @@ TBLPROPERTIES ("orc.compress"="ZLIB") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@x -POSTHOOK: query: CREATE TABLE x +POSTHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +CREATE TABLE x ( u bigint, t string, diff --git a/ql/src/test/results/clientpositive/tez/vectorized_date_funcs.q.out b/ql/src/test/results/clientpositive/tez/vectorized_date_funcs.q.out index be782e4..a0128aa 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_date_funcs.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_date_funcs.q.out @@ -1,4 +1,6 @@ -PREHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. +PREHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +-- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. CREATE TABLE date_udf_flight ( origin_city_name STRING, @@ -10,7 +12,9 @@ CREATE TABLE date_udf_flight ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_udf_flight -POSTHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. +POSTHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +-- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. 
CREATE TABLE date_udf_flight ( origin_city_name STRING, diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out index 6fde788..1dc6fef 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out @@ -1,4 +1,7 @@ -PREHOOK: query: select distinct ds from srcpart +PREHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + + +select distinct ds from srcpart PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -6,7 +9,10 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select distinct ds from srcpart +POSTHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + + +select distinct ds from srcpart POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/union_remove_1.q.out b/ql/src/test/results/clientpositive/union_remove_1.q.out index 411f63e..48653ea 100644 --- a/ql/src/test/results/clientpositive/union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/union_remove_1.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -155,9 +155,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -165,15 +165,15 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * 
FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -183,7 +183,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -210,11 +210,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_10.q.out b/ql/src/test/results/clientpositive/union_remove_10.q.out index d80bf32..2db6735 100644 --- a/ql/src/test/results/clientpositive/union_remove_10.q.out +++ b/ql/src/test/results/clientpositive/union_remove_10.q.out @@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -54,12 +54,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -67,12 +67,12 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -206,12 +206,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select 
key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -220,19 +220,19 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -242,7 +242,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -269,11 +269,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_11.q.out b/ql/src/test/results/clientpositive/union_remove_11.q.out index 23ab7c6..9e840be 100644 --- a/ql/src/test/results/clientpositive/union_remove_11.q.out +++ b/ql/src/test/results/clientpositive/union_remove_11.q.out @@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -54,12 +54,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 values from inputTbl1 + SELECT key, 2 vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -67,12 +67,12 @@ POSTHOOK: query: explain insert 
overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 values from inputTbl1 + SELECT key, 2 vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -195,12 +195,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -209,19 +209,19 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all select * FROM ( - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -231,7 +231,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -258,11 +258,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_12.q.out b/ql/src/test/results/clientpositive/union_remove_12.q.out index f9fd323..cc6ad80 100644 --- a/ql/src/test/results/clientpositive/union_remove_12.q.out +++ b/ql/src/test/results/clientpositive/union_remove_12.q.out @@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -52,9 +52,9 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 
+select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c PREHOOK: type: QUERY @@ -62,9 +62,9 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c POSTHOOK: type: QUERY @@ -192,9 +192,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c PREHOOK: type: QUERY @@ -203,16 +203,16 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -222,7 +222,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -249,11 +249,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_13.q.out b/ql/src/test/results/clientpositive/union_remove_13.q.out index b8913e2..30f38ba 100644 --- a/ql/src/test/results/clientpositive/union_remove_13.q.out +++ b/ql/src/test/results/clientpositive/union_remove_13.q.out @@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -52,9 +52,9 @@ PREHOOK: query: 
explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c PREHOOK: type: QUERY @@ -62,9 +62,9 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c POSTHOOK: type: QUERY @@ -215,9 +215,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c PREHOOK: type: QUERY @@ -226,16 +226,16 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -245,7 +245,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -272,11 +272,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_14.q.out b/ql/src/test/results/clientpositive/union_remove_14.q.out index 28e4bb6..edc24a1 100644 --- a/ql/src/test/results/clientpositive/union_remove_14.q.out +++ b/ql/src/test/results/clientpositive/union_remove_14.q.out @@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: 
default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -54,9 +54,9 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c PREHOOK: type: QUERY @@ -64,9 +64,9 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c POSTHOOK: type: QUERY @@ -194,9 +194,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c PREHOOK: type: QUERY @@ -205,16 +205,16 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as vals from inputTbl1 union all -select a.key as key, b.val as values +select a.key as key, b.val as vals FROM inputTbl1 a join inputTbl1 b on a.key=b.key )c POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -224,7 +224,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -251,11 +251,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_15.q.out b/ql/src/test/results/clientpositive/union_remove_15.q.out index faf3e58..0841bd3 100644 --- a/ql/src/test/results/clientpositive/union_remove_15.q.out +++ b/ql/src/test/results/clientpositive/union_remove_15.q.out @@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table 
outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -52,18 +52,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -171,9 +171,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -181,18 +181,18 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1@ds=1 POSTHOOK: Output: default@outputtbl1@ds=2 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -202,7 +202,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # 
col_name data_type comment @@ -237,12 +237,12 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@outputtbl1 ds=1 ds=2 -PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=1 @@ -252,12 +252,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1 3 1 1 7 1 1 8 2 1 -PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2 diff --git a/ql/src/test/results/clientpositive/union_remove_16.q.out b/ql/src/test/results/clientpositive/union_remove_16.q.out index 8656c11..05e607a 100644 --- a/ql/src/test/results/clientpositive/union_remove_16.q.out +++ b/ql/src/test/results/clientpositive/union_remove_16.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -203,9 +203,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as 
values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -213,18 +213,18 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as vals, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1@ds=1 POSTHOOK: Output: default@outputtbl1@ds=2 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -234,7 +234,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -269,12 +269,12 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@outputtbl1 ds=1 ds=2 -PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=1 @@ -284,12 +284,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1 3 1 1 7 1 1 8 2 1 -PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2 diff --git a/ql/src/test/results/clientpositive/union_remove_17.q.out b/ql/src/test/results/clientpositive/union_remove_17.q.out index 386b023..034b61c 100644 --- a/ql/src/test/results/clientpositive/union_remove_17.q.out +++ b/ql/src/test/results/clientpositive/union_remove_17.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as 
textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -126,9 +126,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -136,18 +136,18 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as vals, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as vals, '2' as ds from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1@ds=1 POSTHOOK: Output: default@outputtbl1@ds=2 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).values EXPRESSION [] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).vals EXPRESSION [] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).values EXPRESSION [] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).vals EXPRESSION [] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -157,7 +157,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -192,12 +192,12 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@outputtbl1 ds=1 ds=2 -PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals 
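The values -> vals renames that fill these union_remove_*.q.out hunks exist for the same reason as the comments above: VALUES is a reserved word in SQL2011, so with the new keyword handling on by default the old golden queries would no longer parse. A sketch of the failure mode and the fix (the error text is approximate, not taken from this patch):

-- Rejected once SQL2011 keywords are reserved (approximate error):
--   create table outputTbl1(key string, values bigint);
--   FAILED: ParseException ... cannot recognize input near 'values'
-- The tests rename the column instead:
create table outputTbl1(key string, vals bigint) stored as textfile;
select key, count(1) as vals from inputTbl1 group by key;
select * from outputTbl1 order by key, vals;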
PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=1 @@ -208,12 +208,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1 7 1 1 8 1 1 8 1 1 -PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2 diff --git a/ql/src/test/results/clientpositive/union_remove_18.q.out b/ql/src/test/results/clientpositive/union_remove_18.q.out index 0ce6e81..f9e24e1 100644 --- a/ql/src/test/results/clientpositive/union_remove_18.q.out +++ b/ql/src/test/results/clientpositive/union_remove_18.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -169,9 +169,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -179,9 +179,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM 
( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as vals, ds from inputTbl1 group by key, ds ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -192,17 +192,17 @@ POSTHOOK: Output: default@outputtbl1@ds=17 POSTHOOK: Output: default@outputtbl1@ds=18 POSTHOOK: Output: default@outputtbl1@ds=28 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=11).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=11).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=11).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=12).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=12).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=12).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=13).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=13).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=13).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=17).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=17).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=17).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=18).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=18).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=18).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1 PARTITION(ds=28).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=28).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=28).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -212,7 +212,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -251,31 +251,31 @@ ds=13 ds=17 ds=18 ds=28 -PREHOOK: query: select * 
from outputTbl1 where ds = '11' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '11' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=11 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '11' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '11' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=11 #### A masked pattern was here #### 1 1 11 1 1 11 -PREHOOK: query: select * from outputTbl1 where ds = '18' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '18' order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=18 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '18' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '18' order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=18 #### A masked pattern was here #### 8 1 18 8 1 18 -PREHOOK: query: select * from outputTbl1 where ds is not null order by key, values, ds +PREHOOK: query: select * from outputTbl1 where ds is not null order by key, vals, ds PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=11 @@ -285,7 +285,7 @@ PREHOOK: Input: default@outputtbl1@ds=17 PREHOOK: Input: default@outputtbl1@ds=18 PREHOOK: Input: default@outputtbl1@ds=28 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds is not null order by key, values, ds +POSTHOOK: query: select * from outputTbl1 where ds is not null order by key, vals, ds POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=11 diff --git a/ql/src/test/results/clientpositive/union_remove_19.q.out b/ql/src/test/results/clientpositive/union_remove_19.q.out index b6d557b..779583f 100644 --- a/ql/src/test/results/clientpositive/union_remove_19.q.out +++ b/ql/src/test/results/clientpositive/union_remove_19.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,20 +48,20 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT 
key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -157,27 +157,27 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -187,7 +187,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -235,21 +235,21 @@ POSTHOOK: Input: default@outputtbl1 PREHOOK: query: -- filter should be fine explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7 PREHOOK: type: QUERY POSTHOOK: query: -- filter should be fine explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -351,27 +351,27 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7 PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - 
SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a where a.key = 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: select * from outputTbl1 PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 @@ -385,26 +385,26 @@ POSTHOOK: Input: default@outputtbl1 PREHOOK: query: -- filters and sub-queries should be fine explain insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7 PREHOOK: type: QUERY POSTHOOK: query: -- filters and sub-queries should be fine explain insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7 POSTHOOK: type: QUERY @@ -515,38 +515,38 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7 PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -select key, values from +select key, vals from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a ) b where b.key >= 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY 
PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_2.q.out b/ql/src/test/results/clientpositive/union_remove_2.q.out index b9e575b..35ca11d 100644 --- a/ql/src/test/results/clientpositive/union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/union_remove_2.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,22 +48,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -162,11 +162,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -174,17 +174,17 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: 
query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -194,7 +194,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -221,11 +221,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_20.q.out b/ql/src/test/results/clientpositive/union_remove_20.q.out index b2819ce..691a0dc 100644 --- a/ql/src/test/results/clientpositive/union_remove_20.q.out +++ b/ql/src/test/results/clientpositive/union_remove_20.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(values bigint, key string) stored as textfile +PREHOOK: query: create table outputTbl1(vals bigint, key string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(values bigint, key string) stored as textfile +POSTHOOK: query: create table outputTbl1(vals bigint, key string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,20 +46,20 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -163,27 +163,27 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.vals, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a 
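An alternative the tests do not take would be to keep the old column name and quote it: Hive accepts backtick-quoted identifiers, which sidesteps keyword reservation entirely. A sketch (assuming hive.support.quoted.identifiers is at its usual column setting):

-- Backticks keep 'values' usable as an identifier even when reserved:
create table outputTbl1(key string, `values` bigint) stored as textfile;
select key, count(1) as `values` from inputTbl1 group by key;
select * from outputTbl1 order by key, `values`;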
POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -192,7 +192,7 @@ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@outputtbl1 # col_name data_type comment -values bigint +vals bigint key string # Detailed Table Information @@ -220,11 +220,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_21.q.out b/ql/src/test/results/clientpositive/union_remove_21.q.out index ac20851..bf6de20 100644 --- a/ql/src/test/results/clientpositive/union_remove_21.q.out +++ b/ql/src/test/results/clientpositive/union_remove_21.q.out @@ -48,18 +48,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -151,9 +151,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -161,9 +161,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_22.q.out b/ql/src/test/results/clientpositive/union_remove_22.q.out index a8257d0..c6a6484 100644 --- a/ql/src/test/results/clientpositive/union_remove_22.q.out +++ b/ql/src/test/results/clientpositive/union_remove_22.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored 
as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint, values2 bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint, vals2 bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint, values2 bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint, vals2 bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -44,20 +44,20 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.vals, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.vals, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -161,28 +161,28 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.vals, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.vals, a.vals FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -POSTHOOK: Lineage: outputtbl1.values2 EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals2 EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -192,8 +192,8 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint -values2 bigint +vals bigint +vals2 bigint # Detailed Table Information Database: default @@ -240,20 +240,20 @@ 
POSTHOOK: Input: default@outputtbl1 8 2 2 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.vals, a.vals), concat(a.vals, a.vals) FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.vals, a.vals), concat(a.vals, a.vals) FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -357,33 +357,33 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.vals, a.vals), concat(a.vals, a.vals) FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.vals, a.vals), concat(a.vals, a.vals) FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -POSTHOOK: Lineage: outputtbl1.values2 EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals2 EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_23.q.out b/ql/src/test/results/clientpositive/union_remove_23.q.out index a2ed7f3..2e573fe 100644 --- a/ql/src/test/results/clientpositive/union_remove_23.q.out +++ b/ql/src/test/results/clientpositive/union_remove_23.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as 
textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,20 +48,20 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as vals from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) subq2 PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as vals from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) subq2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -195,10 +195,10 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as vals from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) subq2 PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -206,16 +206,16 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as vals from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) subq2 POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)a.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)a.null, (inputtbl1)a.null, (inputtbl1)b.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)a.null, (inputtbl1)a.null, (inputtbl1)b.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -225,7 +225,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -252,11 +252,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order 
by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_24.q.out b/ql/src/test/results/clientpositive/union_remove_24.q.out index d0ac662..c42dd66 100644 --- a/ql/src/test/results/clientpositive/union_remove_24.q.out +++ b/ql/src/test/results/clientpositive/union_remove_24.q.out @@ -24,11 +24,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key double, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key double, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key double, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key double, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -44,18 +44,18 @@ PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -161,9 +161,9 @@ STAGE PLANS: PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -171,15 +171,15 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as vals FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as vals FROM inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, 
(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -189,7 +189,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key double -values bigint +vals bigint # Detailed Table Information Database: default @@ -216,11 +216,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_25.q.out b/ql/src/test/results/clientpositive/union_remove_25.q.out index 8c93ead..cd58b08 100644 --- a/ql/src/test/results/clientpositive/union_remove_25.q.out +++ b/ql/src/test/results/clientpositive/union_remove_25.q.out @@ -26,27 +26,27 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl2(key string, vals bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, vals bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile +PREHOOK: query: create table outputTbl3(key string, vals bigint) partitioned by (ds string,hr string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl3 -POSTHOOK: query: create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile +POSTHOOK: query: create table outputTbl3(key string, vals bigint) partitioned by (ds string,hr string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl3 @@ -62,18 +62,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - 
SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -173,9 +173,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -183,15 +183,15 @@ PREHOOK: Output: default@outputtbl1@ds=2004 POSTHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1@ds=2004 POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2004).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2004).values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2004).vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 partition(ds='2004') PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -201,7 +201,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -233,12 +233,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2004 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2004 @@ -390,7 +390,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@outputtbl2@ds=2008-04-08 POSTHOOK: Lineage: outputtbl2 PARTITION(ds=2008-04-08).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl2 PARTITION(ds=2008-04-08).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl2 PARTITION(ds=2008-04-08).vals EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, 
type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: show partitions outputTbl2 PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@outputtbl2 @@ -407,7 +407,7 @@ POSTHOOK: Input: default@outputtbl2 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment @@ -576,9 +576,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@outputtbl3@ds=2008-04-08/hr=11 POSTHOOK: Output: default@outputtbl3@ds=2008-04-08/hr=12 POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=11).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=11).vals EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=12).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=12).vals EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: show partitions outputTbl3 PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@outputtbl3 @@ -596,7 +596,7 @@ POSTHOOK: Input: default@outputtbl3 # col_name data_type comment key string -values bigint +vals bigint # Partition Information # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/union_remove_3.q.out b/ql/src/test/results/clientpositive/union_remove_3.q.out index e210461..8376364 100644 --- a/ql/src/test/results/clientpositive/union_remove_3.q.out +++ b/ql/src/test/results/clientpositive/union_remove_3.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,22 +48,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals 
from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -151,11 +151,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -163,17 +163,17 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -183,7 +183,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -210,11 +210,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_4.q.out b/ql/src/test/results/clientpositive/union_remove_4.q.out index 2291a43..f597568 100644 --- a/ql/src/test/results/clientpositive/union_remove_4.q.out +++ b/ql/src/test/results/clientpositive/union_remove_4.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ PREHOOK: query: explain 
insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -199,9 +199,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -209,15 +209,15 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -227,7 +227,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -254,11 +254,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_5.q.out b/ql/src/test/results/clientpositive/union_remove_5.q.out index 26179f5..cfdb36c 100644 --- a/ql/src/test/results/clientpositive/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/union_remove_5.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table 
outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,22 +50,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -208,11 +208,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -220,17 +220,17 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -240,7 +240,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -267,11 +267,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_6.q.out b/ql/src/test/results/clientpositive/union_remove_6.q.out index ff23e2d..1e3fd73 100644 --- a/ql/src/test/results/clientpositive/union_remove_6.q.out +++ 
b/ql/src/test/results/clientpositive/union_remove_6.q.out @@ -20,19 +20,19 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl2(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 @@ -46,18 +46,18 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -205,9 +205,9 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe PREHOOK: query: FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -216,9 +216,9 @@ PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 PREHOOK: Output: default@outputtbl2 POSTHOOK: query: FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -227,14 +227,14 @@ POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Output: default@outputtbl2 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] 
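Renaming is not the only escape hatch these queries could have used. Backtick quoting would also keep the original name legal; a hedged sketch of that alternative, under the assumption that hive.support.quoted.identifiers stays at its default of column:

-- A quoted identifier may be any word, reserved or not:
CREATE TABLE outputTbl1 (key string, `values` bigint) STORED AS textfile;
SELECT key, count(1) AS `values` FROM inputTbl1 GROUP BY key;
SELECT * FROM outputTbl1 ORDER BY key, `values`;

The patch opts for the plain rename instead, which avoids threading backticks through every test query.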
+POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: Lineage: outputtbl2.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### @@ -248,11 +248,11 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: select * from outputTbl2 order by key, values +PREHOOK: query: select * from outputTbl2 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl2 order by key, values +POSTHOOK: query: select * from outputTbl2 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out index be15c1f..2d94424 100644 --- a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out +++ b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out @@ -20,19 +20,19 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl2(key string, vals bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, vals bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 @@ -47,9 +47,9 @@ POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * @@ -58,9 +58,9 @@ PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 
group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * @@ -210,9 +210,9 @@ STAGE PLANS: PREHOOK: query: FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * @@ -223,9 +223,9 @@ PREHOOK: Output: default@outputtbl1 PREHOOK: Output: default@outputtbl2 POSTHOOK: query: FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * @@ -235,14 +235,14 @@ POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Output: default@outputtbl2 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: Lineage: outputtbl2.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### @@ -256,11 +256,11 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: select * from outputTbl2 order by key, values +PREHOOK: query: select * from outputTbl2 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl2 order by key, values +POSTHOOK: query: select * from outputTbl2 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_7.q.out b/ql/src/test/results/clientpositive/union_remove_7.q.out index f0e59cb..3f2b428 100644 --- a/ql/src/test/results/clientpositive/union_remove_7.q.out +++ b/ql/src/test/results/clientpositive/union_remove_7.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) 
stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -159,9 +159,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -169,15 +169,15 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -187,7 +187,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -214,11 +214,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_8.q.out b/ql/src/test/results/clientpositive/union_remove_8.q.out index 2cba717..6778407 100644 --- a/ql/src/test/results/clientpositive/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/union_remove_8.q.out @@ -32,11 +32,11 @@ create table inputTbl1(key string, 
val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -52,22 +52,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -166,11 +166,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -178,17 +178,17 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as vals from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -198,7 +198,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -225,11 +225,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, 
values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_9.q.out b/ql/src/test/results/clientpositive/union_remove_9.q.out index 75925ac..ac34577 100644 --- a/ql/src/test/results/clientpositive/union_remove_9.q.out +++ b/ql/src/test/results/clientpositive/union_remove_9.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,12 +50,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -63,12 +63,12 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -209,12 +209,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -223,19 +223,19 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as vals from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 ) a )b POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.vals EXPRESSION [(inputtbl1)inputtbl1.null, ] PREHOOK: query: desc formatted outputTbl1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@outputtbl1 @@ -245,7 
+245,7 @@ POSTHOOK: Input: default@outputtbl1 # col_name data_type comment key string -values bigint +vals bigint # Detailed Table Information Database: default @@ -272,11 +272,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, vals PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, vals POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/varchar_cast.q.out b/ql/src/test/results/clientpositive/varchar_cast.q.out index 5a968f2..975b08a 100644 --- a/ql/src/test/results/clientpositive/varchar_cast.q.out +++ b/ql/src/test/results/clientpositive/varchar_cast.q.out @@ -1,4 +1,6 @@ -PREHOOK: query: -- Cast from varchar to other data types +PREHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +-- Cast from varchar to other data types select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), @@ -11,7 +13,9 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Cast from varchar to other data types +POSTHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +-- Cast from varchar to other data types select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), diff --git a/ql/src/test/results/clientpositive/vectorized_casts.q.out b/ql/src/test/results/clientpositive/vectorized_casts.q.out index f915200..4708c65 100644 --- a/ql/src/test/results/clientpositive/vectorized_casts.q.out +++ b/ql/src/test/results/clientpositive/vectorized_casts.q.out @@ -1,4 +1,6 @@ -PREHOOK: query: -- Test type casting in vectorized mode to verify end-to-end functionality. +PREHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +-- Test type casting in vectorized mode to verify end-to-end functionality. explain select @@ -72,7 +74,9 @@ from alltypesorc -- limit output to a reasonably small number of rows where cbigint % 250 = 0 PREHOOK: type: QUERY -POSTHOOK: query: -- Test type casting in vectorized mode to verify end-to-end functionality. +POSTHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +-- Test type casting in vectorized mode to verify end-to-end functionality. explain select diff --git a/ql/src/test/results/clientpositive/windowing_navfn.q.out b/ql/src/test/results/clientpositive/windowing_navfn.q.out index 531ab6b..33fe764 100644 --- a/ql/src/test/results/clientpositive/windowing_navfn.q.out +++ b/ql/src/test/results/clientpositive/windowing_navfn.q.out @@ -1,6 +1,10 @@ -PREHOOK: query: drop table over10k +PREHOOK: query: -- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011 + +drop table over10k PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table over10k +POSTHOOK: query: -- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011 + +drop table over10k POSTHOOK: type: DROPTABLE PREHOOK: query: create table over10k( t tinyint,