diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index f92478c48b..8277d34731 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -25,9 +25,9 @@ import org.apache.hadoop.hive.ql.ddl.database.drop.DropDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.show.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.ddl.database.use.SwitchDatabaseDesc; -import org.apache.hadoop.hive.ql.ddl.table.info.DescTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.info.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.ddl.table.info.ShowTablesDesc; +import org.apache.hadoop.hive.ql.ddl.table.info.desc.DescTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.info.show.status.ShowTableStatusDesc; +import org.apache.hadoop.hive.ql.ddl.table.info.show.tables.ShowTablesDesc; import org.apache.hadoop.hive.ql.ddl.table.partition.drop.AlterTableDropPartitionDesc; import org.apache.hadoop.hive.ql.ddl.table.partition.show.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.ddl.table.storage.AlterTableSetLocationDesc; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java index 3dc6bf56f2..16387f25db 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.session.SessionState; @@ -219,4 +220,21 @@ private static String getHS2Host(HiveConf conf) throws SemanticException { throw new SemanticException("Kill query is only supported in HiveServer2 (not hive cli)"); } + + /** + * Get the fully qualified name in the node. + * E.g. the node of the form ^(DOT ^(DOT a b) c) will generate a name of the form "a.b.c". + */ + public static String getFQName(ASTNode node) { + if (node.getChildCount() == 0) { + return node.getText(); + } else if (node.getChildCount() == 2) { + return getFQName((ASTNode) node.getChild(0)) + "." + getFQName((ASTNode) node.getChild(1)); + } else if (node.getChildCount() == 3) { + return getFQName((ASTNode) node.getChild(0)) + "." + getFQName((ASTNode) node.getChild(1)) + "." + + getFQName((ASTNode) node.getChild(2)); + } else { + return null; + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/TableInfoUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/TableInfoUtils.java new file mode 100644 index 0000000000..de4fd62fb0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/TableInfoUtils.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.info;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Utilities used by table information DDL commands.
+ */
+public final class TableInfoUtils {
+  private TableInfoUtils() {
+    throw new UnsupportedOperationException("TableInfoUtils should not be instantiated");
+  }
+
+  public static void validateDatabase(Hive db, String databaseName) throws SemanticException {
+    try {
+      if (!db.databaseExists(databaseName)) {
+        throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName));
+      }
+    } catch (HiveException e) {
+      throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName), e);
+    }
+  }
+
+  public static void validateTable(Hive db, Table table, Map<String, String> partSpec) throws SemanticException {
+    if (partSpec != null) {
+      PartitionUtils.getPartition(db, table, partSpec, true);
+    }
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableAnalyzer.java
new file mode 100644
index 0000000000..8dbfb9b509
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableAnalyzer.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.info.desc;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.table.info.TableInfoUtils;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.HiveTableName;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+/**
+ * Analyzer for table describing commands.
+ *
+ * A query like this will generate a tree as follows:
+ *   "describe formatted default.maptable partition (b=100) id;"
+ * TOK_TABTYPE
+ *   TOK_TABNAME --> root for tablename, 2 child nodes mean DB specified
+ *     default
+ *     maptable
+ *   TOK_PARTSPEC --> root node for partition spec. else columnName
+ *     TOK_PARTVAL
+ *       b
+ *       100
+ *   id --> root node for columnName
+ * formatted
+ */
+@DDLType(type=HiveParser.TOK_DESCTABLE)
+public class DescTableAnalyzer extends BaseSemanticAnalyzer {
+  public DescTableAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  public void analyzeInternal(ASTNode root) throws SemanticException {
+    ctx.setResFile(ctx.getLocalTmpPath());
+
+    ASTNode tableTypeExpr = (ASTNode) root.getChild(0);
+
+    TableName tableName = getTableName(tableTypeExpr);
+    Table table = getTable(tableName);
+
+    // process the second child node, if it exists, to get the partition spec
+    Map<String, String> partitionSpec = getPartitionSpec(db, tableTypeExpr, tableName);
+    TableInfoUtils.validateTable(db, table, partitionSpec);
+
+    // process the third child node, if it exists, to get the column path
+    String columnPath = getColumnPath(db, tableTypeExpr, tableName, partitionSpec);
+
+    boolean showColStats = false;
+    boolean isFormatted = false;
+    boolean isExt = false;
+    if (root.getChildCount() == 2) {
+      int descOptions = root.getChild(1).getType();
+      isFormatted = descOptions == HiveParser.KW_FORMATTED;
+      isExt = descOptions == HiveParser.KW_EXTENDED;
+      // in case of a "DESCRIBE FORMATTED tablename column_name" statement, columnPath will contain
+      // tablename.column_name. If column_name is not specified columnPath will be equal to tableName.
+      // This is how we can differentiate if we are describing a table or a column.
+      if (columnPath != null && isFormatted) {
+        showColStats = true;
+      }
+    }
+
+    inputs.add(new ReadEntity(table));
+
+    DescTableDesc desc = new DescTableDesc(ctx.getResFile(), tableName, partitionSpec, columnPath, isExt, isFormatted);
+    Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
+    rootTasks.add(task);
+
+    task.setFetchSource(true);
+    String schema = showColStats ? DescTableDesc.COLUMN_STATISTICS_SCHEMA : DescTableDesc.SCHEMA;
+    setFetchTask(createFetchTask(schema));
+  }
+
+  /** Process the first node to extract the table name; it is either TABLENAME or DBNAME.TABLENAME if a db is given. */
+  private TableName getTableName(ASTNode tableTypeExpr) throws SemanticException {
+    TableName tableName;
+    if (((ASTNode) tableTypeExpr.getChild(0)).getType() == HiveParser.TOK_TABNAME) {
+      ASTNode tableNode = (ASTNode) tableTypeExpr.getChild(0);
+      if (tableNode.getChildCount() == 1) {
+        tableName = HiveTableName.of(((ASTNode) tableNode.getChild(0)).getText());
+      } else {
+        tableName = TableName.fromString(((ASTNode) tableNode.getChild(1)).getText(),
+            SessionState.get().getCurrentCatalog(), ((ASTNode) tableNode.getChild(0)).getText());
+      }
+    } else {
+      throw new SemanticException(((ASTNode) tableTypeExpr.getChild(0)).getText() + " is not an expected token type");
+    }
+
+    // if a database is explicitly specified, validate that it exists
+    if (tableName.getDb() != null) {
+      TableInfoUtils.validateDatabase(db, tableName.getDb());
+    }
+
+    return tableName;
+  }
+
+  /**
+   * Get the column path.
+   * Returns the table name followed by the column name, which may be DOT separated.
+   * Example: lintString.$elem$.myint.
+   * Returns null if no column has been specified.
+   */
+  private String getColumnPath(Hive db, ASTNode node, TableName tableName, Map<String, String> partitionSpec)
+      throws SemanticException {
+
+    // if this ast has only one child, then no column name specified.
+    if (node.getChildCount() == 1) {
+      return null;
+    }
+
+    // Second child node could be partitionSpec or column
+    if (node.getChildCount() > 1) {
+      ASTNode columnNode = (partitionSpec == null) ? (ASTNode) node.getChild(1) : (ASTNode) node.getChild(2);
+      if (columnNode != null) {
+        return String.join(".", tableName.getNotEmptyDbTable(), DDLUtils.getFQName(columnNode));
+      }
+    }
+
+    return null;
+  }
+
+  private Map<String, String> getPartitionSpec(Hive db, ASTNode node, TableName tableName) throws SemanticException {
+    // if this node has only one child, then no partition spec specified.
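+    // (per the tree shape documented on this class: "DESCRIBE t" has one child under TOK_TABTYPE,
+    // "DESCRIBE t PARTITION (b=100)" has two, and "DESCRIBE t PARTITION (b=100) id" has three)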
+    if (node.getChildCount() == 1) {
+      return null;
+    }
+
+    // if the ast has two children, the second child could be a partition spec or a column name
+    // if the ast has three children, the second *has to* be a partition spec
+    if (node.getChildCount() > 2 && (((ASTNode) node.getChild(1)).getType() != HiveParser.TOK_PARTSPEC)) {
+      throw new SemanticException(((ASTNode) node.getChild(1)).getType() + " is not a partition specification");
+    }
+
+    if (((ASTNode) node.getChild(1)).getType() == HiveParser.TOK_PARTSPEC) {
+      ASTNode partNode = (ASTNode) node.getChild(1);
+
+      Table tab = null;
+      try {
+        tab = db.getTable(tableName.getNotEmptyDbTable());
+      } catch (InvalidTableException e) {
+        throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName.getNotEmptyDbTable()), e);
+      } catch (HiveException e) {
+        throw new SemanticException(e.getMessage(), e);
+      }
+
+      Map<String, String> partitionSpec = null;
+      try {
+        partitionSpec = getValidatedPartSpec(tab, partNode, db.getConf(), false);
+      } catch (SemanticException e) {
+        // if we get an exception resolving the partition, it could be a "DESCRIBE table key" statement;
+        // return null and continue processing as DESCRIBE table key
+        return null;
+      }
+
+      if (partitionSpec != null) {
+        Partition part = null;
+        try {
+          part = db.getPartition(tab, partitionSpec, false);
+        } catch (HiveException e) {
+          // if we get an exception finding the partition, it could be a "DESCRIBE table key" statement;
+          // return null and continue processing as DESCRIBE table key
+          return null;
+        }
+
+        if (part == null) {
+          throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partitionSpec.toString()));
+        }
+
+        return partitionSpec;
+      }
+    }
+
+    return null;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableDesc.java
similarity index 98%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableDesc.java
index 0fb14e64c5..f96a10c75c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.info;
+package org.apache.hadoop.hive.ql.ddl.table.info.desc;
 
 import java.io.Serializable;
 import java.util.List;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java
similarity index 99%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java
index 5178fb5fb5..33b6a5b72d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.info;
+package org.apache.hadoop.hive.ql.ddl.table.info.desc;
 
 import java.io.DataOutputStream;
 import java.sql.SQLException;
@@ -66,7 +66,7 @@
 import com.google.common.collect.Lists;
 
 /**
- * Operation process of dropping a table.
+ * Operation process of describing a table.
  */
 public class DescTableOperation extends DDLOperation<DescTableDesc> {
   public DescTableOperation(DDLOperationContext context, DescTableDesc desc) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/package-info.java
new file mode 100644
index 0000000000..cf0073457b
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Describe table DDL operation. */
+package org.apache.hadoop.hive.ql.ddl.table.info.desc;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/ShowTablePropertiesAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/ShowTablePropertiesAnalyzer.java
new file mode 100644
index 0000000000..720487f16f
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/ShowTablePropertiesAnalyzer.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.info.show.properties;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.info.TableInfoUtils;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for show table properties commands.
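+ *
+ * E.g. "SHOW TBLPROPERTIES tbl;" or "SHOW TBLPROPERTIES tbl('key');".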
+ */
+@DDLType(type=HiveParser.TOK_SHOW_TBLPROPERTIES)
+public class ShowTablePropertiesAnalyzer extends BaseSemanticAnalyzer {
+  public ShowTablePropertiesAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  public void analyzeInternal(ASTNode root) throws SemanticException {
+    ctx.setResFile(ctx.getLocalTmpPath());
+
+    TableName tableName = getQualifiedTableName((ASTNode) root.getChild(0));
+    String propertyName = (root.getChildCount() > 1) ? unescapeSQLString(root.getChild(1).getText()) : null;
+
+    TableInfoUtils.validateTable(db, getTable(tableName), null);
+
+    ShowTablePropertiesDesc desc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), tableName, propertyName);
+    Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
+    rootTasks.add(task);
+
+    task.setFetchSource(true);
+    setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA));
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/ShowTablePropertiesDesc.java
similarity index 97%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/ShowTablePropertiesDesc.java
index 9d3d0b9c7e..062dc2e8fa 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/ShowTablePropertiesDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.info;
+package org.apache.hadoop.hive.ql.ddl.table.info.show.properties;
 
 import java.io.Serializable;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/ShowTablePropertiesOperation.java
similarity index 97%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/ShowTablePropertiesOperation.java
index 3826bce0d0..d7ab616146 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/ShowTablePropertiesOperation.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.info;
+package org.apache.hadoop.hive.ql.ddl.table.info.show.properties;
 
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.DDLUtils;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/package-info.java
new file mode 100644
index 0000000000..36fbbc23c7
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/properties/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Show table properties DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.table.info.show.properties; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/ShowTableStatusAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/ShowTableStatusAnalyzer.java new file mode 100644 index 0000000000..425e34b530 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/ShowTableStatusAnalyzer.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.info.show.status; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.table.info.TableInfoUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.HiveTableName; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Analyzer for show table status commands. 
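+ *
+ * E.g. "SHOW TABLE EXTENDED [IN db] LIKE 'pattern' [PARTITION (part_spec)];".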
+ */
+@DDLType(type=HiveParser.TOK_SHOW_TABLESTATUS)
+public class ShowTableStatusAnalyzer extends BaseSemanticAnalyzer {
+  public ShowTableStatusAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  public void analyzeInternal(ASTNode root) throws SemanticException {
+    if (root.getChildCount() > 3 || root.getChildCount() < 1) {
+      throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg());
+    }
+
+    ctx.setResFile(ctx.getLocalTmpPath());
+
+    String tableNames = getUnescapedName((ASTNode) root.getChild(0));
+    String dbName = SessionState.get().getCurrentDatabase();
+    Map<String, String> partitionSpec = null;
+    if (root.getChildCount() > 1) {
+      for (int i = 1; i < root.getChildCount(); i++) {
+        ASTNode child = (ASTNode) root.getChild(i);
+        if (child.getToken().getType() == HiveParser.Identifier) {
+          dbName = unescapeIdentifier(child.getText());
+        } else if (child.getToken().getType() == HiveParser.TOK_PARTSPEC) {
+          partitionSpec = getValidatedPartSpec(getTable(tableNames), child, conf, false);
+        } else {
+          throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(
+              child.toStringTree() + " , Invalid token " + child.getToken().getType()));
+        }
+      }
+    }
+
+    if (partitionSpec != null) {
+      TableInfoUtils.validateTable(db, getTable(HiveTableName.of(tableNames)), partitionSpec);
+    }
+
+    ShowTableStatusDesc desc = new ShowTableStatusDesc(ctx.getResFile(), dbName, tableNames, partitionSpec);
+    Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
+    rootTasks.add(task);
+
+    task.setFetchSource(true);
+    setFetchTask(createFetchTask(ShowTableStatusDesc.SCHEMA));
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/ShowTableStatusDesc.java
similarity index 87%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/ShowTableStatusDesc.java
index b53d138963..672651821b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/ShowTableStatusDesc.java
@@ -16,11 +16,12 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.info;
+package org.apache.hadoop.hive.ql.ddl.table.info.show.status;
 
 import java.io.Serializable;
 import java.util.Map;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.ddl.DDLDesc;
 import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
@@ -39,12 +40,8 @@
   private final String pattern;
   private final Map<String, String> partSpec;
 
-  public ShowTableStatusDesc(String resFile, String dbName, String pattern) {
-    this(resFile, dbName, pattern, null);
-  }
-
-  public ShowTableStatusDesc(String resFile, String dbName, String pattern, Map<String, String> partSpec) {
-    this.resFile = resFile;
+  public ShowTableStatusDesc(Path resFile, String dbName, String pattern, Map<String, String> partSpec) {
+    this.resFile = resFile.toString();
     this.dbName = dbName;
     this.pattern = pattern;
     this.partSpec = partSpec;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/ShowTableStatusOperation.java
similarity index 98%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/ShowTableStatusOperation.java
index bc8ec66970..914e63d80c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/ShowTableStatusOperation.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.info;
+package org.apache.hadoop.hive.ql.ddl.table.info.show.status;
 
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.DDLUtils;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/package-info.java
new file mode 100644
index 0000000000..3c3d1a3c2f
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/status/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Show table status DDL operation. */
+package org.apache.hadoop.hive.ql.ddl.table.info.show.status;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/ShowTablesAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/ShowTablesAnalyzer.java
new file mode 100644
index 0000000000..6b659accc2
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/ShowTablesAnalyzer.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.info.show.tables;
+
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.info.TableInfoUtils;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+/**
+ * Analyzer for show tables commands.
+ */
+@DDLType(type=HiveParser.TOK_SHOWTABLES)
+public class ShowTablesAnalyzer extends BaseSemanticAnalyzer {
+  public ShowTablesAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  public void analyzeInternal(ASTNode root) throws SemanticException {
+    if (root.getChildCount() > 4) {
+      throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(root.toStringTree()));
+    }
+
+    ctx.setResFile(ctx.getLocalTmpPath());
+
+    String dbName = SessionState.get().getCurrentDatabase();
+    String tableNames = null;
+    TableType tableTypeFilter = null;
+    boolean isExtended = false;
+    for (int i = 0; i < root.getChildCount(); i++) {
+      ASTNode child = (ASTNode) root.getChild(i);
+      if (child.getType() == HiveParser.TOK_FROM) { // Specifies a DB
+        dbName = unescapeIdentifier(root.getChild(++i).getText());
+        TableInfoUtils.validateDatabase(db, dbName);
+      } else if (child.getType() == HiveParser.TOK_TABLE_TYPE) { // Filter on table type
+        String tableType = unescapeIdentifier(child.getChild(0).getText());
+        if (!"table_type".equalsIgnoreCase(tableType)) {
+          throw new SemanticException("SHOW TABLES statement only allows equality filter on table_type value");
+        }
+        tableTypeFilter = TableType.valueOf(unescapeSQLString(child.getChild(1).getText()));
+      } else if (child.getType() == HiveParser.KW_EXTENDED) { // Include table type
+        isExtended = true;
+      } else { // Uses a pattern
+        tableNames = unescapeSQLString(child.getText());
+      }
+    }
+
+    inputs.add(new ReadEntity(getDatabase(dbName)));
+
+    ShowTablesDesc desc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames, tableTypeFilter, isExtended);
+    Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
+    rootTasks.add(task);
+
+    task.setFetchSource(true);
+    setFetchTask(createFetchTask(desc.getSchema()));
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesDesc.java
ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/ShowTablesDesc.java similarity index 72% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/ShowTablesDesc.java index 44c1f09c99..99c1118f9f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/ShowTablesDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table.info; +package org.apache.hadoop.hive.ql.ddl.table.info.show.tables; import java.io.Serializable; @@ -35,42 +35,17 @@ private static final String TABLES_VIEWS_SCHEMA = "tab_name#string"; private static final String EXTENDED_TABLES_SCHEMA = "tab_name,table_type#string,string"; - private static final String MATERIALIZED_VIEWS_SCHEMA = "mv_name,rewrite_enabled,mode#string:string:string"; private final String resFile; private final String dbName; private final String pattern; - private final TableType type; private final TableType typeFilter; private final boolean isExtended; - public ShowTablesDesc(Path resFile) { - this(resFile, null, null, null, null, false); - } - - public ShowTablesDesc(Path resFile, String dbName) { - this(resFile, dbName, null, null, null, false); - } - - public ShowTablesDesc(Path resFile, String dbName, TableType type) { - this(resFile, dbName, null, type, null, false); - } - public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType typeFilter, boolean isExtended) { - this(resFile, dbName, pattern, null, typeFilter, isExtended); - } - - public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type) { - this(resFile, dbName, pattern, type, null, false); - } - - - public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type, TableType typeFilter, - boolean isExtended) { this.resFile = resFile.toString(); this.dbName = dbName; this.pattern = pattern; - this.type = type; this.typeFilter = typeFilter; this.isExtended = isExtended; } @@ -80,11 +55,6 @@ public String getPattern() { return pattern; } - @Explain(displayName = "type") - public TableType getType() { - return type; - } - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; @@ -101,15 +71,17 @@ public boolean isExtended() { return isExtended; } + /** For explain only. */ @Explain(displayName = "table type filter", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTypeFilterString() { + return typeFilter.name(); + } + public TableType getTypeFilter() { return typeFilter; } public String getSchema() { - if (type != null && type == TableType.MATERIALIZED_VIEW) { - return MATERIALIZED_VIEWS_SCHEMA; - } return isExtended ? 
        EXTENDED_TABLES_SCHEMA : TABLES_VIEWS_SCHEMA;
   }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/ShowTablesOperation.java
similarity index 58%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/ShowTablesOperation.java
index c174e0988e..4846d2969c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/ShowTablesOperation.java
@@ -16,9 +16,10 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.info;
+package org.apache.hadoop.hive.ql.ddl.table.info.show.tables;
 
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
 
 import java.io.DataOutputStream;
 import java.util.ArrayList;
@@ -28,14 +29,12 @@
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.DDLOperation;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.io.IOUtils;
 
 /**
  * Operation process showing the tables.
@@ -47,7 +46,6 @@ public ShowTablesOperation(DDLOperationContext context, ShowTablesDesc desc) {
 
   @Override
   public int execute() throws HiveException {
-    TableType type = desc.getType(); // null for tables, VIRTUAL_VIEW for views, MATERIALIZED_VIEW for MVs
     String dbName = desc.getDbName();
     String pattern = desc.getPattern(); // if null, all tables/views are returned
     TableType typeFilter = desc.getTypeFilter();
@@ -58,55 +56,33 @@ public int execute() throws HiveException {
       throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName);
     }
 
-    LOG.debug("pattern: {}", pattern);
-    LOG.debug("typeFilter: {}", typeFilter);
-
     List<String> tableNames = null;
     List<Table> tableObjects = null;
-    if (type == null) {
-      if (isExtended) {
-        tableObjects = new ArrayList<>();
-        tableObjects.addAll(context.getDb().getTableObjectsByType(dbName, pattern, typeFilter));
-        LOG.debug("Found {} table(s) matching the SHOW EXTENDED TABLES statement.", tableObjects.size());
-      } else {
-        tableNames = context.getDb().getTablesByType(dbName, pattern, typeFilter);
-        LOG.debug("Found {} table(s) matching the SHOW TABLES statement.", tableNames.size());
-      }
-    } else if (type == TableType.MATERIALIZED_VIEW) {
+    if (isExtended) {
       tableObjects = new ArrayList<>();
-      tableObjects.addAll(context.getDb().getMaterializedViewObjectsByPattern(dbName, pattern));
-      LOG.debug("Found {} materialized view(s) matching the SHOW MATERIALIZED VIEWS statement.", tableObjects.size());
-    } else if (type == TableType.VIRTUAL_VIEW) {
-      tableNames = context.getDb().getTablesByType(dbName, pattern, type);
-      LOG.debug("Found {} view(s) matching the SHOW VIEWS statement.", tableNames.size());
+      tableObjects.addAll(context.getDb().getTableObjectsByType(dbName, pattern, typeFilter));
+      LOG.debug("Found {} table(s) matching the SHOW EXTENDED TABLES statement.", tableObjects.size());
     } else {
-      throw new HiveException("Option not recognized in SHOW TABLES/VIEWS/MATERIALIZED VIEWS");
+      tableNames = context.getDb().getTablesByType(dbName, pattern, typeFilter);
+      LOG.debug("Found {} table(s) matching the SHOW TABLES statement.", tableNames.size());
     }
 
-    // write the results in the file
-    DataOutputStream outStream = null;
-    try {
-      Path resFile = new Path(resultsFile);
-      FileSystem fs = resFile.getFileSystem(context.getConf());
-      outStream = fs.create(resFile);
-      // Sort by name and print
+    try (DataOutputStream os = DDLUtils.getOutputStream(new Path(resultsFile), context)) {
       if (tableNames != null) {
         SortedSet<String> sortedSet = new TreeSet<String>(tableNames);
-        context.getFormatter().showTables(outStream, sortedSet);
+        context.getFormatter().showTables(os, sortedSet);
       } else {
         Collections.sort(tableObjects, Comparator.comparing(Table::getTableName));
         if (isExtended) {
-          context.getFormatter().showTablesExtended(outStream, tableObjects);
+          context.getFormatter().showTablesExtended(os, tableObjects);
        } else {
-          context.getFormatter().showMaterializedViews(outStream, tableObjects);
+          context.getFormatter().showMaterializedViews(os, tableObjects);
         }
       }
-      outStream.close();
     } catch (Exception e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database" + dbName);
-    } finally {
-      IOUtils.closeStream(outStream);
     }
+
     return 0;
   }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/package-info.java
new file mode 100644
index 0000000000..21c738bc9a
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/show/tables/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Show tables DDL operation. */
+package org.apache.hadoop.hive.ql.ddl.table.info.show.tables;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/LockTableAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/LockTableAnalyzer.java
new file mode 100644
index 0000000000..8282966e3a
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/LockTableAnalyzer.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.lock;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for lock table commands.
+ */
+@DDLType(type=HiveParser.TOK_LOCKTABLE)
+public class LockTableAnalyzer extends BaseSemanticAnalyzer {
+  public LockTableAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  public void analyzeInternal(ASTNode root) throws SemanticException {
+    String tableName = getUnescapedName((ASTNode) root.getChild(0)).toLowerCase();
+    String mode = unescapeIdentifier(root.getChild(1).getText().toUpperCase());
+    List<Map<String, String>> partitionSpecs = getPartitionSpecs(getTable(tableName), root);
+
+    // We can only have a single partition spec
+    assert (partitionSpecs.size() <= 1);
+    Map<String, String> partitionSpec = null;
+    if (partitionSpecs.size() > 0) {
+      partitionSpec = partitionSpecs.get(0);
+    }
+
+    LockTableDesc desc = new LockTableDesc(tableName, mode, partitionSpec,
+        HiveConf.getVar(conf, ConfVars.HIVEQUERYID), ctx.getCmd());
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
+
+    // Need to initialize the lock manager
+    ctx.setNeedLockMgr(true);
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/UnlockTableAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/UnlockTableAnalyzer.java
new file mode 100644
index 0000000000..18a838bf79
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/UnlockTableAnalyzer.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.lock;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for unlock table commands.
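+ *
+ * E.g. "UNLOCK TABLE tbl [PARTITION (ds='2008-04-08')];".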
+ */
+@DDLType(type=HiveParser.TOK_UNLOCKTABLE)
+public class UnlockTableAnalyzer extends BaseSemanticAnalyzer {
+  public UnlockTableAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  public void analyzeInternal(ASTNode root) throws SemanticException {
+    String tableName = getUnescapedName((ASTNode) root.getChild(0));
+    List<Map<String, String>> partitionSpecs = getPartitionSpecs(getTable(tableName), root);
+
+    // We can only have a single partition spec
+    assert (partitionSpecs.size() <= 1);
+    Map<String, String> partitionSpec = null;
+    if (partitionSpecs.size() > 0) {
+      partitionSpec = partitionSpecs.get(0);
+    }
+
+    UnlockTableDesc desc = new UnlockTableDesc(tableName, partitionSpec);
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
+
+    // Need to initialize the lock manager
+    ctx.setNeedLockMgr(true);
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowDbLocksAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowDbLocksAnalyzer.java
new file mode 100644
index 0000000000..c6b7f42eb3
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowDbLocksAnalyzer.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.lock.show;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
+import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for show DB locks commands.
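+ *
+ * E.g. "SHOW LOCKS DATABASE db [EXTENDED];".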
+ */
+@DDLType(type=HiveParser.TOK_SHOWDBLOCKS)
+public class ShowDbLocksAnalyzer extends BaseSemanticAnalyzer {
+  public ShowDbLocksAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  public void analyzeInternal(ASTNode root) throws SemanticException {
+    ctx.setResFile(ctx.getLocalTmpPath());
+
+    String dbName = stripQuotes(root.getChild(0).getText());
+    boolean isExtended = (root.getChildCount() > 1);
+
+    HiveTxnManager txnManager = null;
+    try {
+      txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+    } catch (LockException e) {
+      throw new SemanticException(e.getMessage());
+    }
+
+    ShowLocksDesc desc = new ShowLocksDesc(ctx.getResFile(), dbName, isExtended, txnManager.useNewShowLocksFormat());
+    Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
+    rootTasks.add(task);
+
+    task.setFetchSource(true);
+    setFetchTask(createFetchTask(desc.getSchema()));
+
+    // Need to initialize the lock manager
+    ctx.setNeedLockMgr(true);
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowLocksAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowLocksAnalyzer.java
new file mode 100644
index 0000000000..709ce47400
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowLocksAnalyzer.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.lock.show;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
+import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for show locks commands.
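+ *
+ * E.g. "SHOW LOCKS [tbl [PARTITION (part_spec)]] [EXTENDED];".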
+ */
+@DDLType(type=HiveParser.TOK_SHOWLOCKS)
+public class ShowLocksAnalyzer extends BaseSemanticAnalyzer {
+  public ShowLocksAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  public void analyzeInternal(ASTNode root) throws SemanticException {
+    ctx.setResFile(ctx.getLocalTmpPath());
+
+    String tableName = null;
+    Map<String, String> partitionSpec = null;
+    boolean isExtended = false;
+    if (root.getChildCount() >= 1) {
+      // table for which show locks is being executed
+      for (int i = 0; i < root.getChildCount(); i++) {
+        ASTNode child = (ASTNode) root.getChild(i);
+        if (child.getType() == HiveParser.TOK_TABTYPE) {
+          tableName = DDLUtils.getFQName((ASTNode) child.getChild(0));
+          // get partition metadata if partition specified
+          if (child.getChildCount() == 2) {
+            ASTNode partitionSpecNode = (ASTNode) child.getChild(1);
+            partitionSpec = getValidatedPartSpec(getTable(tableName), partitionSpecNode, conf, false);
+          }
+        } else if (child.getType() == HiveParser.KW_EXTENDED) {
+          isExtended = true;
+        }
+      }
+    }
+
+    HiveTxnManager txnManager = null;
+    try {
+      txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+    } catch (LockException e) {
+      throw new SemanticException(e.getMessage());
+    }
+
+    ShowLocksDesc desc = new ShowLocksDesc(ctx.getResFile(), tableName, partitionSpec, isExtended,
+        txnManager.useNewShowLocksFormat());
+    Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
+    rootTasks.add(task);
+
+    task.setFetchSource(true);
+    setFetchTask(createFetchTask(desc.getSchema()));
+
+    // Need to initialize the lock manager
+    ctx.setNeedLockMgr(true);
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowLocksDesc.java
similarity index 98%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowLocksDesc.java
index 8ede3503ee..898d3b8be6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowLocksDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.lock;
+package org.apache.hadoop.hive.ql.ddl.table.lock.show;
 
 import java.io.Serializable;
 import java.util.Map;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowLocksOperation.java
similarity index 99%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowLocksOperation.java
index 506b7cfb1c..0f11aade8c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/ShowLocksOperation.java
@@ -16,7 +16,7 @@
  * limitations under the License.
*/ -package org.apache.hadoop.hive.ql.ddl.table.lock; +package org.apache.hadoop.hive.ql.ddl.table.lock.show; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.ddl.DDLUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/package-info.java new file mode 100644 index 0000000000..b59ed099bb --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/show/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Show locks DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.table.lock.show; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsAnalyzer.java new file mode 100644 index 0000000000..4d3536e4d1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsAnalyzer.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.view.materialized.show; + +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.table.info.TableInfoUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Analyzer for show materialized views commands. 
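+ *
+ * E.g. "SHOW MATERIALIZED VIEWS [IN db] ['pattern'];".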
+ */ +@DDLType(type=HiveParser.TOK_SHOWMATERIALIZEDVIEWS) +public class ShowMaterializedViewsAnalyzer extends BaseSemanticAnalyzer { + public ShowMaterializedViewsAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() > 3) { + throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); + } + + ctx.setResFile(ctx.getLocalTmpPath()); + + String dbName = SessionState.get().getCurrentDatabase(); + String viewNames = null; + switch (root.getChildCount()) { + case 1: // Uses a pattern + viewNames = unescapeSQLString(root.getChild(0).getText()); + break; + case 2: // Specifies a DB + assert (root.getChild(0).getType() == HiveParser.TOK_FROM); + dbName = unescapeIdentifier(root.getChild(1).getText()); + TableInfoUtils.validateDatabase(db, dbName); + break; + case 3: // Uses a pattern and specifies a DB + assert (root.getChild(0).getType() == HiveParser.TOK_FROM); + dbName = unescapeIdentifier(root.getChild(1).getText()); + viewNames = unescapeSQLString(root.getChild(2).getText()); + TableInfoUtils.validateDatabase(db, dbName); + break; + default: // No pattern or DB + break; + } + + ShowMaterializedViewsDesc desc = new ShowMaterializedViewsDesc(ctx.getResFile(), dbName, viewNames); + Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + task.setFetchSource(true); + setFetchTask(createFetchTask(ShowMaterializedViewsDesc.SCHEMA)); + } +}
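For reference, the child-count switch above maps one-to-one onto the three parse shapes of the statement. A standalone mirror of that logic, using plain strings where the analyzer reads ASTNode texts (the shapes are inferred from the analyzer itself):

import java.util.Arrays;
import java.util.List;

public class ShowStatementShapesSketch {
  // children mimic root.getChild(i).getText(); a leading "TOK_FROM" marks the FROM/IN form
  static String[] dbAndPattern(List<String> children, String currentDb) {
    String db = currentDb;
    String pattern = null;
    switch (children.size()) {
      case 1:  // SHOW MATERIALIZED VIEWS 'pat'
        pattern = children.get(0);
        break;
      case 2:  // SHOW MATERIALIZED VIEWS IN db
        db = children.get(1);
        break;
      case 3:  // SHOW MATERIALIZED VIEWS IN db 'pat'
        db = children.get(1);
        pattern = children.get(2);
        break;
      default: // SHOW MATERIALIZED VIEWS
        break;
    }
    return new String[] { db, pattern };
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(dbAndPattern(List.of("TOK_FROM", "test1", "shtb_test*"), "default")));
    // prints [test1, shtb_test*]
  }
}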
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsDesc.java new file mode 100644 index 0000000000..0bb856a3cd --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsDesc.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.view.materialized.show; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for SHOW MATERIALIZED VIEWS commands. + */ +@Explain(displayName = "Show Materialized Views", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowMaterializedViewsDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + public static final String SCHEMA = "mv_name,rewrite_enabled,mode#string:string:string"; + + private final String resFile; + private final String dbName; + private final String pattern; + + public ShowMaterializedViewsDesc(Path resFile, String dbName, String pattern) { + this.resFile = resFile.toString(); + this.dbName = dbName; + this.pattern = pattern; + } + + @Explain(displayName = "pattern") + public String getPattern() { + return pattern; + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; + } + + @Explain(displayName = "database name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDbName() { + return dbName; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsOperation.java new file mode 100644 index 0000000000..792b352e4c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsOperation.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.view.materialized.show; + +import java.io.DataOutputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of showing the materialized views. + */ +public class ShowMaterializedViewsOperation extends DDLOperation<ShowMaterializedViewsDesc> { + public ShowMaterializedViewsOperation(DDLOperationContext context, ShowMaterializedViewsDesc desc) { + super(context, desc); + } + + @Override + public int execute() throws HiveException { + String dbName = desc.getDbName(); + String pattern = desc.getPattern(); // if null, all tables/views are returned + String resultsFile = desc.getResFile(); + + if (!context.getDb().databaseExists(dbName)) { + throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); + } + + // the returned list is not sortable as it is immutable, thus it must be put into a new ArrayList + List<Table> tableObjects = new ArrayList<>(context.getDb().getMaterializedViewObjectsByPattern(dbName, pattern)); + LOG.debug("Found {} materialized view(s) matching the SHOW MATERIALIZED VIEWS statement.", tableObjects.size()); + + try (DataOutputStream os = DDLUtils.getOutputStream(new Path(resultsFile), context)) { + Collections.sort(tableObjects, Comparator.comparing(Table::getTableName)); + context.getFormatter().showMaterializedViews(os, tableObjects); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database " + dbName); + } + + return 0; + } +}
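The comment above about the immutable result list is easy to verify in isolation: Collections.sort mutates its argument, so the defensive copy into a new ArrayList is what makes the sort legal. A minimal demonstration, with plain strings standing in for the fetched Table objects:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ImmutableListSortSketch {
  public static void main(String[] args) {
    List<String> fetched = List.of("b_view", "a_view"); // immutable, like the metastore result
    try {
      Collections.sort(fetched);                        // in-place mutation is rejected
    } catch (UnsupportedOperationException e) {
      List<String> copy = new ArrayList<>(fetched);     // the same copy the operation makes
      Collections.sort(copy);
      System.out.println(copy);                         // [a_view, b_view]
    }
  }
}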
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/package-info.java new file mode 100644 index 0000000000..d12c032b7f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Show materialized views DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.view.materialized.show; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/ShowViewsAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/ShowViewsAnalyzer.java new file mode 100644 index 0000000000..89ac045082 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/ShowViewsAnalyzer.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.view.show; + +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.table.info.TableInfoUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Analyzer for show views commands. + */ +@DDLType(type=HiveParser.TOK_SHOWVIEWS) +public class ShowViewsAnalyzer extends BaseSemanticAnalyzer { + public ShowViewsAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() > 3) { + throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); + } + + ctx.setResFile(ctx.getLocalTmpPath()); + + String dbName = SessionState.get().getCurrentDatabase(); + String viewNames = null; + switch (root.getChildCount()) { + case 1: // Uses a pattern + viewNames = unescapeSQLString(root.getChild(0).getText()); + break; + case 2: // Specifies a DB + assert (root.getChild(0).getType() == HiveParser.TOK_FROM); + dbName = unescapeIdentifier(root.getChild(1).getText()); + TableInfoUtils.validateDatabase(db, dbName); + break; + case 3: // Uses a pattern and specifies a DB + assert (root.getChild(0).getType() == HiveParser.TOK_FROM); + dbName = unescapeIdentifier(root.getChild(1).getText()); + viewNames = unescapeSQLString(root.getChild(2).getText()); + TableInfoUtils.validateDatabase(db, dbName); + break; + default: // No pattern or DB + break; + } + + ShowViewsDesc desc = new ShowViewsDesc(ctx.getResFile(), dbName, viewNames); + Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + task.setFetchSource(true); + setFetchTask(createFetchTask(ShowViewsDesc.SCHEMA)); + } +}
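createFetchTask above is driven by compact schema strings such as ShowViewsDesc.SCHEMA ("tab_name#string") and ShowMaterializedViewsDesc.SCHEMA ("mv_name,rewrite_enabled,mode#string:string:string"). These appear to encode comma-separated column names before the '#' and colon-separated types after it; a sketch of that reading (an assumption drawn from the two constants, not Hive's actual schema parser):

public class FetchSchemaSketch {
  public static void main(String[] args) {
    // assumed format: "name,name,...#type:type:..."
    String schema = "mv_name,rewrite_enabled,mode#string:string:string";
    String[] halves = schema.split("#");
    String[] names = halves[0].split(",");
    String[] types = halves[1].split(":");
    for (int i = 0; i < names.length; i++) {
      System.out.println(names[i] + " : " + types[i]); // mv_name : string, and so on
    }
  }
}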
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/ShowViewsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/ShowViewsDesc.java new file mode 100644 index 0000000000..b3d683e264 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/ShowViewsDesc.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.view.show; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for SHOW VIEWS commands. + */ +@Explain(displayName = "Show Views", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowViewsDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + public static final String SCHEMA = "tab_name#string"; + + private final String resFile; + private final String dbName; + private final String pattern; + + public ShowViewsDesc(Path resFile, String dbName, String pattern) { + this.resFile = resFile.toString(); + this.dbName = dbName; + this.pattern = pattern; + } + + @Explain(displayName = "pattern") + public String getPattern() { + return pattern; + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; + } + + @Explain(displayName = "database name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDbName() { + return dbName; + } +}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/ShowViewsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/ShowViewsOperation.java new file mode 100644 index 0000000000..7962551cc9 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/ShowViewsOperation.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.view.show; + +import java.io.DataOutputStream; +import java.util.List; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of showing the views. + */ +public class ShowViewsOperation extends DDLOperation<ShowViewsDesc> { + public ShowViewsOperation(DDLOperationContext context, ShowViewsDesc desc) { + super(context, desc); + } + + @Override + public int execute() throws HiveException { + String dbName = desc.getDbName(); + String pattern = desc.getPattern(); // if null, all tables/views are returned + String resultsFile = desc.getResFile(); + + if (!context.getDb().databaseExists(dbName)) { + throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); + } + + List<String> tableNames = context.getDb().getTablesByType(dbName, pattern, TableType.VIRTUAL_VIEW); + LOG.debug("Found {} view(s) matching the SHOW VIEWS statement.", tableNames.size()); + + try (DataOutputStream os = DDLUtils.getOutputStream(new Path(resultsFile), context)) { + SortedSet<String> sortedSet = new TreeSet<String>(tableNames); + context.getFormatter().showTables(os, sortedSet); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database " + dbName); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/package-info.java new file mode 100644 index 0000000000..379b9d4e0e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/show/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Show views DDL operation.
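getTablesByType filters view names against a metastore-style pattern. Judging from the tests in this change ('*test*', '*view2', 'shtb_test1_view1|shtb_test1_view2'), '*' acts as a wildcard and '|' separates alternatives. A hedged sketch of that matching, not the metastore's actual implementation:

import java.util.regex.Pattern;

public class ViewPatternSketch {
  // translate the SHOW VIEWS pattern into a regex: '*' -> '.*', '|' stays as alternation
  static boolean matches(String pattern, String name) {
    StringBuilder regex = new StringBuilder();
    for (char c : pattern.toCharArray()) {
      if (c == '*') {
        regex.append(".*");
      } else if (c == '|') {
        regex.append('|');
      } else {
        regex.append(Pattern.quote(String.valueOf(c)));
      }
    }
    return Pattern.matches(regex.toString(), name);
  }

  public static void main(String[] args) {
    System.out.println(matches("shtb_test1_view1|shtb_test1_view2", "shtb_test1_view2")); // true
    System.out.println(matches("*view2", "shtb_test2_view2"));                            // true
    System.out.println(matches("nomatch", "test_view_n100"));                             // false
  }
}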
*/ +package org.apache.hadoop.hive.ql.ddl.view.show; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java index a8b9653411..4b6bc3e1e3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java @@ -26,8 +26,8 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; import org.apache.hadoop.hive.metastore.api.*; -import org.apache.hadoop.hive.ql.ddl.table.lock.ShowLocksOperation; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.table.lock.show.ShowLocksOperation; import org.apache.hadoop.hive.ql.DriverState; import org.apache.thrift.TException; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java index bb17a14c52..d25dbae77e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; -import org.apache.hadoop.hive.ql.ddl.table.info.DescTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.info.desc.DescTableDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.CheckConstraint; import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 274da31317..3aaffbfe94 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -55,13 +55,6 @@ import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; -import org.apache.hadoop.hive.ql.ddl.table.info.DescTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.info.ShowTablePropertiesDesc; -import org.apache.hadoop.hive.ql.ddl.table.info.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.ddl.table.info.ShowTablesDesc; -import org.apache.hadoop.hive.ql.ddl.table.lock.LockTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.lock.ShowLocksDesc; -import org.apache.hadoop.hive.ql.ddl.table.lock.UnlockTableDesc; import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableRenameDesc; import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetOwnerDesc; import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc; @@ -96,13 +89,9 @@ import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; import org.apache.hadoop.hive.ql.lib.Node; -import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; -import org.apache.hadoop.hive.ql.lockmgr.LockException; -import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory; import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import 
org.apache.hadoop.hive.ql.metadata.InvalidTableException; import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; @@ -125,15 +114,12 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; import org.apache.hadoop.mapred.InputFormat; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * DDLSemanticAnalyzer. * */ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { - private static final Logger LOG = LoggerFactory.getLogger(DDLSemanticAnalyzer.class); private static final Map<Integer, String> TokenToTypeName = new HashMap<Integer, String>(); // Equivalent to acidSinks, but for DDL operations that change data. @@ -277,38 +263,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { case HiveParser.TOK_TRUNCATETABLE: analyzeTruncateTable(ast); break; - case HiveParser.TOK_DESCTABLE: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeDescribeTable(ast); - break; - case HiveParser.TOK_SHOWTABLES: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowTables(ast); - break; - case HiveParser.TOK_SHOW_TABLESTATUS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowTableStatus(ast); - break; - case HiveParser.TOK_SHOW_TBLPROPERTIES: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowTableProperties(ast); - break; - case HiveParser.TOK_SHOWLOCKS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowLocks(ast); - break; - case HiveParser.TOK_SHOWDBLOCKS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowDbLocks(ast); - break; - case HiveParser.TOK_SHOWVIEWS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowViews(ast); - break; - case HiveParser.TOK_SHOWMATERIALIZEDVIEWS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowMaterializedViews(ast); - break; case HiveParser.TOK_ALTERVIEW: { final TableName tName = getQualifiedTableName((ASTNode) ast.getChild(0)); ast = (ASTNode) ast.getChild(1); @@ -321,12 +275,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { } break; } - case HiveParser.TOK_LOCKTABLE: - analyzeLockTable(ast); - break; - case HiveParser.TOK_UNLOCKTABLE: - analyzeUnlockTable(ast); - break; default: throw new SemanticException("Unsupported command: " + ast); } @@ -1119,9 +1067,6 @@ private void analyzeAlterTableCompact(ASTNode ast, TableName tableName, */ static class QualifiedNameUtil { - // delimiter to check DOT delimited qualified names - static final String delimiter = "\\."; /** * Get the fully qualified name in the ast. e.g. the ast of the form ^(DOT * ^(DOT a b) c) will generate a name of the form a.b.c */ @@ -1144,107 +1089,6 @@ static public String getFullyQualifiedName(ASTNode ast) { return null; } } - - // get the column path - // return column name if exists, column could be DOT separated. - // example: lintString.$elem$.myint - // return table name for column name if no column has been specified. - static public String getColPath(Hive db, ASTNode node, TableName tableName, Map<String, String> partSpec) - throws SemanticException { - - // if this ast has only one child, then no column name specified.
- if (node.getChildCount() == 1) { - return null; - } - - ASTNode columnNode = null; - // Second child node could be partitionspec or column - if (node.getChildCount() > 1) { - if (partSpec == null) { - columnNode = (ASTNode) node.getChild(1); - } else { - columnNode = (ASTNode) node.getChild(2); - } - } - - if (columnNode != null) { - return String.join(".", tableName.getNotEmptyDbTable(), QualifiedNameUtil.getFullyQualifiedName(columnNode)); - } else { - return null; - } - } - - // get partition metadata - static Map<String, String> getPartitionSpec(Hive db, ASTNode ast, TableName tableName) - throws SemanticException { - ASTNode partNode = null; - // if this ast has only one child, then no partition spec specified. - if (ast.getChildCount() == 1) { - return null; - } - - // if ast has two children - // the 2nd child could be partition spec or columnName - // if the ast has 3 children, the second *has to* be partition spec - if (ast.getChildCount() > 2 && (((ASTNode) ast.getChild(1)).getType() != HiveParser.TOK_PARTSPEC)) { - throw new SemanticException(((ASTNode) ast.getChild(1)).getType() + " is not a partition specification"); - } - - if (((ASTNode) ast.getChild(1)).getType() == HiveParser.TOK_PARTSPEC) { - partNode = (ASTNode) ast.getChild(1); - } - - if (partNode != null) { - Table tab = null; - try { - tab = db.getTable(tableName.getNotEmptyDbTable()); - } - catch (InvalidTableException e) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName.getNotEmptyDbTable()), e); - } - catch (HiveException e) { - throw new SemanticException(e.getMessage(), e); - } - - Map<String, String> partSpec = null; - try { - partSpec = getValidatedPartSpec(tab, partNode, db.getConf(), false); - } catch (SemanticException e) { - // get exception in resolving partition - // it could be DESCRIBE table key - // return null - // continue processing for DESCRIBE table key - return null; - } - - if (partSpec != null) { - Partition part = null; - try { - part = db.getPartition(tab, partSpec, false); - } catch (HiveException e) { - // if get exception in finding partition - // it could be DESCRIBE table key - // return null - // continue processing for DESCRIBE table key - return null; - } - - // if partition is not found - // it is DESCRIBE table partition - // invalid partition exception - if (part == null) { - throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partSpec.toString())); - } - - // it is DESCRIBE table partition - // return partition metadata - return partSpec; - } - } - - return null; - } - } private void validateDatabase(String databaseName) throws SemanticException { @@ -1257,384 +1101,6 @@ private void validateDatabase(String databaseName) throws SemanticException { } } - private void validateTable(TableName tableName, Map<String, String> partSpec) - throws SemanticException { - Table tab = getTable(tableName); - if (partSpec != null) { - PartitionUtils.getPartition(db, tab, partSpec, true); - } - }
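The deleted getColPath above produced dotted column paths such as "default.maptable.lintString.$elem$.myint" by joining the qualified table name with the column's fully qualified name, and returned null when no column node was present. A standalone mirror of just that joining step, with hypothetical inputs:

public class ColPathSketch {
  static String colPath(String dbTable, String column) {
    return column == null ? null : String.join(".", dbTable, column); // null: no column specified
  }

  public static void main(String[] args) {
    System.out.println(colPath("default.maptable", "lintString.$elem$.myint"));
    // prints default.maptable.lintString.$elem$.myint
    System.out.println(colPath("default.maptable", null)); // null: describe the table itself
  }
}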
- - /** - * A query like this will generate a tree as follows - * "describe formatted default.maptable partition (b=100) id;" - * TOK_TABTYPE - * TOK_TABNAME --> root for tablename, 2 child nodes mean DB specified - * default - * maptable - * TOK_PARTSPEC --> root node for partition spec. else columnName - * TOK_PARTVAL - * b - * 100 - * id --> root node for columnName - * formatted - */ - private void analyzeDescribeTable(ASTNode ast) throws SemanticException { - ASTNode tableTypeExpr = (ASTNode) ast.getChild(0); - - final TableName tableName; - String colPath = null; - Map<String, String> partSpec = null; - - ASTNode tableNode = null; - - // process the first node to extract tablename - // tablename is either TABLENAME or DBNAME.TABLENAME if db is given - if (((ASTNode) tableTypeExpr.getChild(0)).getType() == HiveParser.TOK_TABNAME) { - tableNode = (ASTNode) tableTypeExpr.getChild(0); - if (tableNode.getChildCount() == 1) { - tableName = HiveTableName.of(((ASTNode) tableNode.getChild(0)).getText()); - } else { - tableName = TableName.fromString(((ASTNode) tableNode.getChild(1)).getText(), - SessionState.get().getCurrentCatalog(), ((ASTNode) tableNode.getChild(0)).getText()); - } - } else { - throw new SemanticException(((ASTNode) tableTypeExpr.getChild(0)).getText() + " is not an expected token type"); - } - - // process the second child,if exists, node to get partition spec(s) - partSpec = QualifiedNameUtil.getPartitionSpec(db, tableTypeExpr, tableName); - - // process the third child node,if exists, to get partition spec(s) - colPath = QualifiedNameUtil.getColPath(db, tableTypeExpr, tableName, partSpec); - - // if database is not the one currently using - // validate database - if (tableName.getDb() != null) { - validateDatabase(tableName.getDb()); - } - if (partSpec != null) { - validateTable(tableName, partSpec); - } - - boolean showColStats = false; - boolean isFormatted = false; - boolean isExt = false; - if (ast.getChildCount() == 2) { - int descOptions = ast.getChild(1).getType(); - isFormatted = descOptions == HiveParser.KW_FORMATTED; - isExt = descOptions == HiveParser.KW_EXTENDED; - // in case of "DESCRIBE FORMATTED tablename column_name" statement, colPath - // will contain tablename.column_name. If column_name is not specified - // colPath will be equal to tableName. This is how we can differentiate - // if we are describing a table or column - if (colPath != null && isFormatted) { - showColStats = true; - } - } - - inputs.add(new ReadEntity(getTable(tableName))); - - DescTableDesc descTblDesc = new DescTableDesc(ctx.getResFile(), tableName, partSpec, colPath, isExt, isFormatted); - Task<DDLWork> ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), descTblDesc)); - rootTasks.add(ddlTask); - String schema = showColStats ? DescTableDesc.COLUMN_STATISTICS_SCHEMA : DescTableDesc.SCHEMA; - setFetchTask(createFetchTask(schema)); - LOG.info("analyzeDescribeTable done"); - }
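One detail worth keeping in mind from the deleted method: DESCRIBE only switches to the column-statistics schema when a column path is present and FORMATTED was requested. A compact standalone restatement of that choice (the returned strings stand in for the DescTableDesc schema constants):

public class DescribeSchemaChoiceSketch {
  static String schemaFor(String colPath, boolean isFormatted) {
    boolean showColStats = colPath != null && isFormatted; // column given and FORMATTED requested
    return showColStats ? "COLUMN_STATISTICS_SCHEMA" : "SCHEMA";
  }

  public static void main(String[] args) {
    System.out.println(schemaFor("default.maptable.id", true)); // COLUMN_STATISTICS_SCHEMA
    System.out.println(schemaFor(null, true));                  // SCHEMA
  }
}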
- - private void analyzeShowTables(ASTNode ast) throws SemanticException { - ShowTablesDesc showTblsDesc; - String dbName = SessionState.get().getCurrentDatabase(); - String tableNames = null; - TableType tableTypeFilter = null; - boolean isExtended = false; - - if (ast.getChildCount() > 4) { - throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(ast.toStringTree())); - } - - for (int i = 0; i < ast.getChildCount(); i++) { - ASTNode child = (ASTNode) ast.getChild(i); - if (child.getType() == HiveParser.TOK_FROM) { // Specifies a DB - dbName = unescapeIdentifier(ast.getChild(++i).getText()); - validateDatabase(dbName); - } else if (child.getType() == HiveParser.TOK_TABLE_TYPE) { // Filter on table type - String tableType = unescapeIdentifier(child.getChild(0).getText()); - if (!tableType.equalsIgnoreCase("table_type")) { - throw new SemanticException("SHOW TABLES statement only allows equality filter on table_type value"); - } - tableTypeFilter = TableType.valueOf(unescapeSQLString(child.getChild(1).getText())); - } else if (child.getType() == HiveParser.KW_EXTENDED) { // Include table type - isExtended = true; - } else { // Uses a pattern - tableNames = unescapeSQLString(child.getText()); - } - } - - showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames, tableTypeFilter, isExtended); - inputs.add(new ReadEntity(getDatabase(dbName))); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showTblsDesc))); - setFetchTask(createFetchTask(showTblsDesc.getSchema())); - } - - private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { - ShowTableStatusDesc showTblStatusDesc; - String tableNames = getUnescapedName((ASTNode) ast.getChild(0)); - String dbName = SessionState.get().getCurrentDatabase(); - int children = ast.getChildCount(); - Map<String, String> partSpec = null; - if (children >= 2) { - if (children > 3) { - throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg()); - } - for (int i = 1; i < children; i++) { - ASTNode child = (ASTNode) ast.getChild(i); - if (child.getToken().getType() == HiveParser.Identifier) { - dbName = unescapeIdentifier(child.getText()); - } else if (child.getToken().getType() == HiveParser.TOK_PARTSPEC) { - partSpec = getValidatedPartSpec(getTable(tableNames), child, conf, false); - } else { - throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(child.toStringTree() + - " , Invalid token " + child.getToken().getType())); - } - } - } - - if (partSpec != null) { - validateTable(HiveTableName.ofNullableWithNoDefault(tableNames), partSpec); - } - - showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile().toString(), dbName, tableNames, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showTblStatusDesc))); - setFetchTask(createFetchTask(ShowTableStatusDesc.SCHEMA)); - } - - private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { - ShowTablePropertiesDesc showTblPropertiesDesc; - TableName qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); - String propertyName = null; - if (ast.getChildCount() > 1) { - propertyName = unescapeSQLString(ast.getChild(1).getText()); - } - - validateTable(qualified, null); - - showTblPropertiesDesc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), qualified, propertyName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showTblPropertiesDesc))); - setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA)); - }
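The deleted SHOW TABLES analyzer above only accepted an equality filter on the pseudo-column table_type, mapping the string literal onto the TableType enum. A standalone mirror of that validation, with a stand-in enum:

public class TableTypeFilterSketch {
  enum TableType { MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW, MATERIALIZED_VIEW }

  static TableType parseFilter(String column, String value) {
    if (!"table_type".equalsIgnoreCase(column)) {
      throw new IllegalArgumentException("SHOW TABLES statement only allows equality filter on table_type value");
    }
    return TableType.valueOf(value); // throws IllegalArgumentException for unknown types
  }

  public static void main(String[] args) {
    System.out.println(parseFilter("table_type", "MANAGED_TABLE")); // MANAGED_TABLE
  }
}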
- - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "SHOW LOCKS;". - * - * @param ast - * The parsed command tree. - * @throws SemanticException - * Parsing failed - */ - private void analyzeShowLocks(ASTNode ast) throws SemanticException { - String tableName = null; - Map<String, String> partSpec = null; - boolean isExtended = false; - - if (ast.getChildCount() >= 1) { - // table for which show locks is being executed - for (int i = 0; i < ast.getChildCount(); i++) { - ASTNode child = (ASTNode) ast.getChild(i); - if (child.getType() == HiveParser.TOK_TABTYPE) { - ASTNode tableTypeExpr = child; - tableName = - QualifiedNameUtil.getFullyQualifiedName((ASTNode) tableTypeExpr.getChild(0)); - // get partition metadata if partition specified - if (tableTypeExpr.getChildCount() == 2) { - ASTNode partSpecNode = (ASTNode) tableTypeExpr.getChild(1); - partSpec = getValidatedPartSpec(getTable(tableName), partSpecNode, conf, false); - } - } else if (child.getType() == HiveParser.KW_EXTENDED) { - isExtended = true; - } - } - } - - HiveTxnManager txnManager = null; - try { - txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); - } catch (LockException e) { - throw new SemanticException(e.getMessage()); - } - - ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), tableName, - partSpec, isExtended, txnManager.useNewShowLocksFormat()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showLocksDesc))); - setFetchTask(createFetchTask(showLocksDesc.getSchema())); - - // Need to initialize the lock manager - ctx.setNeedLockMgr(true); - } - - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "SHOW LOCKS DATABASE database [extended];". - * - * @param ast - * The parsed command tree.
- * @throws SemanticException - * Parsing failed - */ - private void analyzeShowDbLocks(ASTNode ast) throws SemanticException { - boolean isExtended = (ast.getChildCount() > 1); - String dbName = stripQuotes(ast.getChild(0).getText()); - - HiveTxnManager txnManager = null; - try { - txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); - } catch (LockException e) { - throw new SemanticException(e.getMessage()); - } - - ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), dbName, - isExtended, txnManager.useNewShowLocksFormat()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showLocksDesc))); - setFetchTask(createFetchTask(showLocksDesc.getSchema())); - - // Need to initialize the lock manager - ctx.setNeedLockMgr(true); - } - - private void analyzeShowViews(ASTNode ast) throws SemanticException { - ShowTablesDesc showViewsDesc; - String dbName = SessionState.get().getCurrentDatabase(); - String viewNames = null; - - if (ast.getChildCount() > 3) { - throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); - } - - switch (ast.getChildCount()) { - case 1: // Uses a pattern - viewNames = unescapeSQLString(ast.getChild(0).getText()); - showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, viewNames, TableType.VIRTUAL_VIEW); - break; - case 2: // Specifies a DB - assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); - dbName = unescapeIdentifier(ast.getChild(1).getText()); - validateDatabase(dbName); - showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.VIRTUAL_VIEW); - break; - case 3: // Uses a pattern and specifies a DB - assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); - dbName = unescapeIdentifier(ast.getChild(1).getText()); - viewNames = unescapeSQLString(ast.getChild(2).getText()); - validateDatabase(dbName); - showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, viewNames, TableType.VIRTUAL_VIEW); - break; - default: // No pattern or DB - showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.VIRTUAL_VIEW); - break; - } - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showViewsDesc))); - setFetchTask(createFetchTask(showViewsDesc.getSchema())); - } - - private void analyzeShowMaterializedViews(ASTNode ast) throws SemanticException { - ShowTablesDesc showMaterializedViewsDesc; - String dbName = SessionState.get().getCurrentDatabase(); - String materializedViewNames = null; - - if (ast.getChildCount() > 3) { - throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); - } - - switch (ast.getChildCount()) { - case 1: // Uses a pattern - materializedViewNames = unescapeSQLString(ast.getChild(0).getText()); - showMaterializedViewsDesc = new ShowTablesDesc( - ctx.getResFile(), dbName, materializedViewNames, TableType.MATERIALIZED_VIEW); - break; - case 2: // Specifies a DB - assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); - dbName = unescapeIdentifier(ast.getChild(1).getText()); - validateDatabase(dbName); - showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.MATERIALIZED_VIEW); - break; - case 3: // Uses a pattern and specifies a DB - assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); - dbName = unescapeIdentifier(ast.getChild(1).getText()); - materializedViewNames = unescapeSQLString(ast.getChild(2).getText()); - validateDatabase(dbName); - showMaterializedViewsDesc = new ShowTablesDesc( - ctx.getResFile(), dbName, materializedViewNames, TableType.MATERIALIZED_VIEW); - break; - default: // 
No pattern or DB - showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.MATERIALIZED_VIEW); - break; - } - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showMaterializedViewsDesc))); - setFetchTask(createFetchTask(showMaterializedViewsDesc.getSchema())); - } - - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "LOCK TABLE ..;". - * - * @param ast - * The parsed command tree. - * @throws SemanticException - * Parsing failed - */ - private void analyzeLockTable(ASTNode ast) - throws SemanticException { - String tableName = getUnescapedName((ASTNode) ast.getChild(0)).toLowerCase(); - String mode = unescapeIdentifier(ast.getChild(1).getText().toUpperCase()); - List<Map<String, String>> partSpecs = getPartitionSpecs(getTable(tableName), ast); - - // We only can have a single partition spec - assert (partSpecs.size() <= 1); - Map<String, String> partSpec = null; - if (partSpecs.size() > 0) { - partSpec = partSpecs.get(0); - } - - LockTableDesc lockTblDesc = new LockTableDesc(tableName, mode, partSpec, - HiveConf.getVar(conf, ConfVars.HIVEQUERYID), ctx.getCmd()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), lockTblDesc))); - - // Need to initialize the lock manager - ctx.setNeedLockMgr(true); - } - - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "UNLOCK TABLE ..;". - * - * @param ast - * The parsed command tree. - * @throws SemanticException - * Parsing failed - */ - private void analyzeUnlockTable(ASTNode ast) - throws SemanticException { - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - List<Map<String, String>> partSpecs = getPartitionSpecs(getTable(tableName), ast); - - // We only can have a single partition spec - assert (partSpecs.size() <= 1); - Map<String, String> partSpec = null; - if (partSpecs.size() > 0) { - partSpec = partSpecs.get(0); - } - - UnlockTableDesc unlockTblDesc = new UnlockTableDesc(tableName, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), unlockTblDesc))); - - // Need to initialize the lock manager - ctx.setNeedLockMgr(true); - } - private void analyzeAlterTableRename(TableName source, ASTNode ast, boolean expectView) throws SemanticException { final TableName target = getQualifiedTableName((ASTNode) ast.getChild(0)); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 8d1136a42b..cf5753b4fd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -103,18 +103,11 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t queryState.setCommandType(HiveOperation.ALTERVIEW_AS); return new SemanticAnalyzer(queryState); } - case HiveParser.TOK_DESCTABLE: case HiveParser.TOK_MSCK: - case HiveParser.TOK_SHOWTABLES: - case HiveParser.TOK_SHOW_TABLESTATUS: - case HiveParser.TOK_SHOW_TBLPROPERTIES: - case HiveParser.TOK_SHOWLOCKS: case HiveParser.TOK_SHOWDBLOCKS: case HiveParser.TOK_SHOWCONF: case HiveParser.TOK_SHOWVIEWS: case HiveParser.TOK_SHOWMATERIALIZEDVIEWS: - case HiveParser.TOK_LOCKTABLE: - case HiveParser.TOK_UNLOCKTABLE: case HiveParser.TOK_TRUNCATETABLE: return new DDLSemanticAnalyzer(queryState); diff --git ql/src/test/queries/clientpositive/show_materialized_views.q ql/src/test/queries/clientpositive/show_materialized_views.q index c6ae0210b7..81f86a7a95 100644 ---
ql/src/test/queries/clientpositive/show_materialized_views.q +++ ql/src/test/queries/clientpositive/show_materialized_views.q @@ -29,9 +29,12 @@ CREATE MATERIALIZED VIEW shtb_test2_view2 DISABLE REWRITE AS SELECT * FROM shtb_test2 where KEY > 100 and KEY < 200; USE test1; +EXPLAIN SHOW MATERIALIZED VIEWS; SHOW MATERIALIZED VIEWS; +EXPLAIN SHOW MATERIALIZED VIEWS '*test*'; SHOW MATERIALIZED VIEWS '*test*'; SHOW MATERIALIZED VIEWS '*view2'; +EXPLAIN SHOW MATERIALIZED VIEWS LIKE 'shtb_test1_view1|shtb_test1_view2'; SHOW MATERIALIZED VIEWS LIKE 'shtb_test1_view1|shtb_test1_view2'; USE test2; @@ -39,10 +42,13 @@ SHOW MATERIALIZED VIEWS 'shtb_*'; -- SHOW MATERIALIZED VIEWS basic syntax tests USE default; +EXPLAIN SHOW MATERIALIZED VIEWS FROM test1; SHOW MATERIALIZED VIEWS FROM test1; SHOW MATERIALIZED VIEWS FROM test2; +EXPLAIN SHOW MATERIALIZED VIEWS IN test1; SHOW MATERIALIZED VIEWS IN test1; SHOW MATERIALIZED VIEWS IN default; +EXPLAIN SHOW MATERIALIZED VIEWS IN test1 "shtb_test*"; SHOW MATERIALIZED VIEWS IN test1 "shtb_test*"; DESCRIBE FORMATTED test1.shtb_full_view2; DESCRIBE FORMATTED test1.shtb_test1_view1; diff --git ql/src/test/queries/clientpositive/show_tables.q ql/src/test/queries/clientpositive/show_tables.q index 087d40f267..8576daa689 100644 --- ql/src/test/queries/clientpositive/show_tables.q +++ ql/src/test/queries/clientpositive/show_tables.q @@ -21,13 +21,21 @@ CREATE VIEW test_view_n100 AS SELECT * FROM foo_n4; -- SHOW TABLES basic syntax tests USE default; +EXPLAIN SHOW TABLES FROM test_db; SHOW TABLES FROM test_db; +EXPLAIN SHOW EXTENDED TABLES FROM test_db; SHOW EXTENDED TABLES FROM test_db; +EXPLAIN SHOW TABLES IN test_db; SHOW TABLES IN test_db; +EXPLAIN SHOW EXTENDED TABLES IN test_db; SHOW EXTENDED TABLES IN test_db; +EXPLAIN SHOW TABLES IN test_db "test*"; SHOW TABLES IN test_db "test*"; +EXPLAIN SHOW TABLES IN test_db LIKE "nomatch"; SHOW TABLES IN test_db LIKE "nomatch"; +EXPLAIN SHOW TABLES IN test_db WHERE `table_type` = "MANAGED_TABLE"; SHOW TABLES IN test_db WHERE `table_type` = "MANAGED_TABLE"; +EXPLAIN SHOW EXTENDED TABLES IN test_db WHERE `table_type` = "VIRTUAL_VIEW"; SHOW EXTENDED TABLES IN test_db WHERE `table_type` = "VIRTUAL_VIEW"; -- SHOW TABLE EXTENDED basic syntax tests and wildcard diff --git ql/src/test/queries/clientpositive/show_views.q ql/src/test/queries/clientpositive/show_views.q index 726ad4b22e..1af89b6d9e 100644 --- ql/src/test/queries/clientpositive/show_views.q +++ ql/src/test/queries/clientpositive/show_views.q @@ -13,9 +13,12 @@ CREATE VIEW shtb_test1_view1_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 1000 CREATE VIEW shtb_test2_view2_n0 AS SELECT * FROM shtb_test2_n1 where KEY > 100 and KEY < 200; USE test1; +EXPLAIN SHOW VIEWS; SHOW VIEWS; +EXPLAIN SHOW VIEWS 'test_*'; SHOW VIEWS 'test_*'; SHOW VIEWS '*view2'; +EXPLAIN SHOW VIEWS LIKE 'test_view1|test_view2'; SHOW VIEWS LIKE 'test_view1|test_view2'; USE test2; @@ -23,10 +26,13 @@ SHOW VIEWS 'shtb_*'; -- SHOW VIEWS basic syntax tests USE default; +EXPLAIN SHOW VIEWS FROM test1; SHOW VIEWS FROM test1; SHOW VIEWS FROM test2; +EXPLAIN SHOW VIEWS IN test1; SHOW VIEWS IN test1; SHOW VIEWS IN default; +EXPLAIN SHOW VIEWS IN test1 "shtb_test_*"; SHOW VIEWS IN test1 "shtb_test_*"; SHOW VIEWS IN test2 LIKE "nomatch"; diff --git ql/src/test/results/clientpositive/show_materialized_views.q.out ql/src/test/results/clientpositive/show_materialized_views.q.out index c3328511bc..57bd93b545 100644 --- ql/src/test/results/clientpositive/show_materialized_views.q.out +++ 
ql/src/test/results/clientpositive/show_materialized_views.q.out @@ -122,6 +122,25 @@ PREHOOK: Input: database:test1 POSTHOOK: query: USE test1 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test1 +PREHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS +PREHOOK: type: SHOWMATERIALIZEDVIEWS +POSTHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS +POSTHOOK: type: SHOWMATERIALIZEDVIEWS +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Materialized Views + database name: test1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: SHOW MATERIALIZED VIEWS PREHOOK: type: SHOWMATERIALIZEDVIEWS POSTHOOK: query: SHOW MATERIALIZED VIEWS @@ -131,6 +150,26 @@ shtb_full_view2 Yes Manual refresh (Valid for 5min) shtb_test1_view1 No Manual refresh shtb_test1_view2 Yes Manual refresh (Valid always) +PREHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS '*test*' +PREHOOK: type: SHOWMATERIALIZEDVIEWS +POSTHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS '*test*' +POSTHOOK: type: SHOWMATERIALIZEDVIEWS +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Materialized Views + database name: test1 + pattern: *test* + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: SHOW MATERIALIZED VIEWS '*test*' PREHOOK: type: SHOWMATERIALIZEDVIEWS POSTHOOK: query: SHOW MATERIALIZED VIEWS '*test*' @@ -147,6 +186,26 @@ POSTHOOK: type: SHOWMATERIALIZEDVIEWS shtb_full_view2 Yes Manual refresh (Valid for 5min) shtb_test1_view2 Yes Manual refresh (Valid always) +PREHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS LIKE 'shtb_test1_view1|shtb_test1_view2' +PREHOOK: type: SHOWMATERIALIZEDVIEWS +POSTHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS LIKE 'shtb_test1_view1|shtb_test1_view2' +POSTHOOK: type: SHOWMATERIALIZEDVIEWS +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Materialized Views + database name: test1 + pattern: shtb_test1_view1|shtb_test1_view2 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: SHOW MATERIALIZED VIEWS LIKE 'shtb_test1_view1|shtb_test1_view2' PREHOOK: type: SHOWMATERIALIZEDVIEWS POSTHOOK: query: SHOW MATERIALIZED VIEWS LIKE 'shtb_test1_view1|shtb_test1_view2' @@ -175,6 +234,25 @@ PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default +PREHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS FROM test1 +PREHOOK: type: SHOWMATERIALIZEDVIEWS +POSTHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS FROM test1 +POSTHOOK: type: SHOWMATERIALIZEDVIEWS +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Materialized Views + database name: test1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: SHOW MATERIALIZED VIEWS FROM test1 PREHOOK: type: SHOWMATERIALIZEDVIEWS POSTHOOK: query: SHOW MATERIALIZED VIEWS FROM test1 @@ -192,6 +270,25 @@ POSTHOOK: type: SHOWMATERIALIZEDVIEWS shtb_test1_view1 No Manual refresh shtb_test2_view2 No Manual refresh +PREHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS IN test1 +PREHOOK: type: SHOWMATERIALIZEDVIEWS +POSTHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS IN test1 +POSTHOOK: type: SHOWMATERIALIZEDVIEWS +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE 
PLANS:
+  Stage: Stage-0
+    Show Materialized Views
+      database name: test1
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW MATERIALIZED VIEWS IN test1
 PREHOOK: type: SHOWMATERIALIZEDVIEWS
 POSTHOOK: query: SHOW MATERIALIZED VIEWS IN test1
@@ -205,6 +302,26 @@ PREHOOK: query: SHOW MATERIALIZED VIEWS IN default
 PREHOOK: type: SHOWMATERIALIZEDVIEWS
 POSTHOOK: query: SHOW MATERIALIZED VIEWS IN default
 POSTHOOK: type: SHOWMATERIALIZEDVIEWS
+PREHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS IN test1 "shtb_test*"
+PREHOOK: type: SHOWMATERIALIZEDVIEWS
+POSTHOOK: query: EXPLAIN SHOW MATERIALIZED VIEWS IN test1 "shtb_test*"
+POSTHOOK: type: SHOWMATERIALIZEDVIEWS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Materialized Views
+      database name: test1
+      pattern: shtb_test*
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW MATERIALIZED VIEWS IN test1 "shtb_test*"
 PREHOOK: type: SHOWMATERIALIZEDVIEWS
 POSTHOOK: query: SHOW MATERIALIZED VIEWS IN test1 "shtb_test*"
diff --git ql/src/test/results/clientpositive/show_tables.q.out ql/src/test/results/clientpositive/show_tables.q.out
index 82cc06f8b8..342ebd4016 100644
--- ql/src/test/results/clientpositive/show_tables.q.out
+++ ql/src/test/results/clientpositive/show_tables.q.out
@@ -131,6 +131,27 @@ PREHOOK: Input: database:default
 POSTHOOK: query: USE default
 POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:default
+PREHOOK: query: EXPLAIN SHOW TABLES FROM test_db
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: EXPLAIN SHOW TABLES FROM test_db
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Tables
+      database name: test_db
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW TABLES FROM test_db
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:test_db
@@ -141,6 +162,28 @@ bar_n0
 baz
 foo_n4
 test_view_n100
+PREHOOK: query: EXPLAIN SHOW EXTENDED TABLES FROM test_db
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: EXPLAIN SHOW EXTENDED TABLES FROM test_db
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Tables
+      database name: test_db
+      extended: true
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW EXTENDED TABLES FROM test_db
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:test_db
@@ -153,6 +196,27 @@ baz	MANAGED_TABLE
 foo_n4	MANAGED_TABLE
 test_view_n100	VIRTUAL_VIEW
+PREHOOK: query: EXPLAIN SHOW TABLES IN test_db
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: EXPLAIN SHOW TABLES IN test_db
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Tables
+      database name: test_db
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW TABLES IN test_db
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:test_db
@@ -163,6 +227,28 @@ bar_n0
 baz
 foo_n4
 test_view_n100
+PREHOOK: query: EXPLAIN SHOW EXTENDED TABLES IN test_db
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: EXPLAIN SHOW EXTENDED TABLES IN test_db
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Tables
+      database name: test_db
+      extended: true
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW EXTENDED TABLES IN test_db
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:test_db
@@ -175,6 +261,28 @@ baz	MANAGED_TABLE
 foo_n4	MANAGED_TABLE
 test_view_n100	VIRTUAL_VIEW
+PREHOOK: query: EXPLAIN SHOW TABLES IN test_db "test*"
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: EXPLAIN SHOW TABLES IN test_db "test*"
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Tables
+      database name: test_db
+      pattern: test*
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW TABLES IN test_db "test*"
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:test_db
@@ -182,12 +290,56 @@ POSTHOOK: query: SHOW TABLES IN test_db "test*"
 POSTHOOK: type: SHOWTABLES
 POSTHOOK: Input: database:test_db
 test_view_n100
+PREHOOK: query: EXPLAIN SHOW TABLES IN test_db LIKE "nomatch"
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: EXPLAIN SHOW TABLES IN test_db LIKE "nomatch"
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Tables
+      database name: test_db
+      pattern: nomatch
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW TABLES IN test_db LIKE "nomatch"
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:test_db
 POSTHOOK: query: SHOW TABLES IN test_db LIKE "nomatch"
 POSTHOOK: type: SHOWTABLES
 POSTHOOK: Input: database:test_db
+PREHOOK: query: EXPLAIN SHOW TABLES IN test_db WHERE `table_type` = "MANAGED_TABLE"
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: EXPLAIN SHOW TABLES IN test_db WHERE `table_type` = "MANAGED_TABLE"
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Tables
+      database name: test_db
+      table type filter: MANAGED_TABLE
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW TABLES IN test_db WHERE `table_type` = "MANAGED_TABLE"
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:test_db
@@ -197,6 +349,29 @@ POSTHOOK: Input: database:test_db
 bar_n0
 baz
 foo_n4
+PREHOOK: query: EXPLAIN SHOW EXTENDED TABLES IN test_db WHERE `table_type` = "VIRTUAL_VIEW"
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: EXPLAIN SHOW EXTENDED TABLES IN test_db WHERE `table_type` = "VIRTUAL_VIEW"
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Tables
+      database name: test_db
+      table type filter: VIRTUAL_VIEW
+      extended: true
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW EXTENDED TABLES IN test_db WHERE `table_type` = "VIRTUAL_VIEW"
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:test_db
diff --git ql/src/test/results/clientpositive/show_views.q.out ql/src/test/results/clientpositive/show_views.q.out
index 311f700cb1..b5d2027420 100644
--- ql/src/test/results/clientpositive/show_views.q.out
+++ ql/src/test/results/clientpositive/show_views.q.out
@@ -117,6 +117,25 @@ PREHOOK: Input: database:test1
 POSTHOOK: query: USE test1
 POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:test1
+PREHOOK: query: EXPLAIN SHOW VIEWS
+PREHOOK: type: SHOWVIEWS
+POSTHOOK: query: EXPLAIN SHOW VIEWS
+POSTHOOK: type: SHOWVIEWS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Views
+      database name: test1
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW VIEWS
 PREHOOK: type: SHOWVIEWS
 POSTHOOK: query: SHOW VIEWS
@@ -124,6 +143,26 @@ POSTHOOK: type: SHOWVIEWS
 shtb_full_view2_n0
 shtb_test1_view1_n0
 shtb_test1_view2_n0
+PREHOOK: query: EXPLAIN SHOW VIEWS 'test_*'
+PREHOOK: type: SHOWVIEWS
+POSTHOOK: query: EXPLAIN SHOW VIEWS 'test_*'
+POSTHOOK: type: SHOWVIEWS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Views
+      database name: test1
+      pattern: test_*
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW VIEWS 'test_*'
 PREHOOK: type: SHOWVIEWS
 POSTHOOK: query: SHOW VIEWS 'test_*'
@@ -132,6 +171,26 @@ PREHOOK: query: SHOW VIEWS '*view2'
 PREHOOK: type: SHOWVIEWS
 POSTHOOK: query: SHOW VIEWS '*view2'
 POSTHOOK: type: SHOWVIEWS
+PREHOOK: query: EXPLAIN SHOW VIEWS LIKE 'test_view1|test_view2'
+PREHOOK: type: SHOWVIEWS
+POSTHOOK: query: EXPLAIN SHOW VIEWS LIKE 'test_view1|test_view2'
+POSTHOOK: type: SHOWVIEWS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Views
+      database name: test1
+      pattern: test_view1|test_view2
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW VIEWS LIKE 'test_view1|test_view2'
 PREHOOK: type: SHOWVIEWS
 POSTHOOK: query: SHOW VIEWS LIKE 'test_view1|test_view2'
@@ -154,6 +213,25 @@ PREHOOK: Input: database:default
 POSTHOOK: query: USE default
 POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:default
+PREHOOK: query: EXPLAIN SHOW VIEWS FROM test1
+PREHOOK: type: SHOWVIEWS
+POSTHOOK: query: EXPLAIN SHOW VIEWS FROM test1
+POSTHOOK: type: SHOWVIEWS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Views
+      database name: test1
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW VIEWS FROM test1
 PREHOOK: type: SHOWVIEWS
 POSTHOOK: query: SHOW VIEWS FROM test1
@@ -167,6 +245,25 @@ POSTHOOK: query: SHOW VIEWS FROM test2
 POSTHOOK: type: SHOWVIEWS
 shtb_test1_view1_n0
 shtb_test2_view2_n0
+PREHOOK: query: EXPLAIN SHOW VIEWS IN test1
+PREHOOK: type: SHOWVIEWS
+POSTHOOK: query: EXPLAIN SHOW VIEWS IN test1
+POSTHOOK: type: SHOWVIEWS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Views
+      database name: test1
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW VIEWS IN test1
 PREHOOK: type: SHOWVIEWS
 POSTHOOK: query: SHOW VIEWS IN test1
@@ -178,6 +275,26 @@ PREHOOK: query: SHOW VIEWS IN default
 PREHOOK: type: SHOWVIEWS
 POSTHOOK: query: SHOW VIEWS IN default
 POSTHOOK: type: SHOWVIEWS
+PREHOOK: query: EXPLAIN SHOW VIEWS IN test1 "shtb_test_*"
+PREHOOK: type: SHOWVIEWS
+POSTHOOK: query: EXPLAIN SHOW VIEWS IN test1 "shtb_test_*"
+POSTHOOK: type: SHOWVIEWS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Views
+      database name: test1
+      pattern: shtb_test_*
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SHOW VIEWS IN test1 "shtb_test_*"
 PREHOOK: type: SHOWVIEWS
 POSTHOOK: query: SHOW VIEWS IN test1 "shtb_test_*"