diff --git accumulo-handler/src/test/results/positive/accumulo_queries.q.out accumulo-handler/src/test/results/positive/accumulo_queries.q.out index ac2d52760d..d0214829b2 100644 --- accumulo-handler/src/test/results/positive/accumulo_queries.q.out +++ accumulo-handler/src/test/results/positive/accumulo_queries.q.out @@ -59,8 +59,7 @@ STAGE PLANS: COLUMN_STATS_ACCURATE Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-2 Map Reduce @@ -543,8 +542,7 @@ STAGE PLANS: COLUMN_STATS_ACCURATE Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-3 Map Reduce diff --git accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out index ac809fa685..3fa3f167b6 100644 --- accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out +++ accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out @@ -70,8 +70,7 @@ STAGE PLANS: Table: default.src_x1 Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-3 Map Reduce diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index c33d03e90a..d48f2c80e0 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1518,10 +1518,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal CLIPROMPT("hive.cli.prompt", "hive", "Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" + "Variable substitution will only be invoked at the Hive CLI startup."), - CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1, - "The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.\n" + - "If the value of this property is -1, then Hive will use the auto-detected terminal width."), - /** * @deprecated Use MetastoreConf.FS_HANDLER_CLS */ diff --git contrib/src/test/results/clientnegative/serde_regex.q.out contrib/src/test/results/clientnegative/serde_regex.q.out index 58a4679d67..65b8e314fc 100644 --- contrib/src/test/results/clientnegative/serde_regex.q.out +++ contrib/src/test/results/clientnegative/serde_regex.q.out @@ -49,16 +49,15 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: host string, identity string, user string, time string, request string, status int, size int, referer string, agent string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.contrib.serde2.RegexSerDe - serde properties: - input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))? 
- output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s - name: default.serde_regex + Create Table + columns: host string, identity string, user string, time string, request string, status int, size int, referer string, agent string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.contrib.serde2.RegexSerDe + serde properties: + input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))? + output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s + name: default.serde_regex PREHOOK: query: CREATE TABLE serde_regex( host STRING, @@ -79,4 +78,4 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@serde_regex -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int) diff --git contrib/src/test/results/clientpositive/fileformat_base64.q.out contrib/src/test/results/clientpositive/fileformat_base64.q.out index 8e6a5e42fb..c204feeda9 100644 --- contrib/src/test/results/clientpositive/fileformat_base64.q.out +++ contrib/src/test/results/clientpositive/fileformat_base64.q.out @@ -21,12 +21,11 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key int, value string - input format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat - output format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextOutputFormat - name: default.base64_test + Create Table + columns: key int, value string + input format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat + output format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextOutputFormat + name: default.base64_test PREHOOK: query: CREATE TABLE base64_test(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat' diff --git contrib/src/test/results/clientpositive/serde_regex.q.out contrib/src/test/results/clientpositive/serde_regex.q.out index 691e254d76..80bd2e43d4 100644 --- contrib/src/test/results/clientpositive/serde_regex.q.out +++ contrib/src/test/results/clientpositive/serde_regex.q.out @@ -43,16 +43,15 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: host string, identity string, user string, time string, request string, status string, size string, referer string, agent string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.contrib.serde2.RegexSerDe - serde properties: - input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))? 
- output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s - name: default.serde_regex + Create Table + columns: host string, identity string, user string, time string, request string, status string, size string, referer string, agent string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.contrib.serde2.RegexSerDe + serde properties: + input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))? + output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s + name: default.serde_regex PREHOOK: query: CREATE TABLE serde_regex( host STRING, diff --git hbase-handler/src/test/results/positive/hbase_ddl.q.out hbase-handler/src/test/results/positive/hbase_ddl.q.out index e87240a8e8..7adb474b0d 100644 --- hbase-handler/src/test/results/positive/hbase_ddl.q.out +++ hbase-handler/src/test/results/positive/hbase_ddl.q.out @@ -57,8 +57,7 @@ STAGE PLANS: COLUMN_STATS_ACCURATE Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-2 Map Reduce diff --git hbase-handler/src/test/results/positive/hbase_queries.q.out hbase-handler/src/test/results/positive/hbase_queries.q.out index 02f46d8c39..793e01f6e2 100644 --- hbase-handler/src/test/results/positive/hbase_queries.q.out +++ hbase-handler/src/test/results/positive/hbase_queries.q.out @@ -57,8 +57,7 @@ STAGE PLANS: COLUMN_STATS_ACCURATE Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-2 Map Reduce @@ -543,8 +542,7 @@ STAGE PLANS: COLUMN_STATS_ACCURATE Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-3 Map Reduce diff --git hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out index b15515e604..60d0829c63 100644 --- hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out +++ hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out @@ -70,8 +70,7 @@ STAGE PLANS: Table: default.src_x1 Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-3 Map Reduce diff --git hbase-handler/src/test/results/positive/hbasestats.q.out hbase-handler/src/test/results/positive/hbasestats.q.out index 5143522a8d..783708f48b 100644 --- hbase-handler/src/test/results/positive/hbasestats.q.out +++ hbase-handler/src/test/results/positive/hbasestats.q.out @@ -85,8 +85,7 @@ STAGE PLANS: COLUMN_STATS_ACCURATE Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-2 Map Reduce diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java index a377805549..8366e5ec93 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java @@ -27,7 +27,9 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import 
org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -39,7 +41,6 @@ import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.StorageFormat; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hive.hcatalog.common.HCatConstants; import org.apache.hive.hcatalog.common.HCatUtil; @@ -135,18 +136,19 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, throws SemanticException { if (rootTasks.size() == 0) { - // There will be no DDL task created in case if its CREATE TABLE IF - // NOT EXISTS + // There will be no DDL task created in case if its CREATE TABLE IF NOT EXISTS return; } - CreateTableDesc desc = ((DDLTask) rootTasks.get(rootTasks.size() - 1)) - .getWork().getCreateTblDesc(); - if (desc == null) { - // Desc will be null if its CREATE TABLE LIKE. Desc will be - // contained in CreateTableLikeDesc. Currently, HCat disallows CTLT in - // pre-hook. So, desc can never be null. + Task t = rootTasks.get(rootTasks.size() - 1); + if (!(t instanceof DDLTask2)) { return; } + DDLTask2 task = (DDLTask2)t; + DDLDesc d = task.getWork().getDDLDesc(); + if (!(d instanceof CreateTableDesc)) { + return; + } + CreateTableDesc desc = (CreateTableDesc)d; Map tblProps = desc.getTblProps(); if (tblProps == null) { // tblProps will be null if user didnt use tblprops in his CREATE @@ -157,8 +159,7 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, // first check if we will allow the user to create table. String storageHandler = desc.getStorageHandler(); - if (StringUtils.isEmpty(storageHandler)) { - } else { + if (StringUtils.isNotEmpty(storageHandler)) { try { HiveStorageHandler storageHandlerInst = HCatUtil .getStorageHandler(context.getConf(), @@ -173,33 +174,31 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, } } - if (desc != null) { - try { - Table table = context.getHive().newTable(desc.getTableName()); - if (desc.getLocation() != null) { - table.setDataLocation(new Path(desc.getLocation())); - } - if (desc.getStorageHandler() != null) { - table.setProperty( + try { + Table table = context.getHive().newTable(desc.getTableName()); + if (desc.getLocation() != null) { + table.setDataLocation(new Path(desc.getLocation())); + } + if (desc.getStorageHandler() != null) { + table.setProperty( org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, desc.getStorageHandler()); - } - for (Map.Entry prop : tblProps.entrySet()) { - table.setProperty(prop.getKey(), prop.getValue()); - } - for (Map.Entry prop : desc.getSerdeProps().entrySet()) { - table.setSerdeParam(prop.getKey(), prop.getValue()); - } - //TODO: set other Table properties as needed + } + for (Map.Entry prop : tblProps.entrySet()) { + table.setProperty(prop.getKey(), prop.getValue()); + } + for (Map.Entry prop : desc.getSerdeProps().entrySet()) { + table.setSerdeParam(prop.getKey(), prop.getValue()); + } + //TODO: set other Table properties as needed - //authorize against the table operation so that location permissions can be checked if any + //authorize against the table operation so that location permissions can be checked if any - if (HCatAuthUtil.isAuthorizationEnabled(context.getConf())) { - authorize(table, Privilege.CREATE); - } - } catch (HiveException ex) { - throw new 
SemanticException(ex); + if (HCatAuthUtil.isAuthorizationEnabled(context.getConf())) { + authorize(table, Privilege.CREATE); } + } catch (HiveException ex) { + throw new SemanticException(ex); } desc.setTblProps(tblProps); diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index fd159fe285..6d9dd5eb05 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -25,6 +25,9 @@ import org.apache.hadoop.hive.ql.ddl.database.DropDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.ddl.database.SwitchDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowTableStatusDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowTablesDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -38,12 +41,9 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.common.ErrorType; @@ -297,51 +297,47 @@ protected void authorizeDDLWork2(HiveSemanticAnalyzerHookContext cntxt, Hive hiv SwitchDatabaseDesc switchDb = (SwitchDatabaseDesc)ddlDesc; Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName()); authorize(db, Privilege.SELECT); - } - } - - @Override - protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) - throws HiveException { - // DB opereations, none of them are enforced by Hive right now. - - ShowTablesDesc showTables = work.getShowTblsDesc(); - if (showTables != null) { + } else if (ddlDesc instanceof ShowTablesDesc) { + ShowTablesDesc showTables = (ShowTablesDesc)ddlDesc; String dbName = showTables.getDbName() == null ? SessionState.get().getCurrentDatabase() - : showTables.getDbName(); + : showTables.getDbName(); authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT); - } - - ShowTableStatusDesc showTableStatus = work.getShowTblStatusDesc(); - if (showTableStatus != null) { + } else if (ddlDesc instanceof DescTableDesc) { + // we should be careful when authorizing table based on just the + // table name. If columns have separate authorization domain, it + // must be honored + DescTableDesc descTable = (DescTableDesc)ddlDesc; + String tableName = extractTableName(descTable.getTableName()); + authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT); + } else if (ddlDesc instanceof ShowTableStatusDesc) { + ShowTableStatusDesc showTableStatus = (ShowTableStatusDesc)ddlDesc; String dbName = showTableStatus.getDbName() == null ? 
SessionState.get().getCurrentDatabase() - : showTableStatus.getDbName(); + : showTableStatus.getDbName(); authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT); } + } + @Override + protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) + throws HiveException { // TODO: add alter database support in HCat // Table operations. - DropTableDesc dropTable = work.getDropTblDesc(); - if (dropTable != null) { - if (dropTable.getPartSpecs() == null) { - // drop table is already enforced by Hive. We only check for table level location even if the - // table is partitioned. - } else { - //this is actually a ALTER TABLE DROP PARITITION statement - for (DropTableDesc.PartSpec partSpec : dropTable.getPartSpecs()) { - // partitions are not added as write entries in drop partitions in Hive - Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName()); - List partitions = null; - try { - partitions = hive.getPartitionsByFilter(table, partSpec.getPartSpec().getExprString()); - } catch (Exception e) { - throw new HiveException(e); - } - for (Partition part : partitions) { - authorize(part, Privilege.DROP); - } + DropPartitionDesc dropPartition = work.getDropPartitionDesc(); + if (dropPartition != null) { + //this is actually a ALTER TABLE DROP PARITITION statement + for (DropPartitionDesc.PartSpec partSpec : dropPartition.getPartSpecs()) { + // partitions are not added as write entries in drop partitions in Hive + Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropPartition.getTableName()); + List partitions = null; + try { + partitions = hive.getPartitionsByFilter(table, partSpec.getPartSpec().getExprString()); + } catch (Exception e) { + throw new HiveException(e); + } + for (Partition part : partitions) { + authorize(part, Privilege.DROP); } } } @@ -377,15 +373,6 @@ protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive //other alter operations are already supported by Hive } - // we should be careful when authorizing table based on just the - // table name. If columns have separate authorization domain, it - // must be honored - DescTableDesc descTable = work.getDescTblDesc(); - if (descTable != null) { - String tableName = extractTableName(descTable.getTableName()); - authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT); - } - ShowPartitionsDesc showParts = work.getShowPartsDesc(); if (showParts != null) { String tableName = extractTableName(showParts.getTabName()); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java index 45aac5fbec..85f703decb 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java @@ -1092,7 +1092,7 @@ public void testErrorMessages() throws SQLException { // codes and messages. This should be fixed. 
doTestErrorCase( "create table " + tableName + " (key int, value string)", - "FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask", + "FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2", "08S01", 1); } diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java index 3575a16bba..f988d42366 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java @@ -23,14 +23,14 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; public class DummySemanticAnalyzerHook extends AbstractSemanticAnalyzerHook{ @@ -92,7 +92,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) @Override public void postAnalyze(HiveSemanticAnalyzerHookContext context, List> rootTasks) throws SemanticException { - CreateTableDesc desc = ((DDLTask)rootTasks.get(rootTasks.size()-1)).getWork().getCreateTblDesc(); + CreateTableDesc desc = (CreateTableDesc) ((DDLTask2)rootTasks.get(rootTasks.size()-1)).getWork().getDDLDesc(); Map tblProps = desc.getTblProps(); if(tblProps == null) { tblProps = new HashMap(); diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java index e20ac64ee5..b2b00720c6 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java @@ -23,14 +23,14 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; @@ -62,8 +62,7 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, return; } - CreateTableDesc desc = ((DDLTask) rootTasks.get(rootTasks.size() - 1)).getWork() - .getCreateTblDesc(); + CreateTableDesc desc = (CreateTableDesc) ((DDLTask2) rootTasks.get(rootTasks.size() - 1)).getWork().getDDLDesc(); Map tblProps = desc.getTblProps(); if (tblProps == null) { tblProps = new HashMap(); diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java 
ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java index e349a0ac2c..d556d55f97 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java @@ -18,11 +18,6 @@ package org.apache.hadoop.hive.ql.ddl; -import java.io.DataOutputStream; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.metadata.HiveException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,14 +33,5 @@ public DDLOperation(DDLOperationContext context) { this.context = context; } - public abstract int execute() throws HiveException; - - protected DataOutputStream getOutputStream(Path outputFile) throws HiveException { - try { - FileSystem fs = outputFile.getFileSystem(context.getConf()); - return fs.create(outputFile); - } catch (Exception e) { - throw new HiveException(e); - } - } + public abstract int execute() throws Exception; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java index 924f0b31cf..14744d1461 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java @@ -20,6 +20,8 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.QueryPlan; +import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; @@ -33,12 +35,21 @@ private final HiveConf conf; private final DriverContext driverContext; private final MetaDataFormatter formatter; + private final DDLTask2 task; + private final DDLWork2 work; + private final QueryState queryState; + private final QueryPlan queryPlan; - public DDLOperationContext(HiveConf conf, DriverContext driverContext) throws HiveException { + public DDLOperationContext(HiveConf conf, DriverContext driverContext, DDLTask2 task, DDLWork2 work, + QueryState queryState, QueryPlan queryPlan) throws HiveException { this.db = Hive.get(conf); this.conf = conf; this.driverContext = driverContext; this.formatter = MetaDataFormatUtils.getFormatter(conf); + this.task = task; + this.work = work; + this.queryState = queryState; + this.queryPlan = queryPlan; } public Hive getDb() { @@ -56,4 +67,20 @@ public DriverContext getDriverContext() { public MetaDataFormatter getFormatter() { return formatter; } + + public DDLTask2 getTask() { + return task; + } + + public DDLWork2 getWork() { + return work; + } + + public QueryState getQueryState() { + return queryState; + } + + public QueryPlan getQueryPlan() { + return queryPlan; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java index 068e1e7a96..1f9a0bb173 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java @@ -65,7 +65,8 @@ public int execute(DriverContext driverContext) { DDLDesc ddlDesc = work.getDDLDesc(); if (DESC_TO_OPARATION.containsKey(ddlDesc.getClass())) { - DDLOperationContext context = new DDLOperationContext(conf, driverContext); + DDLOperationContext context = new DDLOperationContext(conf, driverContext, this, (DDLWork2)work, queryState, + queryPlan); Class ddlOpertaionClass = DESC_TO_OPARATION.get(ddlDesc.getClass()); Constructor 
constructor = ddlOpertaionClass.getConstructor(DDLOperationContext.class, ddlDesc.getClass()); diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java new file mode 100644 index 0000000000..c3d5f90d3e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hive.common.util.HiveStringUtils; +import org.apache.hive.common.util.ReflectionUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Utilities used by some DDLOperations. + */ +public final class DDLUtils { + private static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.DDLTask"); + + private DDLUtils() { + throw new UnsupportedOperationException("DDLUtils should not be instantiated"); + } + + public static DataOutputStream getOutputStream(Path outputFile, DDLOperationContext context) throws HiveException { + try { + FileSystem fs = outputFile.getFileSystem(context.getConf()); + return fs.create(outputFile); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * There are many places where "duplicate" Read/WriteEnity objects are added. The way this was + * initially implemented, the duplicate just replaced the previous object. + * (work.getOutputs() is a Set and WriteEntity#equals() relies on name) + * This may be benign for ReadEntity and perhaps was benign for WriteEntity before WriteType was + * added. Now that WriteEntity has a WriteType it replaces it with one with possibly different + * {@link org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType}. It's hard to imagine + * how this is desirable. + * + * As of HIVE-14993, WriteEntity with different WriteType must be considered different. 
+ * So WriteEntity created in DDLTask cause extra output in golden files, but only because + * DDLTask sets a different WriteType for the same Entity. + * + * In the spirit of bug-for-bug compatibility, this method ensures we only add new + * WriteEntity if it's really new. + * + * @return {@code true} if item was added + */ + public static boolean addIfAbsentByName(WriteEntity newWriteEntity, Set outputs) { + for(WriteEntity writeEntity : outputs) { + if(writeEntity.getName().equalsIgnoreCase(newWriteEntity.getName())) { + LOG.debug("Ignoring request to add {} because {} is present", newWriteEntity.toStringDetail(), + writeEntity.toStringDetail()); + return false; + } + } + outputs.add(newWriteEntity); + return true; + } + + public static boolean addIfAbsentByName(WriteEntity newWriteEntity, DDLOperationContext context) { + return addIfAbsentByName(newWriteEntity, context.getWork().getOutputs()); + } + + /** + * Check if the given serde is valid. + */ + public static void validateSerDe(String serdeName, DDLOperationContext context) throws HiveException { + validateSerDe(serdeName, context.getConf()); + } + + public static void validateSerDe(String serdeName, HiveConf conf) throws HiveException { + try { + Deserializer d = ReflectionUtil.newInstance(conf.getClassByName(serdeName). + asSubclass(Deserializer.class), conf); + if (d != null) { + LOG.debug("Found class for {}", serdeName); + } + } catch (Exception e) { + throw new HiveException("Cannot validate serde: " + serdeName, e); + } + } + + /** + * Validate if the given table/partition is eligible for update. + * + * @param db Database. + * @param tableName Table name of format db.table + * @param partSpec Partition spec for the partition + * @param replicationSpec Replications specification + * + * @return boolean true if allow the operation + * @throws HiveException + */ + public static boolean allowOperationInReplicationScope(Hive db, String tableName, Map partSpec, + ReplicationSpec replicationSpec) throws HiveException { + if ((null == replicationSpec) || (!replicationSpec.isInReplicationScope())) { + // Always allow the operation if it is not in replication scope. + return true; + } + // If the table/partition exist and is older than the event, then just apply the event else noop. + Table existingTable = db.getTable(tableName, false); + if ((existingTable != null) && replicationSpec.allowEventReplacementInto(existingTable.getParameters())) { + // Table exists and is older than the update. Now, need to ensure if update allowed on the partition. + if (partSpec != null) { + Partition existingPtn = db.getPartition(existingTable, partSpec, false); + return ((existingPtn != null) && replicationSpec.allowEventReplacementInto(existingPtn.getParameters())); + } + + // Replacement is allowed as the existing table is older than event + return true; + } + + // The table is missing either due to drop/rename which follows the operation. + // Or the existing table is newer than our update. So, don't allow the update. 
+ return false; + } + + public static String propertiesToString(Map props, List exclude) { + if (props.isEmpty()) { + return ""; + } + + Map sortedProperties = new TreeMap(props); + List realProps = new ArrayList(); + for (Map.Entry e : sortedProperties.entrySet()) { + if (e.getValue() != null && (exclude == null || !exclude.contains(e.getKey()))) { + realProps.add(" '" + e.getKey() + "'='" + HiveStringUtils.escapeHiveCommand(e.getValue()) + "'"); + } + } + return StringUtils.join(realProps, ", \n"); + } + + public static void writeToFile(String data, String file, DDLOperationContext context) throws IOException { + if (StringUtils.isEmpty(data)) { + return; + } + + Path resFile = new Path(file); + FileSystem fs = resFile.getFileSystem(context.getConf()); + try (FSDataOutputStream out = fs.create(resFile); + OutputStreamWriter writer = new OutputStreamWriter(out, "UTF-8")) { + writer.write(data); + writer.write((char) Utilities.newLineCode); + writer.flush(); + } + } + + public static void appendNonNull(StringBuilder builder, Object value) { + appendNonNull(builder, value, false); + } + + public static void appendNonNull(StringBuilder builder, Object value, boolean firstColumn) { + if (!firstColumn) { + builder.append((char)Utilities.tabCode); + } else if (builder.length() > 0) { + builder.append((char)Utilities.newLineCode); + } + if (value != null) { + builder.append(value); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java index d2fbe8fede..a2f49b7503 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java @@ -19,6 +19,8 @@ import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; import java.io.Serializable; @@ -67,6 +69,7 @@ public void setNeedLock(boolean needLock) { this.needLock = needLock; } + @Explain(skipHeader = true, explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public DDLDesc getDDLDesc() { return ddlDesc; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java index efaf389ee8..801ac62987 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java @@ -19,16 +19,16 @@ package org.apache.hadoop.hive.ql.ddl.database; import java.io.DataOutputStream; -import java.util.Map; +import java.util.SortedMap; import java.util.TreeMap; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; /** @@ -44,21 +44,15 @@ public DescDatabaseOperation(DDLOperationContext context, DescDatabaseDesc desc) @Override public int execute() throws HiveException { - try (DataOutputStream outStream = getOutputStream(new Path(desc.getResFile()))) { + try (DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { Database database = 
context.getDb().getDatabase(desc.getDatabaseName()); if (database == null) { throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, desc.getDatabaseName()); } - Map params = null; + SortedMap params = null; if (desc.isExt()) { - params = database.getParameters(); - } - - // If this is a q-test, let's order the params map (lexicographically) by - // key. This is to get consistent param ordering between Java7 and Java8. - if (HiveConf.getBoolVar(context.getConf(), HiveConf.ConfVars.HIVE_IN_TEST) && params != null) { - params = new TreeMap(params); + params = new TreeMap<>(database.getParameters()); } String location = database.getLocationUri(); @@ -66,9 +60,8 @@ public int execute() throws HiveException { location = "location/in/test"; } - PrincipalType ownerType = database.getOwnerType(); context.getFormatter().showDatabaseDescription(outStream, database.getName(), database.getDescription(), - location, database.getOwnerName(), (null == ownerType) ? null : ownerType.name(), params); + location, database.getOwnerName(), database.getOwnerType(), params); } catch (Exception e) { throw new HiveException(e, ErrorMsg.GENERIC_ERROR); } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseDesc.java similarity index 53% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseDesc.java index ba5d06e079..29dc266ebf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseDesc.java @@ -16,79 +16,47 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.database; import java.io.Serializable; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * ShowCreateDatabaseDesc. - * + * DDL task description for SHOW CREATE DATABASE commands. */ -@Explain(displayName = "Show Create Database", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowCreateDatabaseDesc extends DDLDesc implements Serializable { +@Explain(displayName = "Show Create Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowCreateDatabaseDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String resFile; - String dbName; - - /** - * thrift ddl for the result of showcreatedatabase. - */ - private static final String schema = "createdb_stmt#string"; - public String getSchema() { - return schema; + static { + DDLTask2.registerOperation(ShowCreateDatabaseDesc.class, ShowCreateDatabaseOperation.class); } - /** - * For serialization use only. - */ - public ShowCreateDatabaseDesc() { - } + private final String resFile; + private final String dbName; /** - * @param resFile - * @param dbName - * name of database to show + * Thrift ddl for the result of showcreatedatabase. 
*/ + public static final String SCHEMA = "createdb_stmt#string"; + public ShowCreateDatabaseDesc(String dbName, String resFile) { this.dbName = dbName; this.resFile = resFile; } - /** - * @return the resFile - */ @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; } - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return the databaseName - */ - @Explain(displayName = "database name", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName = "database name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getDatabaseName() { return dbName; } - - /** - * @param dbName - * the dbName to set - */ - public void setDatabaseName(String dbName) { - this.dbName = dbName; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseOperation.java new file mode 100644 index 0000000000..100ac95f40 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseOperation.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; + +import java.io.DataOutputStream; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hive.common.util.HiveStringUtils; + +/** + * Operation process showing the creation of a database. 
+ */ +public class ShowCreateDatabaseOperation extends DDLOperation { + private final ShowCreateDatabaseDesc desc; + + public ShowCreateDatabaseOperation(DDLOperationContext context, ShowCreateDatabaseDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context); + try { + return showCreateDatabase(outStream); + } catch (Exception e) { + throw new HiveException(e); + } finally { + IOUtils.closeStream(outStream); + } + } + + private int showCreateDatabase(DataOutputStream outStream) throws Exception { + Database database = context.getDb().getDatabase(desc.getDatabaseName()); + + StringBuilder createDbCommand = new StringBuilder(); + createDbCommand.append("CREATE DATABASE `").append(database.getName()).append("`\n"); + if (database.getDescription() != null) { + createDbCommand.append("COMMENT\n '"); + createDbCommand.append(HiveStringUtils.escapeHiveCommand(database.getDescription())).append("'\n"); + } + createDbCommand.append("LOCATION\n '"); + createDbCommand.append(database.getLocationUri()).append("'\n"); + String propertiesToString = DDLUtils.propertiesToString(database.getParameters(), null); + if (!propertiesToString.isEmpty()) { + createDbCommand.append("WITH DBPROPERTIES (\n"); + createDbCommand.append(propertiesToString).append(")\n"); + } + + outStream.write(createDbCommand.toString().getBytes("UTF-8")); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java index 30c4db8f3c..476762feb2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.io.IOUtils; @@ -53,7 +54,7 @@ public int execute() throws HiveException { LOG.info("Found {} database(s) matching the SHOW DATABASES statement.", databases.size()); // write the results in the file - DataOutputStream outStream = getOutputStream(new Path(desc.getResFile())); + DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context); try { context.getFormatter().showDatabases(outStream, databases); } catch (Exception e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java similarity index 96% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java index 4514af1f08..15fe4a99ea 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; import java.io.Serializable; import java.util.ArrayList; @@ -43,8 +43,10 @@ import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.hive.ql.exec.DDLTask; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -54,6 +56,10 @@ import org.apache.hadoop.hive.ql.parse.ParseUtils; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.PlanUtils; +import org.apache.hadoop.hive.ql.plan.ValidationUtility; import org.apache.hadoop.hive.ql.plan.Explain.Level; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; @@ -62,15 +68,18 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * CreateTableDesc. - * + * DDL task description for CREATE TABLE commands. */ @Explain(displayName = "Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateTableDesc extends DDLDesc implements Serializable { +public class CreateTableDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - private static Logger LOG = LoggerFactory.getLogger(CreateTableDesc.class); + private static final Logger LOG = LoggerFactory.getLogger(CreateTableDesc.class); + + static { + DDLTask2.registerOperation(CreateTableDesc.class, CreateTableOperation.class); + } + String databaseName; String tableName; boolean isExternal; @@ -303,7 +312,9 @@ public void setForeignKeys(ArrayList foreignKeys) { return defaultConstraints; } - public List getCheckConstraints() { return checkConstraints; } + public List getCheckConstraints() { + return checkConstraints; + } @Explain(displayName = "bucket columns") public List getBucketCols() { @@ -536,13 +547,10 @@ public void validate(HiveConf conf) if (this.getStorageHandler() == null) { try { - Class origin = Class.forName(this.getOutputFormat(), true, - Utilities.getSessionSpecifiedClassLoader()); - Class replaced = HiveFileFormatUtils - .getOutputFormatSubstitute(origin); + Class origin = Class.forName(this.getOutputFormat(), true, Utilities.getSessionSpecifiedClassLoader()); + Class replaced = HiveFileFormatUtils.getOutputFormatSubstitute(origin); if (!HiveOutputFormat.class.isAssignableFrom(replaced)) { - throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE - .getMsg()); + throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg()); } } catch (ClassNotFoundException e) { throw new SemanticException(ErrorMsg.CLASSPATH_ERROR.getMsg(), e); @@ -766,7 +774,7 @@ public Table toTable(HiveConf conf) throws HiveException { } else { // let's validate that the serde exists serDeClassName = getSerName(); - DDLTask.validateSerDe(serDeClassName, conf); + DDLUtils.validateSerDe(serDeClassName, conf); } tbl.setSerializationLib(serDeClassName); @@ -838,9 +846,9 @@ public Table toTable(HiveConf conf) throws HiveException { 
tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName()); } - if (DDLTask.doesTableNeedLocation(tbl)) { + if (CreateTableOperation.doesTableNeedLocation(tbl)) { // If location is specified - ensure that it is a full qualified name - DDLTask.makeLocationQualified(tbl.getDbName(), tbl, conf); + CreateTableOperation.makeLocationQualified(tbl, conf); } if (isExternal()) { @@ -925,8 +933,6 @@ public Long getInitialMmWriteId() { return initialMmWriteId; } - - public FileSinkDesc getAndUnsetWriter() { FileSinkDesc fsd = writer; writer = null; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java similarity index 52% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java index 2cc0712823..6652b79a88 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java @@ -16,47 +16,50 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; import java.io.Serializable; import java.util.Map; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * CreateTableLikeDesc. - * + * DDL task description for CREATE TABLE LIKE commands. */ @Explain(displayName = "Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateTableLikeDesc extends DDLDesc implements Serializable { +public class CreateTableLikeDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String tableName; - boolean isExternal; - String defaultInputFormat; - String defaultOutputFormat; - String defaultSerName; - Map defaultSerdeProps; - String location; - Map tblProps; - boolean ifNotExists; - String likeTableName; - boolean isTemporary = false; - boolean isUserStorageFormat = false; - - public CreateTableLikeDesc() { - } - public CreateTableLikeDesc(String tableName, boolean isExternal, boolean isTemporary, - String defaultInputFormat, String defaultOutputFormat, String location, - String defaultSerName, Map defaultSerdeProps, Map tblProps, - boolean ifNotExists, String likeTableName, boolean isUserStorageFormat) { + static { + DDLTask2.registerOperation(CreateTableLikeDesc.class, CreateTableLikeOperation.class); + } + + private final String tableName; + private final boolean isExternal; + private final boolean isTemporary; + private final String defaultInputFormat; + private final String defaultOutputFormat; + private final String location; + private final String defaultSerName; + private final Map defaultSerdeProps; + private final Map tblProps; + private final boolean ifNotExists; + private final String likeTableName; + private final boolean isUserStorageFormat; + + public CreateTableLikeDesc(String tableName, boolean isExternal, boolean isTemporary, String defaultInputFormat, + String defaultOutputFormat, String location, String defaultSerName, Map defaultSerdeProps, + Map tblProps, boolean ifNotExists, String likeTableName, boolean isUserStorageFormat) { this.tableName = tableName; this.isExternal = isExternal; this.isTemporary = isTemporary; - this.defaultInputFormat=defaultInputFormat; - this.defaultOutputFormat=defaultOutputFormat; - 
this.defaultSerName=defaultSerName; - this.defaultSerdeProps=defaultSerdeProps; + this.defaultInputFormat = defaultInputFormat; + this.defaultOutputFormat = defaultOutputFormat; + this.defaultSerName = defaultSerName; + this.defaultSerdeProps = defaultSerdeProps; this.location = location; this.tblProps = tblProps; this.ifNotExists = ifNotExists; @@ -69,131 +72,56 @@ public boolean getIfNotExists() { return ifNotExists; } - public void setIfNotExists(boolean ifNotExists) { - this.ifNotExists = ifNotExists; - } - @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { return tableName; } - public void setTableName(String tableName) { - this.tableName = tableName; - } - @Explain(displayName = "default input format") public String getDefaultInputFormat() { return defaultInputFormat; } - public void setInputFormat(String inputFormat) { - this.defaultInputFormat = inputFormat; - } - @Explain(displayName = "default output format") public String getDefaultOutputFormat() { return defaultOutputFormat; } - public void setOutputFormat(String outputFormat) { - this.defaultOutputFormat = outputFormat; - } - @Explain(displayName = "location") public String getLocation() { return location; } - public void setLocation(String location) { - this.location = location; - } - @Explain(displayName = "isExternal", displayOnlyOnTrue = true) public boolean isExternal() { return isExternal; } - public void setExternal(boolean isExternal) { - this.isExternal = isExternal; - } - - /** - * @return the default serDeName - */ @Explain(displayName = "default serde name") public String getDefaultSerName() { return defaultSerName; } - /** - * @param serName - * the serName to set - */ - public void setDefaultSerName(String serName) { - this.defaultSerName = serName; - } - - /** - * @return the default serDe properties - */ @Explain(displayName = "serde properties") public Map getDefaultSerdeProps() { return defaultSerdeProps; } - /** - * @param serdeProps - * the default serde properties to set - */ - public void setDefaultSerdeProps(Map serdeProps) { - this.defaultSerdeProps = serdeProps; - } - @Explain(displayName = "like") public String getLikeTableName() { return likeTableName; } - public void setLikeTableName(String likeTableName) { - this.likeTableName = likeTableName; - } - - /** - * @return the table properties - */ @Explain(displayName = "table properties") public Map getTblProps() { return tblProps; } - /** - * @param tblProps - * the table properties to set - */ - public void setTblProps(Map tblProps) { - this.tblProps = tblProps; - } - - /** - * @return the isTemporary - */ @Explain(displayName = "isTemporary", displayOnlyOnTrue = true) public boolean isTemporary() { return isTemporary; } - /** - * @param isTemporary table is Temporary or not. - */ - public void setTemporary(boolean isTemporary) { - this.isTemporary = isTemporary; - } - - /** - * True if user has specified storage format in query - * @return boolean - */ public boolean isUserStorageFormat() { return this.isUserStorageFormat; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java new file mode 100644 index 0000000000..6ac6b101a7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.PartitionManagementTask; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.SerDeSpec; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hive.common.util.AnnotationUtils; + +/** + * Operation process of creating a table like an existing one. 
+ */ +public class CreateTableLikeOperation extends DDLOperation { + private final CreateTableLikeDesc desc; + + public CreateTableLikeOperation(DDLOperationContext context, CreateTableLikeDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // Get the existing table + Table oldtbl = context.getDb().getTable(desc.getLikeTableName()); + Table tbl; + if (oldtbl.getTableType() == TableType.VIRTUAL_VIEW || oldtbl.getTableType() == TableType.MATERIALIZED_VIEW) { + tbl = createViewLikeTable(oldtbl); + } else { + tbl = createTableLikeTable(oldtbl); + } + + // If location is specified - ensure that it is a full qualified name + if (CreateTableOperation.doesTableNeedLocation(tbl)) { + CreateTableOperation.makeLocationQualified(tbl, context.getConf()); + } + + if (desc.getLocation() == null && !tbl.isPartitioned() && + context.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), + MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); + } + + // create the table + context.getDb().createTable(tbl, desc.getIfNotExists()); + DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context); + return 0; + } + + private Table createViewLikeTable(Table oldtbl) throws HiveException { + Table tbl; + String targetTableName = desc.getTableName(); + tbl = context.getDb().newTable(targetTableName); + + if (desc.getTblProps() != null) { + tbl.getTTable().getParameters().putAll(desc.getTblProps()); + } + + tbl.setTableType(TableType.MANAGED_TABLE); + + if (desc.isExternal()) { + tbl.setProperty("EXTERNAL", "TRUE"); + tbl.setTableType(TableType.EXTERNAL_TABLE); + // partition discovery is on by default + tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); + } + + tbl.setFields(oldtbl.getCols()); + tbl.setPartCols(oldtbl.getPartCols()); + + if (desc.getDefaultSerName() == null) { + LOG.info("Default to LazySimpleSerDe for table {}", targetTableName); + tbl.setSerializationLib(LazySimpleSerDe.class.getName()); + } else { + // let's validate that the serde exists + DDLUtils.validateSerDe(desc.getDefaultSerName(), context); + tbl.setSerializationLib(desc.getDefaultSerName()); + } + + if (desc.getDefaultSerdeProps() != null) { + for (Map.Entry e : desc.getDefaultSerdeProps().entrySet()) { + tbl.setSerdeParam(e.getKey(), e.getValue()); + } + } + + tbl.setInputFormatClass(desc.getDefaultInputFormat()); + tbl.setOutputFormatClass(desc.getDefaultOutputFormat()); + tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName()); + tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName()); + + return tbl; + } + + private Table createTableLikeTable(Table oldtbl) throws SemanticException, HiveException { + Table tbl = oldtbl; + + // find out database name and table name of target table + String targetTableName = desc.getTableName(); + String[] names = Utilities.getDbTableName(targetTableName); + + tbl.setDbName(names[0]); + tbl.setTableName(names[1]); + + // using old table object, hence reset the owner to current user for new table. 
+ tbl.setOwner(SessionState.getUserFromAuthenticator()); + + if (desc.getLocation() != null) { + tbl.setDataLocation(new Path(desc.getLocation())); + } else { + tbl.unsetDataLocation(); + } + + Class serdeClass; + try { + serdeClass = oldtbl.getDeserializerClass(); + } catch (Exception e) { + throw new HiveException(e); + } + // We should copy only those table parameters that are specified in the config. + SerDeSpec spec = AnnotationUtils.getAnnotation(serdeClass, SerDeSpec.class); + + Set retainer = new HashSet(); + // for non-native table, property storage_handler should be retained + retainer.add(META_TABLE_STORAGE); + if (spec != null && spec.schemaProps() != null) { + retainer.addAll(Arrays.asList(spec.schemaProps())); + } + + String paramsStr = HiveConf.getVar(context.getConf(), HiveConf.ConfVars.DDL_CTL_PARAMETERS_WHITELIST); + if (paramsStr != null) { + retainer.addAll(Arrays.asList(paramsStr.split(","))); + } + + Map params = tbl.getParameters(); + if (!retainer.isEmpty()) { + params.keySet().retainAll(retainer); + } else { + params.clear(); + } + + if (desc.getTblProps() != null) { + params.putAll(desc.getTblProps()); + } + + if (desc.isUserStorageFormat()) { + tbl.setInputFormatClass(desc.getDefaultInputFormat()); + tbl.setOutputFormatClass(desc.getDefaultOutputFormat()); + tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName()); + tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName()); + if (desc.getDefaultSerName() == null) { + LOG.info("Default to LazySimpleSerDe for like table {}", targetTableName); + tbl.setSerializationLib(LazySimpleSerDe.class.getName()); + } else { + // let's validate that the serde exists + DDLUtils.validateSerDe(desc.getDefaultSerName(), context); + tbl.setSerializationLib(desc.getDefaultSerName()); + } + } + + tbl.getTTable().setTemporary(desc.isTemporary()); + tbl.getTTable().unsetId(); + + if (desc.isExternal()) { + tbl.setProperty("EXTERNAL", "TRUE"); + tbl.setTableType(TableType.EXTERNAL_TABLE); + // partition discovery is on by default + tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); + } else { + tbl.getParameters().remove("EXTERNAL"); + } + + return tbl; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java new file mode 100644 index 0000000000..af39c16570 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.conf.Constants; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; + +/** + * Operation process of creating a table. + */ +public class CreateTableOperation extends DDLOperation { + private final CreateTableDesc desc; + + public CreateTableOperation(DDLOperationContext context, CreateTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // create the table + Table tbl = desc.toTable(context.getConf()); + LOG.debug("creating table {} on {}", tbl.getFullyQualifiedName(), tbl.getDataLocation()); + + if (desc.getReplicationSpec().isInReplicationScope() && (!desc.getReplaceMode())){ + // if this is a replication spec, then replace-mode semantics might apply. + // if we're already asking for a table replacement, then we can skip this check. + // however, otherwise, if in replication scope, and we've not been explicitly asked + // to replace, we should check if the object we're looking at exists, and if so, + // trigger replace-mode semantics. + Table existingTable = context.getDb().getTable(tbl.getDbName(), tbl.getTableName(), false); + if (existingTable != null){ + if (desc.getReplicationSpec().allowEventReplacementInto(existingTable.getParameters())) { + desc.setReplaceMode(true); // we replace existing table. + ReplicationSpec.copyLastReplId(existingTable.getParameters(), tbl.getParameters()); + } else { + LOG.debug("DDLTask: Create Table is skipped as table {} is newer than update", desc.getTableName()); + return 0; // no replacement, the existing table state is newer than our update. + } + } + } + + // create the table + if (desc.getReplaceMode()) { + createTableReplaceMode(tbl); + } else { + createTableNonReplaceMode(tbl); + } + + DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context); + return 0; + } + + private void createTableReplaceMode(Table tbl) throws HiveException { + ReplicationSpec replicationSpec = desc.getReplicationSpec(); + long writeId = 0; + EnvironmentContext environmentContext = null; + if (replicationSpec != null && replicationSpec.isInReplicationScope()) { + if (replicationSpec.isMigratingToTxnTable()) { + // for migration we start the transaction and allocate write id in repl txn task for migration. 
String writeIdPara = context.getConf().get(ReplUtils.REPL_CURRENT_TBL_WRITE_ID); + if (writeIdPara == null) { + throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration"); + } + writeId = Long.parseLong(writeIdPara); + } else { + writeId = desc.getReplWriteId(); + } + + // In case of replication, statistics are obtained from the source, so do not update them + // on the replica. Since we are not replicating statistics for transactional tables, do not do + // so for them right now. + if (!AcidUtils.isTransactionalTable(desc)) { + environmentContext = new EnvironmentContext(); + environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); + } + } + + // replace-mode creates are really alters using CreateTableDesc. + context.getDb().alterTable(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), tbl, false, environmentContext, + true, writeId); + } + + private void createTableNonReplaceMode(Table tbl) throws HiveException { + if (CollectionUtils.isNotEmpty(desc.getPrimaryKeys()) || + CollectionUtils.isNotEmpty(desc.getForeignKeys()) || + CollectionUtils.isNotEmpty(desc.getUniqueConstraints()) || + CollectionUtils.isNotEmpty(desc.getNotNullConstraints()) || + CollectionUtils.isNotEmpty(desc.getDefaultConstraints()) || + CollectionUtils.isNotEmpty(desc.getCheckConstraints())) { + context.getDb().createTable(tbl, desc.getIfNotExists(), desc.getPrimaryKeys(), desc.getForeignKeys(), + desc.getUniqueConstraints(), desc.getNotNullConstraints(), desc.getDefaultConstraints(), + desc.getCheckConstraints()); + } else { + context.getDb().createTable(tbl, desc.getIfNotExists()); + } + + if (desc.isCTAS()) { + Table createdTable = context.getDb().getTable(tbl.getDbName(), tbl.getTableName()); + DataContainer dc = new DataContainer(createdTable.getTTable()); + context.getQueryState().getLineageState().setLineage(createdTable.getPath(), dc, createdTable.getCols()); + } + } + + public static boolean doesTableNeedLocation(Table tbl) { + // TODO: If we are ok with breaking compatibility of existing 3rd party StorageHandlers, + // this method could be moved to the HiveStorageHandler interface. + boolean retval = true; + if (tbl.getStorageHandler() != null) { + // TODO: why doesn't this check class name rather than toString? + String sh = tbl.getStorageHandler().toString(); + retval = !"org.apache.hadoop.hive.hbase.HBaseStorageHandler".equals(sh) && + !Constants.DRUID_HIVE_STORAGE_HANDLER_ID.equals(sh) && + !Constants.JDBC_HIVE_STORAGE_HANDLER_ID.equals(sh) && + !"org.apache.hadoop.hive.accumulo.AccumuloStorageHandler".equals(sh); + } + return retval; + } + + public static void makeLocationQualified(Table table, HiveConf conf) throws HiveException { + StorageDescriptor sd = table.getTTable().getSd(); + // If the table's location is currently unset, it is left unset, allowing the metastore to + // fill in the table's location. + // Note that the previous logic for some reason would make a special case if the DB was the + // default database, and actually attempt to generate a location. + // This seems incorrect and unnecessary, since the metastore is just as able to fill in the + // default table location in the case of the default DB, as it is for non-default DBs. 
+ Path path = null; + if (sd.isSetLocation()) { + path = new Path(sd.getLocation()); + } + if (path != null) { + sd.setLocation(Utilities.getQualifiedPath(conf, path)); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java similarity index 52% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java index ee502323e1..0cfffd2032 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java @@ -16,166 +16,85 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; import java.io.Serializable; import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * DescTableDesc. - * + * DDL task description for DESC table_name commands. */ @Explain(displayName = "Describe Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DescTableDesc extends DDLDesc implements Serializable { - public void setPartSpec(Map partSpec) { - this.partSpec = partSpec; - } - +public class DescTableDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String tableName; - Map partSpec; - String resFile; - - String colPath; - boolean isExt; - boolean isFormatted; - - /** - * table name for the result of describe table. - */ - private static final String table = "describe"; - /** - * thrift ddl for the result of describe table. 
- */ - private static final String schema = "col_name,data_type,comment#string:string:string"; - private static final String colStatsSchema = "col_name,data_type,min,max,num_nulls," - + "distinct_count,avg_col_len,max_col_len,num_trues,num_falses,bitVector,comment" - + "#string:string:string:string:string:string:string:string:string:string:string:string"; - - public DescTableDesc() { + static { + DDLTask2.registerOperation(DescTableDesc.class, DescTableOperation.class); } - /** - * @param partSpec - * @param resFile - * @param tableName - */ - public DescTableDesc(Path resFile, String tableName, - Map partSpec, String colPath) { - this.isExt = false; - this.isFormatted = false; - this.partSpec = partSpec; + private final String resFile; + private final String tableName; + private final Map partSpec; + private final String colPath; + private final boolean isExt; + private final boolean isFormatted; + + public DescTableDesc(Path resFile, String tableName, Map partSpec, String colPath, boolean isExt, + boolean isFormatted) { this.resFile = resFile.toString(); this.tableName = tableName; + this.partSpec = partSpec; this.colPath = colPath; - } - - public String getTable() { - return table; - } - - public static String getSchema(boolean colStats) { - if (colStats) { - return colStatsSchema; - } - return schema; - } - - /** - * @return the isExt - */ - public boolean isExt() { - return isExt; - } - - /** - * @param isExt - * the isExt to set - */ - public void setExt(boolean isExt) { this.isExt = isExt; + this.isFormatted = isFormatted; } - /** - * @return the isFormatted - */ - public boolean isFormatted() { - return isFormatted; - } - - /** - * @param isFormat - * the isFormat to set - */ - public void setFormatted(boolean isFormat) { - this.isFormatted = isFormat; + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; } - /** - * @return the tableName - */ @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { return tableName; } - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @param colPath - * the colPath to set - */ - public void setColPath(String colPath) { - this.colPath = colPath; + @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Map getPartSpec() { + return partSpec; } - /** - * @return the columnPath - */ public String getColumnPath() { return colPath; } - /** - * @return the partSpec - */ - @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public Map getPartSpec() { - return partSpec; + public boolean isExt() { + return isExt; } - /** - * @param partSpec - * the partSpec to set - */ - public void setPartSpecs(Map partSpec) { - this.partSpec = partSpec; + public boolean isFormatted() { + return isFormatted; } /** - * @return the resFile + * thrift ddl for the result of describe table. 
*/ - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) - public String getResFile() { - return resFile; - } + private static final String SCHEMA = "col_name,data_type,comment#string:string:string"; + private static final String COL_STATS_SCHEMA = "col_name,data_type,min,max,num_nulls," + + "distinct_count,avg_col_len,max_col_len,num_trues,num_falses,bitVector,comment" + + "#string:string:string:string:string:string:string:string:string:string:string:string"; - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; + public static String getSchema(boolean colStats) { + if (colStats) { + return COL_STATS_SCHEMA; + } + return SCHEMA; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java new file mode 100644 index 0000000000..1d94ff3a5b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java @@ -0,0 +1,280 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.DataOutputStream; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.StatObjectConverter; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.CheckConstraint; +import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; +import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.PartitionIterable; +import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo; +import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.metadata.UniqueConstraint; +import org.apache.hadoop.hive.ql.plan.ColStatistics; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.stats.StatsUtils; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.AbstractSerDe; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.apache.hadoop.io.IOUtils; + +/** + * Operation process of describing a table. 
+ */ +public class DescTableOperation extends DDLOperation { + private final DescTableDesc desc; + + public DescTableOperation(DDLOperationContext context, DescTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws Exception { + String colPath = desc.getColumnPath(); + String tableName = desc.getTableName(); + + // describe the table - populate the output stream + Table tbl = context.getDb().getTable(tableName, false); + if (tbl == null) { + throw new HiveException(ErrorMsg.INVALID_TABLE, tableName); + } + Partition part = null; + if (desc.getPartSpec() != null) { + part = context.getDb().getPartition(tbl, desc.getPartSpec(), false); + if (part == null) { + throw new HiveException(ErrorMsg.INVALID_PARTITION, + StringUtils.join(desc.getPartSpec().keySet(), ','), tableName); + } + tbl = part.getTable(); + } + + DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context); + try { + LOG.debug("DDLTask: got data for {}", tableName); + + List cols = null; + List colStats = null; + + Deserializer deserializer = tbl.getDeserializer(true); + if (deserializer instanceof AbstractSerDe) { + String errorMsgs = ((AbstractSerDe) deserializer).getConfigurationErrors(); + if (errorMsgs != null && !errorMsgs.isEmpty()) { + throw new SQLException(errorMsgs); + } + } + + if (colPath.equals(tableName)) { + cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ? + tbl.getCols() : part.getCols(); + + if (!desc.isFormatted()) { + cols.addAll(tbl.getPartCols()); + } + + if (tbl.isPartitioned() && part == null) { + // No partition specified for partitioned table, let's fetch all. + Map tblProps = tbl.getParameters() == null ? + new HashMap() : tbl.getParameters(); + Map valueMap = new HashMap<>(); + Map stateMap = new HashMap<>(); + for (String stat : StatsSetupConst.SUPPORTED_STATS) { + valueMap.put(stat, 0L); + stateMap.put(stat, true); + } + PartitionIterable parts = new PartitionIterable(context.getDb(), tbl, null, + context.getConf().getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); + int numParts = 0; + for (Partition partition : parts) { + Map props = partition.getParameters(); + Boolean state = StatsSetupConst.areBasicStatsUptoDate(props); + for (String stat : StatsSetupConst.SUPPORTED_STATS) { + stateMap.put(stat, stateMap.get(stat) && state); + if (props != null && props.get(stat) != null) { + valueMap.put(stat, valueMap.get(stat) + Long.parseLong(props.get(stat))); + } + } + numParts++; + } + for (String stat : StatsSetupConst.SUPPORTED_STATS) { + StatsSetupConst.setBasicStatsState(tblProps, Boolean.toString(stateMap.get(stat))); + tblProps.put(stat, valueMap.get(stat).toString()); + } + tblProps.put(StatsSetupConst.NUM_PARTITIONS, Integer.toString(numParts)); + tbl.setParameters(tblProps); + } + } else { + if (desc.isFormatted()) { + // when a column name is specified in describe table DDL, colPath + // will be table_name.column_name + String colName = colPath.split("\\.")[1]; + String[] dbTab = Utilities.getDbTableName(tableName); + List colNames = new ArrayList(); + colNames.add(colName.toLowerCase()); + if (null == part) { + if (tbl.isPartitioned()) { + Map tblProps = tbl.getParameters() == null ? 
+ new HashMap() : tbl.getParameters(); + if (tbl.isPartitionKey(colNames.get(0))) { + FieldSchema partCol = tbl.getPartColByName(colNames.get(0)); + cols = Collections.singletonList(partCol); + PartitionIterable parts = new PartitionIterable(context.getDb(), tbl, null, + context.getConf().getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); + ColumnInfo ci = new ColumnInfo(partCol.getName(), + TypeInfoUtils.getTypeInfoFromTypeString(partCol.getType()), null, false); + ColStatistics cs = StatsUtils.getColStatsForPartCol(ci, parts, context.getConf()); + ColumnStatisticsData data = new ColumnStatisticsData(); + ColStatistics.Range r = cs.getRange(); + StatObjectConverter.fillColumnStatisticsData(partCol.getType(), data, r == null ? null : r.minValue, + r == null ? null : r.maxValue, r == null ? null : r.minValue, r == null ? null : r.maxValue, + r == null ? null : r.minValue.toString(), r == null ? null : r.maxValue.toString(), + cs.getNumNulls(), cs.getCountDistint(), null, cs.getAvgColLen(), cs.getAvgColLen(), + cs.getNumTrues(), cs.getNumFalses()); + ColumnStatisticsObj cso = new ColumnStatisticsObj(partCol.getName(), partCol.getType(), data); + colStats = Collections.singletonList(cso); + StatsSetupConst.setColumnStatsState(tblProps, colNames); + } else { + cols = Hive.getFieldsFromDeserializer(colPath, deserializer); + List parts = context.getDb().getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), + (short) -1); + AggrStats aggrStats = context.getDb().getAggrColStatsFor( + dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false); + colStats = aggrStats.getColStats(); + if (parts.size() == aggrStats.getPartsFound()) { + StatsSetupConst.setColumnStatsState(tblProps, colNames); + } else { + StatsSetupConst.removeColumnStatsState(tblProps, colNames); + } + } + tbl.setParameters(tblProps); + } else { + cols = Hive.getFieldsFromDeserializer(colPath, deserializer); + colStats = context.getDb().getTableColumnStatistics( + dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, false); + } + } else { + List partitions = new ArrayList(); + partitions.add(part.getName()); + cols = Hive.getFieldsFromDeserializer(colPath, deserializer); + colStats = context.getDb().getPartitionColumnStatistics(dbTab[0].toLowerCase(), + dbTab[1].toLowerCase(), partitions, colNames, false).get(part.getName()); + } + } else { + cols = Hive.getFieldsFromDeserializer(colPath, deserializer); + } + } + PrimaryKeyInfo pkInfo = null; + ForeignKeyInfo fkInfo = null; + UniqueConstraint ukInfo = null; + NotNullConstraint nnInfo = null; + DefaultConstraint dInfo = null; + CheckConstraint cInfo = null; + StorageHandlerInfo storageHandlerInfo = null; + if (desc.isExt() || desc.isFormatted()) { + pkInfo = context.getDb().getPrimaryKeys(tbl.getDbName(), tbl.getTableName()); + fkInfo = context.getDb().getForeignKeys(tbl.getDbName(), tbl.getTableName()); + ukInfo = context.getDb().getUniqueConstraints(tbl.getDbName(), tbl.getTableName()); + nnInfo = context.getDb().getNotNullConstraints(tbl.getDbName(), tbl.getTableName()); + dInfo = context.getDb().getDefaultConstraints(tbl.getDbName(), tbl.getTableName()); + cInfo = context.getDb().getCheckConstraints(tbl.getDbName(), tbl.getTableName()); + storageHandlerInfo = context.getDb().getStorageHandlerInfo(tbl); + } + fixDecimalColumnTypeName(cols); + // Information for materialized views + if (tbl.isMaterializedView()) { + final String validTxnsList = context.getDb().getConf().get(ValidTxnList.VALID_TXNS_KEY); + if (validTxnsList != null) { + List 
tablesUsed = new ArrayList<>(tbl.getCreationMetadata().getTablesUsed()); + ValidTxnWriteIdList currentTxnWriteIds = + SessionState.get().getTxnMgr().getValidWriteIds(tablesUsed, validTxnsList); + long defaultTimeWindow = HiveConf.getTimeVar(context.getDb().getConf(), + HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW, TimeUnit.MILLISECONDS); + tbl.setOutdatedForRewriting(Hive.isOutdatedMaterializedView(tbl, + currentTxnWriteIds, defaultTimeWindow, tablesUsed, false)); + } + } + // In case the query is served by HiveServer2, don't pad it with spaces, + // as HiveServer2 output is consumed by JDBC/ODBC clients. + boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); + context.getFormatter().describeTable(outStream, colPath, tableName, tbl, part, + cols, desc.isFormatted(), desc.isExt(), isOutputPadded, + colStats, pkInfo, fkInfo, ukInfo, nnInfo, dInfo, cInfo, + storageHandlerInfo); + + LOG.debug("DDLTask: written data for {}", tableName); + + } catch (SQLException e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, tableName); + } finally { + IOUtils.closeStream(outStream); + } + + return 0; + } + + /** + * Fix the type name of a column of type decimal w/o precision/scale specified. This makes + * the describe table show "decimal(10,0)" instead of "decimal" even if the type stored + * in metastore is "decimal", which is possible with previous hive. + * + * @param cols columns that to be fixed as such + */ + private static void fixDecimalColumnTypeName(List cols) { + for (FieldSchema col : cols) { + if (serdeConstants.DECIMAL_TYPE_NAME.equals(col.getType())) { + col.setType(DecimalTypeInfo.getQualifiedName(HiveDecimal.USER_DEFAULT_PRECISION, + HiveDecimal.USER_DEFAULT_SCALE)); + } + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java new file mode 100644 index 0000000000..f910c57f3a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; + +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for DROP TABLE commands. 
+ */ +@Explain(displayName = "Drop Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropTableDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(DropTableDesc.class, DropTableOperation.class); + } + + private final String tableName; + private final TableType expectedType; + private final boolean ifExists; + private final boolean ifPurge; + private final ReplicationSpec replicationSpec; + private final boolean validationRequired; + + public DropTableDesc(String tableName, TableType expectedType, boolean ifExists, boolean ifPurge, + ReplicationSpec replicationSpec) { + this(tableName, expectedType, ifExists, ifPurge, replicationSpec, true); + } + + public DropTableDesc(String tableName, TableType expectedType, boolean ifExists, boolean ifPurge, + ReplicationSpec replicationSpec, boolean validationRequired) { + this.tableName = tableName; + this.expectedType = expectedType; + this.ifExists = ifExists; + this.ifPurge = ifPurge; + this.replicationSpec = replicationSpec == null ? new ReplicationSpec() : replicationSpec; + this.validationRequired = validationRequired; + } + + @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + public boolean getExpectView() { + return expectedType != null && expectedType == TableType.VIRTUAL_VIEW; + } + + public boolean getExpectMaterializedView() { + return expectedType != null && expectedType == TableType.MATERIALIZED_VIEW; + } + + public boolean getIfExists() { + return ifExists; + } + + public boolean getIfPurge() { + return ifPurge; + } + + /** + * @return what kind of replication scope this drop is running under. + * This can result in a "DROP IF OLDER THAN" kind of semantic + */ + public ReplicationSpec getReplicationSpec(){ + return this.replicationSpec; + } + + public boolean getValidationRequired(){ + return this.validationRequired; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java new file mode 100644 index 0000000000..d250772b2a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; +import org.apache.hadoop.hive.ql.metadata.InvalidTableException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.PartitionIterable; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; + +import com.google.common.collect.Iterables; + +/** + * Operation process of dropping a table. + */ +public class DropTableOperation extends DDLOperation { + private final DropTableDesc desc; + + public DropTableOperation(DDLOperationContext context, DropTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Table tbl = null; + try { + tbl = context.getDb().getTable(desc.getTableName()); + } catch (InvalidTableException e) { + // drop table is idempotent + } + + // This is a true DROP TABLE + if (tbl != null && desc.getValidationRequired()) { + if (tbl.isView()) { + if (!desc.getExpectView()) { + if (desc.getIfExists()) { + return 0; + } + if (desc.getExpectMaterializedView()) { + throw new HiveException("Cannot drop a view with DROP MATERIALIZED VIEW"); + } else { + throw new HiveException("Cannot drop a view with DROP TABLE"); + } + } + } else if (tbl.isMaterializedView()) { + if (!desc.getExpectMaterializedView()) { + if (desc.getIfExists()) { + return 0; + } + if (desc.getExpectView()) { + throw new HiveException("Cannot drop a materialized view with DROP VIEW"); + } else { + throw new HiveException("Cannot drop a materialized view with DROP TABLE"); + } + } + } else { + if (desc.getExpectView()) { + if (desc.getIfExists()) { + return 0; + } + throw new HiveException("Cannot drop a base table with DROP VIEW"); + } else if (desc.getExpectMaterializedView()) { + if (desc.getIfExists()) { + return 0; + } + throw new HiveException("Cannot drop a base table with DROP MATERIALIZED VIEW"); + } + } + } + + ReplicationSpec replicationSpec = desc.getReplicationSpec(); + if (tbl != null && replicationSpec.isInReplicationScope()) { + /** + * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely + * matches a DROP TABLE IF OLDER THAN(x) semantic. + * + * Ideally, commands executed under the scope of replication need to be idempotent and resilient + * to repeats. What can happen, sometimes, is that a drone processing a replication task can + * have been abandoned for not returning in time, but still execute its task after a while, + * which should not result in it mucking up data that has been impressed later on. So, for example, + * if we create partition P1, followed by dropping it, followed by creating it yet again, + * the replication of that drop should not drop the newer partition if it runs after the destination + * object is already in the newer state. 
+ * + * Thus, we check the replicationSpec.allowEventReplacementInto to determine whether or not we can + * drop the object in question (will return false if object is newer than the event, true if not) + * + * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP + * TABLE will always drop the table, and the included partitions, DROP TABLE FOR REPLICATION must + * do one more thing - if it does not drop the table because the table is in a newer state, it must + * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL + * acts like a recursive DROP TABLE IF OLDER. + */ + if (!replicationSpec.allowEventReplacementInto(tbl.getParameters())) { + // Drop occurred as part of replicating a drop, but the destination + // table was newer than the event being replicated. Ignore, but drop + // any partitions inside that are older. + if (tbl.isPartitioned()) { + PartitionIterable partitions = new PartitionIterable(context.getDb(), tbl, null, + context.getConf().getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); + for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())){ + context.getDb().dropPartition(tbl.getDbName(), tbl.getTableName(), p.getValues(), true); + } + } + LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", desc.getTableName()); + return 0; // table is newer, leave it be. + } + } + + // drop the table + // TODO: API w/catalog name + context.getDb().dropTable(desc.getTableName(), desc.getIfPurge()); + if (tbl != null) { + // Remove from cache if it is a materialized view + if (tbl.isMaterializedView()) { + HiveMaterializedViewsRegistry.get().dropMaterializedView(tbl); + } + // We have already locked the table in DDLSemanticAnalyzer, don't do it again here + DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java similarity index 67% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java index 723678e119..2a8b02e067 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java @@ -16,51 +16,46 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; import java.io.Serializable; import java.util.Map; -import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * LockTableDesc. - * + * DDL task description for LOCK TABLE commands. 
*/ @Explain(displayName = "Lock Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class LockTableDesc extends DDLDesc implements Serializable { +public class LockTableDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - private String tableName; - private String mode; - private Map partSpec; - private String queryId; - private String queryStr; - - public LockTableDesc() { + static { + DDLTask2.registerOperation(LockTableDesc.class, LockTableOperation.class); } - public LockTableDesc(String tableName, String mode, Map partSpec, String queryId) { + private final String tableName; + private final String mode; + private final Map partSpec; + private final String queryId; + private final String queryStr; + + public LockTableDesc(String tableName, String mode, Map partSpec, String queryId, String queryStr) { this.tableName = tableName; this.mode = mode; this.partSpec = partSpec; this.queryId = queryId; + this.queryStr = queryStr; } public String getTableName() { return tableName; } - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public void setMode(String mode) { - this.mode = mode; - } - public String getMode() { return mode; } @@ -69,23 +64,11 @@ public String getMode() { return partSpec; } - public void setPartSpec(Map partSpec) { - this.partSpec = partSpec; - } - public String getQueryId() { return queryId; } - public void setQueryId(String queryId) { - this.queryId = queryId; - } - public String getQueryStr() { return queryStr; } - - public void setQueryStr(String queryStr) { - this.queryStr = queryStr; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java new file mode 100644 index 0000000000..2044a81406 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of locking a table. 
+ */ +public class LockTableOperation extends DDLOperation { + private final LockTableDesc desc; + + public LockTableOperation(DDLOperationContext context, LockTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Context ctx = context.getDriverContext().getCtx(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + return txnManager.lockTable(context.getDb(), desc); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java similarity index 67% rename from ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java index 2c8e1e18d0..4bb609ef54 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,16 +16,25 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.parse; +package org.apache.hadoop.hive.ql.ddl.table; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for PRE INSERT commands. + */ +@Explain(displayName = "Pre-Insert task", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class PreInsertTableDesc implements DDLDesc { + static { + DDLTask2.registerOperation(PreInsertTableDesc.class, PreInsertTableOperation.class); + } -@Explain(displayName = "Pre-Insert task", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED }) -public class PreInsertTableDesc extends DDLDesc { - private final boolean isOverwrite; private final Table table; + private final boolean isOverwrite; public PreInsertTableDesc(Table table, boolean overwrite) { this.table = table; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java new file mode 100644 index 0000000000..5d85d0a14c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook; +import org.apache.hadoop.hive.metastore.HiveMetaHook; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of pre inserting a table. + */ +public class PreInsertTableOperation extends DDLOperation { + private final PreInsertTableDesc desc; + + public PreInsertTableOperation(DDLOperationContext context, PreInsertTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + try { + HiveMetaHook hook = desc.getTable().getStorageHandler().getMetaHook(); + if (hook == null || !(hook instanceof DefaultHiveMetaHook)) { + return 0; + } + + DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook; + hiveMetaHook.preInsertTable(desc.getTable().getTTable(), desc.isOverwrite()); + } catch (MetaException e) { + throw new HiveException(e); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java similarity index 58% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java index f96c5296a7..8fa1ef16aa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java @@ -16,84 +16,46 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; import java.io.Serializable; -import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * ShowCreateTableDesc. - * + * DDL task description for SHOW CREATE TABLE commands. */ @Explain(displayName = "Show Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowCreateTableDesc extends DDLDesc implements Serializable { +public class ShowCreateTableDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String resFile; - String tableName; - - /** - * table name for the result of showcreatetable. - */ - private static final String table = "show_create_table"; - /** - * thrift ddl for the result of showcreatetable. - */ - private static final String schema = "createtab_stmt#string"; - - public String getTable() { - return table; - } - public String getSchema() { - return schema; + static { + DDLTask2.registerOperation(ShowCreateTableDesc.class, ShowCreateTableOperation.class); } /** - * For serialization use only. + * Thrift ddl for the result of showcreatetable. 
*/ - public ShowCreateTableDesc() { - } + public static final String SCHEMA = "createtab_stmt#string"; + + private final String resFile; + private final String tableName; - /** - * @param resFile - * @param tableName - * name of table to show - */ public ShowCreateTableDesc(String tableName, String resFile) { this.tableName = tableName; this.resFile = resFile; } - /** - * @return the resFile - */ @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; } - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return the tableName - */ @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { return tableName; } - - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java new file mode 100644 index 0000000000..932d9428a9 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java @@ -0,0 +1,278 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; + +import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.Map.Entry; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hive.common.util.HiveStringUtils; +import org.stringtemplate.v4.ST; + +/** + * Operation process showing the creation of a table. + */ +public class ShowCreateTableOperation extends DDLOperation { + private static final String EXTERNAL = "external"; + private static final String TEMPORARY = "temporary"; + private static final String LIST_COLUMNS = "columns"; + private static final String TBL_COMMENT = "tbl_comment"; + private static final String LIST_PARTITIONS = "partitions"; + private static final String SORT_BUCKET = "sort_bucket"; + private static final String SKEWED_INFO = "tbl_skewedinfo"; + private static final String ROW_FORMAT = "row_format"; + private static final String TBL_LOCATION = "tbl_location"; + private static final String TBL_PROPERTIES = "tbl_properties"; + + private final ShowCreateTableDesc desc; + + public ShowCreateTableOperation(DDLOperationContext context, ShowCreateTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // get the create table statement for the table and populate the output + try (DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + return showCreateTable(outStream); + } catch (Exception e) { + throw new HiveException(e); + } + } + + private int showCreateTable(DataOutputStream outStream) throws HiveException { + boolean needsLocation = true; + StringBuilder createTabCommand = new StringBuilder(); + + Table tbl = context.getDb().getTable(desc.getTableName(), false); + List duplicateProps = new ArrayList(); + try { + needsLocation = CreateTableOperation.doesTableNeedLocation(tbl); + + if (tbl.isView()) { + String createTabStmt = "CREATE VIEW `" + desc.getTableName() + "` AS " + tbl.getViewExpandedText(); + outStream.write(createTabStmt.getBytes(StandardCharsets.UTF_8)); + return 0; + } + + createTabCommand.append("CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `"); + createTabCommand.append(desc.getTableName() + "`(\n"); + createTabCommand.append("<" + LIST_COLUMNS + ">)\n"); + createTabCommand.append("<" + TBL_COMMENT + ">\n"); + createTabCommand.append("<" + LIST_PARTITIONS + ">\n"); + createTabCommand.append("<" + SORT_BUCKET + ">\n"); + 
createTabCommand.append("<" + SKEWED_INFO + ">\n"); + createTabCommand.append("<" + ROW_FORMAT + ">\n"); + if (needsLocation) { + createTabCommand.append("LOCATION\n"); + createTabCommand.append("<" + TBL_LOCATION + ">\n"); + } + createTabCommand.append("TBLPROPERTIES (\n"); + createTabCommand.append("<" + TBL_PROPERTIES + ">)\n"); + ST createTabStmt = new ST(createTabCommand.toString()); + + // For cases where the table is temporary + String tblTemp = ""; + if (tbl.isTemporary()) { + duplicateProps.add("TEMPORARY"); + tblTemp = "TEMPORARY "; + } + // For cases where the table is external + String tblExternal = ""; + if (tbl.getTableType() == TableType.EXTERNAL_TABLE) { + duplicateProps.add("EXTERNAL"); + tblExternal = "EXTERNAL "; + } + + // Columns + String tblColumns = ""; + List cols = tbl.getCols(); + List columns = new ArrayList(); + for (FieldSchema col : cols) { + String columnDesc = " `" + col.getName() + "` " + col.getType(); + if (col.getComment() != null) { + columnDesc = columnDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'"; + } + columns.add(columnDesc); + } + tblColumns = StringUtils.join(columns, ", \n"); + + // Table comment + String tblComment = ""; + String tabComment = tbl.getProperty("comment"); + if (tabComment != null) { + duplicateProps.add("comment"); + tblComment = "COMMENT '" + HiveStringUtils.escapeHiveCommand(tabComment) + "'"; + } + + // Partitions + String tblPartitions = ""; + List partKeys = tbl.getPartitionKeys(); + if (partKeys.size() > 0) { + tblPartitions += "PARTITIONED BY ( \n"; + List partCols = new ArrayList(); + for (FieldSchema partKey : partKeys) { + String partColDesc = " `" + partKey.getName() + "` " + partKey.getType(); + if (partKey.getComment() != null) { + partColDesc = partColDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(partKey.getComment()) + "'"; + } + partCols.add(partColDesc); + } + tblPartitions += StringUtils.join(partCols, ", \n"); + tblPartitions += ")"; + } + + // Clusters (Buckets) + String tblSortBucket = ""; + List buckCols = tbl.getBucketCols(); + if (buckCols.size() > 0) { + duplicateProps.add("SORTBUCKETCOLSPREFIX"); + tblSortBucket += "CLUSTERED BY ( \n "; + tblSortBucket += StringUtils.join(buckCols, ", \n "); + tblSortBucket += ") \n"; + List sortCols = tbl.getSortCols(); + if (sortCols.size() > 0) { + tblSortBucket += "SORTED BY ( \n"; + // Order + List sortKeys = new ArrayList(); + for (Order sortCol : sortCols) { + String sortKeyDesc = " " + sortCol.getCol() + " "; + if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) { + sortKeyDesc = sortKeyDesc + "ASC"; + } else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { + sortKeyDesc = sortKeyDesc + "DESC"; + } + sortKeys.add(sortKeyDesc); + } + tblSortBucket += StringUtils.join(sortKeys, ", \n"); + tblSortBucket += ") \n"; + } + tblSortBucket += "INTO " + tbl.getNumBuckets() + " BUCKETS"; + } + + // Skewed Info + StringBuilder tblSkewedInfo = new StringBuilder(); + SkewedInfo skewedInfo = tbl.getSkewedInfo(); + if (skewedInfo != null && !skewedInfo.getSkewedColNames().isEmpty()) { + tblSkewedInfo.append("SKEWED BY (" + StringUtils.join(skewedInfo.getSkewedColNames(), ",") + ")\n"); + tblSkewedInfo.append(" ON ("); + List colValueList = new ArrayList(); + for (List colValues : skewedInfo.getSkewedColValues()) { + colValueList.add("('" + StringUtils.join(colValues, "','") + "')"); + } + tblSkewedInfo.append(StringUtils.join(colValueList, ",") + ")"); + if (tbl.isStoredAsSubDirectories()) { + 
tblSkewedInfo.append("\n STORED AS DIRECTORIES"); + } + } + + // Row format (SerDe) + StringBuilder tblRowFormat = new StringBuilder(); + StorageDescriptor sd = tbl.getTTable().getSd(); + SerDeInfo serdeInfo = sd.getSerdeInfo(); + Map serdeParams = serdeInfo.getParameters(); + tblRowFormat.append("ROW FORMAT SERDE \n"); + tblRowFormat.append(" '" + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n"); + if (tbl.getStorageHandler() == null) { + // If serialization.format property has the default value, it will not to be included in + // SERDE properties + if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(serdeConstants.SERIALIZATION_FORMAT))) { + serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT); + } + if (!serdeParams.isEmpty()) { + appendSerdeParams(tblRowFormat, serdeParams).append(" \n"); + } + tblRowFormat.append("STORED AS INPUTFORMAT \n '" + + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n"); + tblRowFormat.append("OUTPUTFORMAT \n '" + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'"); + } else { + duplicateProps.add(META_TABLE_STORAGE); + tblRowFormat.append("STORED BY \n '" + + HiveStringUtils.escapeHiveCommand(tbl.getParameters().get(META_TABLE_STORAGE)) + "' \n"); + // SerDe Properties + if (!serdeParams.isEmpty()) { + appendSerdeParams(tblRowFormat, serdeInfo.getParameters()); + } + } + String tblLocation = " '" + HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'"; + + // Table properties + duplicateProps.addAll(StatsSetupConst.TABLE_PARAMS_STATS_KEYS); + String tblProperties = DDLUtils.propertiesToString(tbl.getParameters(), duplicateProps); + + createTabStmt.add(TEMPORARY, tblTemp); + createTabStmt.add(EXTERNAL, tblExternal); + createTabStmt.add(LIST_COLUMNS, tblColumns); + createTabStmt.add(TBL_COMMENT, tblComment); + createTabStmt.add(LIST_PARTITIONS, tblPartitions); + createTabStmt.add(SORT_BUCKET, tblSortBucket); + createTabStmt.add(SKEWED_INFO, tblSkewedInfo); + createTabStmt.add(ROW_FORMAT, tblRowFormat); + // Table location should not be printed with hbase backed tables + if (needsLocation) { + createTabStmt.add(TBL_LOCATION, tblLocation); + } + createTabStmt.add(TBL_PROPERTIES, tblProperties); + + outStream.write(createTabStmt.render().getBytes(StandardCharsets.UTF_8)); + } catch (IOException e) { + LOG.info("show create table: ", e); + return 1; + } + + return 0; + } + + public static StringBuilder appendSerdeParams(StringBuilder builder, Map serdeParam) { + serdeParam = new TreeMap(serdeParam); + builder.append("WITH SERDEPROPERTIES ( \n"); + List serdeCols = new ArrayList(); + for (Entry entry : serdeParam.entrySet()) { + serdeCols.add(" '" + entry.getKey() + "'='" + HiveStringUtils.escapeHiveCommand(entry.getValue()) + "'"); + } + builder.append(StringUtils.join(serdeCols, ", \n")).append(')'); + return builder; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTblPropertiesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java similarity index 51% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTblPropertiesDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java index aac0cf2b84..72caa58607 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTblPropertiesDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java @@ -16,65 +16,41 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; import java.io.Serializable; -import java.util.HashMap; -import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; - /** - * ShowTblPropertiesDesc. - * + * DDL task description for SHOW TABLE PROPERTIES commands. */ @Explain(displayName = "Show Table Properties", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowTblPropertiesDesc extends DDLDesc implements Serializable { +public class ShowTablePropertiesDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String resFile; - String tableName; - String propertyName; - - /** - * table name for the result of showtblproperties. - */ - private static final String table = "show_tableproperties"; - /** - * thrift ddl for the result of showtblproperties. - */ - private static final String schema = "prpt_name,prpt_value#string:string"; - - public String getTable() { - return table; - } - public String getSchema() { - return schema; + static { + DDLTask2.registerOperation(ShowTablePropertiesDesc.class, ShowTablePropertiesOperation.class); } /** - * For serialization use only. + * Thrift ddl for the result of showtblproperties. */ - public ShowTblPropertiesDesc() { - } + public static final String SCHEMA = "prpt_name,prpt_value#string:string"; - /** - * @param resFile - * @param tableName - * name of table to show - * @param propertyName - * name of property to show - */ - public ShowTblPropertiesDesc(String resFile, String tableName, String propertyName) { + private final String resFile; + private final String tableName; + private final String propertyName; + + public ShowTablePropertiesDesc(String resFile, String tableName, String propertyName) { this.resFile = resFile; this.tableName = tableName; this.propertyName = propertyName; } - /** - * @return the resFile - */ public String getResFile() { return resFile; } @@ -84,43 +60,13 @@ public String getResFileString() { return getResFile(); } - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return the tableName - */ @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { return tableName; } - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @return the propertyName - */ @Explain(displayName = "property name") public String getPropertyName() { return propertyName; } - - /** - * @param propertyName - * the propertyName to set - */ - public void setPropertyName(String propertyName) { - this.propertyName = propertyName; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java new file mode 100644 index 0000000000..385052d705 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+
+/**
+ * Operation process of showing the table properties.
+ */
+public class ShowTablePropertiesOperation extends DDLOperation {
+  private final ShowTablePropertiesDesc desc;
+
+  public ShowTablePropertiesOperation(DDLOperationContext context, ShowTablePropertiesDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    String tableName = desc.getTableName();
+
+    // show table properties - populate the output stream
+    Table tbl = context.getDb().getTable(tableName, false);
+    try {
+      if (tbl == null) {
+        String errMsg = "Table " + tableName + " does not exist";
+        DDLUtils.writeToFile(errMsg, desc.getResFile(), context);
+        return 0;
+      }
+
+      LOG.info("DDLTask: show properties for {}", tableName);
+
+      StringBuilder builder = new StringBuilder();
+      String propertyName = desc.getPropertyName();
+      if (propertyName != null) {
+        String propertyValue = tbl.getProperty(propertyName);
+        if (propertyValue == null) {
+          String errMsg = "Table " + tableName + " does not have property: " + propertyName;
+          builder.append(errMsg);
+        } else {
+          DDLUtils.appendNonNull(builder, propertyName, true);
+          DDLUtils.appendNonNull(builder, propertyValue);
+        }
+      } else {
+        Map<String, String> properties = new TreeMap<String, String>(tbl.getParameters());
+        for (Entry<String, String> entry : properties.entrySet()) {
+          DDLUtils.appendNonNull(builder, entry.getKey(), true);
+          DDLUtils.appendNonNull(builder, entry.getValue());
+        }
+      }
+
+      LOG.info("DDLTask: written data for showing properties of {}", tableName);
+      DDLUtils.writeToFile(builder.toString(), desc.getResFile(), context);
+    } catch (IOException e) {
+      LOG.info("show table properties: ", e);
+      return 1;
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+
+    return 0;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java
similarity index 50%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java
index 5022e289d0..8c312a0c5e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java
@@ -16,96 +16,53 @@
  * limitations under the License.
*/ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; import java.io.Serializable; -import java.util.HashMap; +import java.util.Map; -import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; - /** - * ShowTableStatusDesc. - * + * DDL task description for SHOW TABLE STATUS commands. */ @Explain(displayName = "Show Table Status", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowTableStatusDesc extends DDLDesc implements Serializable { +public class ShowTableStatusDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String pattern; - String resFile; - String dbName; - HashMap partSpec; - - /** - * table name for the result of show tables. - */ - private static final String table = "show_tablestatus"; - /** - * thrift ddl for the result of show tables. - */ - private static final String schema = "tab_name#string"; - - public String getTable() { - return table; - } - public String getSchema() { - return schema; + static { + DDLTask2.registerOperation(ShowTableStatusDesc.class, ShowTableStatusOperation.class); } /** - * For serializatino use only. + * Thrift ddl for the result of show tables. */ - public ShowTableStatusDesc() { - } + public static final String SCHEMA = "tab_name#string"; + + private final String resFile; + private final String dbName; + private final String pattern; + private final Map partSpec; - /** - * @param pattern - * names of tables to show - */ public ShowTableStatusDesc(String resFile, String dbName, String pattern) { - this.dbName = dbName; - this.resFile = resFile; - this.pattern = pattern; + this(resFile, dbName, pattern, null); } - /** - * @param resFile - * @param dbName - * data base name - * @param pattern - * names of tables to show - * @param partSpec - * partition specification - */ - public ShowTableStatusDesc(String resFile, String dbName, String pattern, - HashMap partSpec) { - this.dbName = dbName; + public ShowTableStatusDesc(String resFile, String dbName, String pattern, Map partSpec) { this.resFile = resFile; + this.dbName = dbName; this.pattern = pattern; this.partSpec = partSpec; } - /** - * @return the pattern - */ @Explain(displayName = "pattern") public String getPattern() { return pattern; } - /** - * @param pattern - * the pattern to set - */ - public void setPattern(String pattern) { - this.pattern = pattern; - } - - /** - * @return the resFile - */ public String getResFile() { return resFile; } @@ -115,43 +72,13 @@ public String getResFileString() { return getResFile(); } - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return the database name - */ @Explain(displayName = "database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getDbName() { return dbName; } - /** - * @param dbName - * the database name - */ - public void setDbName(String dbName) { - this.dbName = dbName; - } - - /** - * @return the partSpec - */ @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public HashMap getPartSpec() { + public Map getPartSpec() { return partSpec; } - - /** - * @param partSpec - * the partSpec to set - */ - public void setPartSpec(HashMap partSpec) { - this.partSpec = partSpec; - } } diff --git 
ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java new file mode 100644 index 0000000000..ea695fd1a3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; + +import java.io.DataOutputStream; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.io.IOUtils; + +/** + * Operation process showing the table status. + */ +public class ShowTableStatusOperation extends DDLOperation { + private final ShowTableStatusDesc desc; + + public ShowTableStatusOperation(DDLOperationContext context, ShowTableStatusDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // get the tables for the desired pattern - populate the output stream + List tbls = new ArrayList
(); + Map part = desc.getPartSpec(); + Partition par = null; + if (part != null) { + Table tbl = context.getDb().getTable(desc.getDbName(), desc.getPattern()); + par = context.getDb().getPartition(tbl, part, false); + if (par == null) { + throw new HiveException("Partition " + part + " for table " + desc.getPattern() + " does not exist."); + } + tbls.add(tbl); + } else { + LOG.debug("pattern: {}", desc.getPattern()); + List tblStr = context.getDb().getTablesForDb(desc.getDbName(), desc.getPattern()); + SortedSet sortedTbls = new TreeSet(tblStr); + Iterator iterTbls = sortedTbls.iterator(); + while (iterTbls.hasNext()) { + // create a row per table name + String tblName = iterTbls.next(); + Table tbl = context.getDb().getTable(desc.getDbName(), tblName); + tbls.add(tbl); + } + LOG.info("Found {} table(s) matching the SHOW TABLE EXTENDED statement.", tblStr.size()); + } + + // write the results in the file + DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context); + try { + context.getFormatter().showTableStatus(outStream, context.getDb(), context.getConf(), tbls, part, par); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show table status"); + } finally { + IOUtils.closeStream(outStream); + } + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java similarity index 59% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java index 0f7a3cdc32..584433b0a0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java @@ -16,27 +16,27 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; import java.io.Serializable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; - /** - * ShowTablesDesc. - * + * DDL task description for SHOW TABLES commands. */ @Explain(displayName = "Show Tables", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowTablesDesc extends DDLDesc implements Serializable { +public class ShowTablesDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - /** - * table name for the result of show tables. - */ - private static final String table = "show"; + static { + DDLTask2.registerOperation(ShowTablesDesc.class, ShowTablesOperation.class); + } /** * thrift ddl for the result of show tables and show views. 
@@ -54,161 +54,79 @@ private static final String MATERIALIZED_VIEWS_SCHEMA = "mv_name,rewrite_enabled,mode#string:string:string"; + private final String resFile; + private final String dbName; + private final String pattern; + private final TableType type; + private final TableType typeFilter; + private final boolean isExtended; - TableType type; - String pattern; - TableType typeFilter; - String dbName; - String resFile; - boolean isExtended; - - public String getTable() { - return table; + public ShowTablesDesc(Path resFile) { + this(resFile, null, null, null, null, false); } - public String getSchema() { - if (type != null && type == TableType.MATERIALIZED_VIEW) { - return MATERIALIZED_VIEWS_SCHEMA; - } - return isExtended ? EXTENDED_TABLES_SCHEMA : TABLES_VIEWS_SCHEMA; + public ShowTablesDesc(Path resFile, String dbName) { + this(resFile, dbName, null, null, null, false); } - public ShowTablesDesc() { + public ShowTablesDesc(Path resFile, String dbName, TableType type) { + this(resFile, dbName, null, type, null, false); } - /** - * @param resFile - */ - public ShowTablesDesc(Path resFile) { - this.resFile = resFile.toString(); - pattern = null; + public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType typeFilter, boolean isExtended) { + this(resFile, dbName, pattern, null, typeFilter, isExtended); } - /** - * @param dbName - * name of database to show tables of - */ - public ShowTablesDesc(Path resFile, String dbName) { - this.resFile = resFile.toString(); - this.dbName = dbName; + public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type) { + this(resFile, dbName, pattern, type, null, false); } - /** - * @param pattern - * names of tables to show - */ - public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType typeFilter, boolean isExtended) { + + public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type, TableType typeFilter, + boolean isExtended) { this.resFile = resFile.toString(); this.dbName = dbName; this.pattern = pattern; + this.type = type; this.typeFilter = typeFilter; this.isExtended = isExtended; } - /** - * @param type - * type of the tables to show - */ - public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type) { - this.resFile = resFile.toString(); - this.dbName = dbName; - this.pattern = pattern; - this.type = type; - } - - /** - * @return the pattern - */ @Explain(displayName = "pattern") public String getPattern() { return pattern; } - /** - * @param pattern - * the pattern to set - */ - public void setPattern(String pattern) { - this.pattern = pattern; - } - - /** - * @return the table type to be fetched - */ @Explain(displayName = "type") public TableType getType() { return type; } - /** - * @param type - * the table type to set - */ - public void setType(TableType type) { - this.type = type; - } - - /** - * @return the resFile - */ @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; } - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return the dbName - */ @Explain(displayName = "database name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getDbName() { return dbName; } - /** - * @param dbName - * the dbName to set - */ - public void setDbName(String dbName) { - this.dbName = dbName; - } - - /** - * @return is extended - */ - @Explain(displayName = "extended", 
explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, displayOnlyOnTrue = true) + @Explain(displayName = "extended", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, + displayOnlyOnTrue = true) public boolean isExtended() { return isExtended; } - /** - * @param isExtended - * whether extended modifier is enabled - */ - public void setIsExtended(boolean isExtended) { - this.isExtended = isExtended; - } - - /** - * @return table type filter, null if it is not filtered - */ @Explain(displayName = "table type filter", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public TableType getTypeFilter() { return typeFilter; } - /** - * @param typeFilter - * table type filter for show statement - */ - public void setTypeFilter(TableType typeFilter) { - this.typeFilter = typeFilter; + public String getSchema() { + if (type != null && type == TableType.MATERIALIZED_VIEW) { + return MATERIALIZED_VIEWS_SCHEMA; + } + return isExtended ? EXTENDED_TABLES_SCHEMA : TABLES_VIEWS_SCHEMA; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java new file mode 100644 index 0000000000..71b5717bb8 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import java.io.DataOutputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.io.IOUtils; + +/** + * Operation process showing the tables. 
+ */ +public class ShowTablesOperation extends DDLOperation { + private final ShowTablesDesc desc; + + public ShowTablesOperation(DDLOperationContext context, ShowTablesDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + TableType type = desc.getType(); // null for tables, VIRTUAL_VIEW for views, MATERIALIZED_VIEW for MVs + String dbName = desc.getDbName(); + String pattern = desc.getPattern(); // if null, all tables/views are returned + TableType typeFilter = desc.getTypeFilter(); + String resultsFile = desc.getResFile(); + boolean isExtended = desc.isExtended(); + + if (!context.getDb().databaseExists(dbName)) { + throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); + } + + LOG.debug("pattern: {}", pattern); + LOG.debug("typeFilter: {}", typeFilter); + + List tableNames = null; + List
tableObjects = null; + if (type == null) { + if (isExtended) { + tableObjects = new ArrayList<>(); + tableObjects.addAll(context.getDb().getTableObjectsByType(dbName, pattern, typeFilter)); + LOG.debug("Found {} table(s) matching the SHOW EXTENDED TABLES statement.", tableObjects.size()); + } else { + tableNames = context.getDb().getTablesByType(dbName, pattern, typeFilter); + LOG.debug("Found {} table(s) matching the SHOW TABLES statement.", tableNames.size()); + } + } else if (type == TableType.MATERIALIZED_VIEW) { + tableObjects = new ArrayList<>(); + tableObjects.addAll(context.getDb().getMaterializedViewObjectsByPattern(dbName, pattern)); + LOG.debug("Found {} materialized view(s) matching the SHOW MATERIALIZED VIEWS statement.", tableObjects.size()); + } else if (type == TableType.VIRTUAL_VIEW) { + tableNames = context.getDb().getTablesByType(dbName, pattern, type); + LOG.debug("Found {} view(s) matching the SHOW VIEWS statement.", tableNames.size()); + } else { + throw new HiveException("Option not recognized in SHOW TABLES/VIEWS/MATERIALIZED VIEWS"); + } + + // write the results in the file + DataOutputStream outStream = null; + try { + Path resFile = new Path(resultsFile); + FileSystem fs = resFile.getFileSystem(context.getConf()); + outStream = fs.create(resFile); + // Sort by name and print + if (tableNames != null) { + SortedSet sortedSet = new TreeSet(tableNames); + context.getFormatter().showTables(outStream, sortedSet); + } else { + Collections.sort(tableObjects, Comparator.comparing(Table::getTableName)); + if (isExtended) { + context.getFormatter().showTablesExtended(outStream, tableObjects); + } else { + context.getFormatter().showMaterializedViews(outStream, tableObjects); + } + } + outStream.close(); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database" + dbName); + } finally { + IOUtils.closeStream(outStream); + } + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java similarity index 75% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java index 61deb24eef..1f0cd82690 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java @@ -16,56 +16,59 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; +import java.io.Serializable; import java.util.List; import java.util.Map; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; +import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.Explain.Level; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Truncates managed table or partition + * DDL task description for TRUNCATE TABLE commands. 
*/ @Explain(displayName = "Truncate Table or Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class TruncateTableDesc extends DDLDesc implements DDLDesc.DDLDescWithWriteId { - private final static Logger LOG = LoggerFactory.getLogger(TruncateTableDesc.class); - +public class TruncateTableDesc implements DDLDesc, Serializable, DDLDescWithWriteId { private static final long serialVersionUID = 1L; - private String tableName; - private String fullTableName; - private Map partSpec; + static { + DDLTask2.registerOperation(TruncateTableDesc.class, TruncateTableOperation.class); + } + + private final String tableName; + private final String fullTableName; + private final Map partSpec; + private final ReplicationSpec replicationSpec; + private final boolean isTransactional; + private List columnIndexes; private Path inputDir; private Path outputDir; private ListBucketingCtx lbCtx; - private ReplicationSpec replicationSpec; - private long writeId = 0; - private boolean isTransactional; - - public TruncateTableDesc() { - } + private long writeId = 0; public TruncateTableDesc(String tableName, Map partSpec, ReplicationSpec replicationSpec) { this(tableName, partSpec, replicationSpec, null); } - public TruncateTableDesc(String tableName, Map partSpec, - ReplicationSpec replicationSpec, Table table) { + public TruncateTableDesc(String tableName, Map partSpec, ReplicationSpec replicationSpec, + Table table) { this.tableName = tableName; + this.fullTableName = table == null ? tableName : TableName.getDbTable(table.getDbName(), table.getTableName()); this.partSpec = partSpec; this.replicationSpec = replicationSpec; this.isTransactional = AcidUtils.isTransactionalTable(table); - this.fullTableName = table == null ? tableName : Warehouse.getQualifiedName(table.getTTable()); } @Explain(displayName = "TableName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -73,8 +76,9 @@ public String getTableName() { return tableName; } - public void setTableName(String tableName) { - this.tableName = tableName; + @Override + public String getFullTableName() { + return fullTableName; } @Explain(displayName = "Partition Spec", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -82,8 +86,12 @@ public void setTableName(String tableName) { return partSpec; } - public void setPartSpec(Map partSpec) { - this.partSpec = partSpec; + /** + * @return what kind of replication scope this truncate is running under. + * This can result in a "TRUNCATE IF NEWER THAN" kind of semantic + */ + public ReplicationSpec getReplicationSpec() { + return replicationSpec; } @Explain(displayName = "Column Indexes") @@ -119,23 +127,11 @@ public void setLbCtx(ListBucketingCtx lbCtx) { this.lbCtx = lbCtx; } - /** - * @return what kind of replication scope this truncate is running under. 
- * This can result in a "TRUNCATE IF NEWER THAN" kind of semantic - */ - public ReplicationSpec getReplicationSpec() { return this.replicationSpec; } - @Override public void setWriteId(long writeId) { this.writeId = writeId; } - @Override - public String getFullTableName() { - return fullTableName; - } - - @Override public boolean mayNeedWriteId() { return isTransactional; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java new file mode 100644 index 0000000000..9778bfac92 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Map; + +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask; +import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; + +/** + * Operation process of truncating a table. 
+ */ +public class TruncateTableOperation extends DDLOperation { + private final TruncateTableDesc desc; + + public TruncateTableOperation(DDLOperationContext context, TruncateTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + if (desc.getColumnIndexes() != null) { + ColumnTruncateWork truncateWork = new ColumnTruncateWork(desc.getColumnIndexes(), desc.getInputDir(), + desc.getOutputDir()); + truncateWork.setListBucketingCtx(desc.getLbCtx()); + truncateWork.setMapperCannotSpanPartns(true); + DriverContext driverCxt = new DriverContext(); + ColumnTruncateTask taskExec = new ColumnTruncateTask(); + taskExec.initialize(context.getQueryState(), null, driverCxt, null); + taskExec.setWork(truncateWork); + taskExec.setQueryPlan(context.getQueryPlan()); + Task subtask = taskExec; + int ret = taskExec.execute(driverCxt); + if (subtask.getException() != null) { + context.getTask().setException(subtask.getException()); + } + return ret; + } + + String tableName = desc.getTableName(); + Map partSpec = desc.getPartSpec(); + + ReplicationSpec replicationSpec = desc.getReplicationSpec(); + if (!DDLUtils.allowOperationInReplicationScope(context.getDb(), tableName, partSpec, replicationSpec)) { + // no truncate, the table is missing either due to drop/rename which follows the truncate. + // or the existing table is newer than our update. + LOG.debug("DDLTask: Truncate Table/Partition is skipped as table {} / partition {} is newer than update", + tableName, (partSpec == null) ? + "null" : FileUtils.makePartName(new ArrayList<>(partSpec.keySet()), new ArrayList<>(partSpec.values()))); + return 0; + } + + try { + context.getDb().truncateTable(tableName, partSpec, + replicationSpec != null && replicationSpec.isInReplicationScope() ? desc.getWriteId() : 0L); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR); + } + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java similarity index 74% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java index 0b91463229..86050244eb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java @@ -16,29 +16,30 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table; import java.io.Serializable; import java.util.Map; -import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; - /** - * UnlockTableDesc. - * + * DDL task description for UNLOCK TABLE commands. 
*/ @Explain(displayName = "Unlock Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class UnlockTableDesc extends DDLDesc implements Serializable { +public class UnlockTableDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - private String tableName; - private Map partSpec; - - public UnlockTableDesc() { + static { + DDLTask2.registerOperation(UnlockTableDesc.class, UnlockTableOperation.class); } + private final String tableName; + private final Map partSpec; + public UnlockTableDesc(String tableName, Map partSpec) { this.tableName = tableName; this.partSpec = partSpec; @@ -48,15 +49,7 @@ public String getTableName() { return tableName; } - public void setTableName(String tableName) { - this.tableName = tableName; - } - public Map getPartSpec() { return partSpec; } - - public void setPartSpec(Map partSpec) { - this.partSpec = partSpec; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java new file mode 100644 index 0000000000..8b70e06ca6 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of unlocking a table. + */ +public class UnlockTableOperation extends DDLOperation { + private final UnlockTableDesc desc; + + public UnlockTableOperation(DDLOperationContext context, UnlockTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Context ctx = context.getDriverContext().getCtx(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + return txnManager.unlockTable(context.getDb(), desc); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java new file mode 100644 index 0000000000..6fc4730749 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Table related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.table; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index a56695ba8b..f4281bdd7b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.exec; import static org.apache.commons.lang.StringUtils.join; -import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; import java.io.DataOutputStream; import java.io.FileNotFoundException; @@ -28,25 +27,18 @@ import java.io.Serializable; import java.net.URI; import java.net.URISyntaxException; -import java.nio.charset.StandardCharsets; import java.sql.SQLException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Set; import java.util.SortedSet; -import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -62,10 +54,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; -import org.apache.hadoop.hive.common.type.HiveDecimal; -import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook; @@ -74,13 +63,8 @@ import org.apache.hadoop.hive.metastore.Msck; import org.apache.hadoop.hive.metastore.MsckInfo; import org.apache.hadoop.hive.metastore.PartitionDropOptions; -import org.apache.hadoop.hive.metastore.PartitionManagementTask; -import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; @@ -93,14 +77,6 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.PrincipalType; -import 
org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; @@ -138,8 +114,6 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcSerde; import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe; -import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask; -import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork; import org.apache.hadoop.hive.ql.lockmgr.DbLockManager; import org.apache.hadoop.hive.ql.lockmgr.HiveLock; import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager; @@ -147,30 +121,20 @@ import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject; import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; -import org.apache.hadoop.hive.ql.metadata.CheckConstraint; -import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; -import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; import org.apache.hadoop.hive.ql.metadata.InvalidTableException; -import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.PartitionIterable; -import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo; -import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.metadata.UniqueConstraint; import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter; import org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataTable; import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; -import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.CalcitePlanner; import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import org.apache.hadoop.hive.ql.parse.ParseUtils; -import org.apache.hadoop.hive.ql.parse.PreInsertTableDesc; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; @@ -185,20 +149,16 @@ import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; -import org.apache.hadoop.hive.ql.plan.ColStatistics; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; import 
org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; @@ -209,7 +169,6 @@ import org.apache.hadoop.hive.ql.plan.KillQueryDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -225,20 +184,13 @@ import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; -import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; -import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.TezWork; -import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils; @@ -253,12 +205,9 @@ import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveRoleGrant; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveV1Authorizer; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.AbstractSerDe; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; -import org.apache.hadoop.hive.serde2.SerDeSpec; import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe; @@ -267,7 +216,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; 
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; @@ -276,12 +224,10 @@ import org.apache.hadoop.tools.HadoopArchives; import org.apache.hadoop.util.ToolRunner; import org.apache.hive.common.util.AnnotationUtils; -import org.apache.hive.common.util.HiveStringUtils; import org.apache.hive.common.util.ReflectionUtil; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.stringtemplate.v4.ST; /** * DDLTask implementation. @@ -302,11 +248,6 @@ private MetaDataFormatter formatter; private final HiveAuthorizationTranslator defaultAuthorizationTranslator = new DefaultHiveAuthorizationTranslator(); - private Task subtask = null; - - public Task getSubtask() { - return subtask; - } @Override public boolean requireLock() { @@ -344,19 +285,9 @@ public int execute(DriverContext driverContext) { try { db = Hive.get(conf); - CreateTableDesc crtTbl = work.getCreateTblDesc(); - if (crtTbl != null) { - return createTable(db, crtTbl); - } - - CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc(); - if (crtTblLike != null) { - return createTableLike(db, crtTblLike); - } - - DropTableDesc dropTbl = work.getDropTblDesc(); - if (dropTbl != null) { - dropTableOrPartitions(db, dropTbl); + DropPartitionDesc dropPartition = work.getDropPartitionDesc(); + if (dropPartition != null) { + dropPartitions(db, dropPartition); return 0; } @@ -410,36 +341,16 @@ public int execute(DriverContext driverContext) { return msck(db, msckDesc); } - DescTableDesc descTbl = work.getDescTblDesc(); - if (descTbl != null) { - return describeTable(db, descTbl); - } - DescFunctionDesc descFunc = work.getDescFunctionDesc(); if (descFunc != null) { return describeFunction(db, descFunc); } - ShowTablesDesc showTbls = work.getShowTblsDesc(); - if (showTbls != null) { - return showTablesOrViews(db, showTbls); - } - ShowColumnsDesc showCols = work.getShowColumnsDesc(); if (showCols != null) { return showColumns(db, showCols); } - ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc(); - if (showTblStatus != null) { - return showTableStatus(db, showTblStatus); - } - - ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc(); - if (showTblProperties != null) { - return showTableProperties(db, showTblProperties); - } - ShowFunctionsDesc showFuncs = work.getShowFuncsDesc(); if (showFuncs != null) { return showFunctions(db, showFuncs); @@ -465,31 +376,11 @@ public int execute(DriverContext driverContext) { return abortTxns(db, abortTxnsDesc); } - LockTableDesc lockTbl = work.getLockTblDesc(); - if (lockTbl != null) { - return lockTable(db, lockTbl); - } - - UnlockTableDesc unlockTbl = work.getUnlockTblDesc(); - if (unlockTbl != null) { - return unlockTable(db, unlockTbl); - } - ShowPartitionsDesc showParts = work.getShowPartsDesc(); if (showParts != null) { return showPartitions(db, showParts); } - ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc(); - if (showCreateDb != null) { - return showCreateDatabase(db, showCreateDb); - } - - ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc(); - if (showCreateTbl != null) { - return showCreateTable(db, showCreateTbl); - } - ShowConfDesc showConf = work.getShowConfDesc(); if (showConf != null) { return showConf(db, showConf); @@ -534,11 +425,6 @@ public int execute(DriverContext driverContext) { return alterTableAlterPart(db, alterPartDesc); } - TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc(); - if (truncateTableDesc != null) { - return truncateTable(db, truncateTableDesc); 
- } - AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition(); if (alterTableExchangePartition != null) { @@ -553,10 +439,6 @@ public int execute(DriverContext driverContext) { if (insertCommitHookDesc != null) { return insertCommitWork(db, insertCommitHookDesc); } - PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc(); - if (preInsertTableDesc != null) { - return preInsertWork(db, preInsertTableDesc); - } KillQueryDesc killQueryDesc = work.getKillQueryDesc(); if (killQueryDesc != null) { @@ -802,20 +684,6 @@ private int createOrDropTriggerToPoolMapping(Hive db, CreateOrDropTriggerToPoolM return 0; } - private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws HiveException { - try{ - HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler().getMetaHook(); - if (hook == null || !(hook instanceof DefaultHiveMetaHook)) { - return 0; - } - DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook; - hiveMetaHook.preInsertTable(preInsertTableDesc.getTable().getTTable(), preInsertTableDesc.isOverwrite()); - } catch (MetaException e) { - throw new HiveException(e); - } - return 0; - } - private int insertCommitWork(Hive db, InsertCommitHookDesc insertCommitHookDesc) throws MetaException { boolean failed = true; HiveMetaHook hook = insertCommitHookDesc.getTable().getStorageHandler().getMetaHook(); @@ -964,7 +832,7 @@ private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc, // initialize the task and execute task.initialize(queryState, getQueryPlan(), driverCxt, opContext); - subtask = task; + Task subtask = task; int ret = task.execute(driverCxt); if (subtask.getException() != null) { setException(subtask.getException()); @@ -2109,373 +1977,6 @@ private int showPartitions(Hive db, ShowPartitionsDesc showParts) throws HiveExc return 0; } - private int showCreateDatabase(Hive db, ShowCreateDatabaseDesc showCreateDb) throws HiveException { - DataOutputStream outStream = getOutputStream(showCreateDb.getResFile()); - try { - String dbName = showCreateDb.getDatabaseName(); - return showCreateDatabase(db, outStream, dbName); - } catch (Exception e) { - throw new HiveException(e); - } finally { - IOUtils.closeStream(outStream); - } - } - - private int showCreateDatabase(Hive db, DataOutputStream outStream, String databaseName) - throws Exception { - Database database = db.getDatabase(databaseName); - - StringBuilder createDb_str = new StringBuilder(); - createDb_str.append("CREATE DATABASE `").append(database.getName()).append("`\n"); - if (database.getDescription() != null) { - createDb_str.append("COMMENT\n '"); - createDb_str.append( - HiveStringUtils.escapeHiveCommand(database.getDescription())).append("'\n"); - } - createDb_str.append("LOCATION\n '"); - createDb_str.append(database.getLocationUri()).append("'\n"); - String propertiesToString = propertiesToString(database.getParameters(), null); - if (!propertiesToString.isEmpty()) { - createDb_str.append("WITH DBPROPERTIES (\n"); - createDb_str.append(propertiesToString).append(")\n"); - } - - outStream.write(createDb_str.toString().getBytes("UTF-8")); - return 0; - } - - /** - * Write a statement of how to create a table to a file. - * - * @param db - * The database in question. - * @param showCreateTbl - * This is the table we're interested in. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. 
- */ - private int showCreateTable(Hive db, ShowCreateTableDesc showCreateTbl) throws HiveException { - // get the create table statement for the table and populate the output - DataOutputStream outStream = getOutputStream(showCreateTbl.getResFile()); - try { - String tableName = showCreateTbl.getTableName(); - return showCreateTable(db, outStream, tableName); - } catch (Exception e) { - throw new HiveException(e); - } finally { - IOUtils.closeStream(outStream); - } - } - - private int showCreateTable(Hive db, DataOutputStream outStream, String tableName) - throws HiveException { - final String EXTERNAL = "external"; - final String TEMPORARY = "temporary"; - final String LIST_COLUMNS = "columns"; - final String TBL_COMMENT = "tbl_comment"; - final String LIST_PARTITIONS = "partitions"; - final String SORT_BUCKET = "sort_bucket"; - final String SKEWED_INFO = "tbl_skewedinfo"; - final String ROW_FORMAT = "row_format"; - final String TBL_LOCATION = "tbl_location"; - final String TBL_PROPERTIES = "tbl_properties"; - boolean needsLocation = true; - StringBuilder createTab_str = new StringBuilder(); - - Table tbl = db.getTable(tableName, false); - List duplicateProps = new ArrayList(); - try { - needsLocation = doesTableNeedLocation(tbl); - - if (tbl.isView()) { - String createTab_stmt = "CREATE VIEW `" + tableName + "` AS " + - tbl.getViewExpandedText(); - outStream.write(createTab_stmt.getBytes(StandardCharsets.UTF_8)); - return 0; - } - - createTab_str.append("CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `"); - createTab_str.append(tableName + "`(\n"); - createTab_str.append("<" + LIST_COLUMNS + ">)\n"); - createTab_str.append("<" + TBL_COMMENT + ">\n"); - createTab_str.append("<" + LIST_PARTITIONS + ">\n"); - createTab_str.append("<" + SORT_BUCKET + ">\n"); - createTab_str.append("<" + SKEWED_INFO + ">\n"); - createTab_str.append("<" + ROW_FORMAT + ">\n"); - if (needsLocation) { - createTab_str.append("LOCATION\n"); - createTab_str.append("<" + TBL_LOCATION + ">\n"); - } - createTab_str.append("TBLPROPERTIES (\n"); - createTab_str.append("<" + TBL_PROPERTIES + ">)\n"); - ST createTab_stmt = new ST(createTab_str.toString()); - - // For cases where the table is temporary - String tbl_temp = ""; - if (tbl.isTemporary()) { - duplicateProps.add("TEMPORARY"); - tbl_temp = "TEMPORARY "; - } - // For cases where the table is external - String tbl_external = ""; - if (tbl.getTableType() == TableType.EXTERNAL_TABLE) { - duplicateProps.add("EXTERNAL"); - tbl_external = "EXTERNAL "; - } - - // Columns - String tbl_columns = ""; - List cols = tbl.getCols(); - List columns = new ArrayList(); - for (FieldSchema col : cols) { - String columnDesc = " `" + col.getName() + "` " + col.getType(); - if (col.getComment() != null) { - columnDesc = columnDesc + " COMMENT '" - + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'"; - } - columns.add(columnDesc); - } - tbl_columns = StringUtils.join(columns, ", \n"); - - // Table comment - String tbl_comment = ""; - String tabComment = tbl.getProperty("comment"); - if (tabComment != null) { - duplicateProps.add("comment"); - tbl_comment = "COMMENT '" - + HiveStringUtils.escapeHiveCommand(tabComment) + "'"; - } - - // Partitions - String tbl_partitions = ""; - List partKeys = tbl.getPartitionKeys(); - if (partKeys.size() > 0) { - tbl_partitions += "PARTITIONED BY ( \n"; - List partCols = new ArrayList(); - for (FieldSchema partKey : partKeys) { - String partColDesc = " `" + partKey.getName() + "` " + partKey.getType(); - if (partKey.getComment() != null) { - 
partColDesc = partColDesc + " COMMENT '" - + HiveStringUtils.escapeHiveCommand(partKey.getComment()) + "'"; - } - partCols.add(partColDesc); - } - tbl_partitions += StringUtils.join(partCols, ", \n"); - tbl_partitions += ")"; - } - - // Clusters (Buckets) - String tbl_sort_bucket = ""; - List buckCols = tbl.getBucketCols(); - if (buckCols.size() > 0) { - duplicateProps.add("SORTBUCKETCOLSPREFIX"); - tbl_sort_bucket += "CLUSTERED BY ( \n "; - tbl_sort_bucket += StringUtils.join(buckCols, ", \n "); - tbl_sort_bucket += ") \n"; - List sortCols = tbl.getSortCols(); - if (sortCols.size() > 0) { - tbl_sort_bucket += "SORTED BY ( \n"; - // Order - List sortKeys = new ArrayList(); - for (Order sortCol : sortCols) { - String sortKeyDesc = " " + sortCol.getCol() + " "; - if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) { - sortKeyDesc = sortKeyDesc + "ASC"; - } - else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { - sortKeyDesc = sortKeyDesc + "DESC"; - } - sortKeys.add(sortKeyDesc); - } - tbl_sort_bucket += StringUtils.join(sortKeys, ", \n"); - tbl_sort_bucket += ") \n"; - } - tbl_sort_bucket += "INTO " + tbl.getNumBuckets() + " BUCKETS"; - } - - // Skewed Info - StringBuilder tbl_skewedinfo = new StringBuilder(); - SkewedInfo skewedInfo = tbl.getSkewedInfo(); - if (skewedInfo != null && !skewedInfo.getSkewedColNames().isEmpty()) { - tbl_skewedinfo.append("SKEWED BY (" + StringUtils.join(skewedInfo.getSkewedColNames(), ",") + ")\n"); - tbl_skewedinfo.append(" ON ("); - List colValueList = new ArrayList(); - for (List colValues : skewedInfo.getSkewedColValues()) { - colValueList.add("('" + StringUtils.join(colValues, "','") + "')"); - } - tbl_skewedinfo.append(StringUtils.join(colValueList, ",") + ")"); - if (tbl.isStoredAsSubDirectories()) { - tbl_skewedinfo.append("\n STORED AS DIRECTORIES"); - } - } - - // Row format (SerDe) - StringBuilder tbl_row_format = new StringBuilder(); - StorageDescriptor sd = tbl.getTTable().getSd(); - SerDeInfo serdeInfo = sd.getSerdeInfo(); - Map serdeParams = serdeInfo.getParameters(); - tbl_row_format.append("ROW FORMAT SERDE \n"); - tbl_row_format.append(" '" - + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n"); - if (tbl.getStorageHandler() == null) { - // If serialization.format property has the default value, it will not to be included in - // SERDE properties - if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get( - serdeConstants.SERIALIZATION_FORMAT))){ - serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT); - } - if (!serdeParams.isEmpty()) { - appendSerdeParams(tbl_row_format, serdeParams).append(" \n"); - } - tbl_row_format.append("STORED AS INPUTFORMAT \n '" - + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n"); - tbl_row_format.append("OUTPUTFORMAT \n '" - + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'"); - } else { - duplicateProps.add(META_TABLE_STORAGE); - tbl_row_format.append("STORED BY \n '" - + HiveStringUtils.escapeHiveCommand(tbl.getParameters().get( - META_TABLE_STORAGE)) + "' \n"); - // SerDe Properties - if (!serdeParams.isEmpty()) { - appendSerdeParams(tbl_row_format, serdeInfo.getParameters()); - } - } - String tbl_location = " '" + HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'"; - - // Table properties - duplicateProps.addAll(StatsSetupConst.TABLE_PARAMS_STATS_KEYS); - String tbl_properties = propertiesToString(tbl.getParameters(), duplicateProps); - - createTab_stmt.add(TEMPORARY, tbl_temp); - 
createTab_stmt.add(EXTERNAL, tbl_external); - createTab_stmt.add(LIST_COLUMNS, tbl_columns); - createTab_stmt.add(TBL_COMMENT, tbl_comment); - createTab_stmt.add(LIST_PARTITIONS, tbl_partitions); - createTab_stmt.add(SORT_BUCKET, tbl_sort_bucket); - createTab_stmt.add(SKEWED_INFO, tbl_skewedinfo); - createTab_stmt.add(ROW_FORMAT, tbl_row_format); - // Table location should not be printed with hbase backed tables - if (needsLocation) { - createTab_stmt.add(TBL_LOCATION, tbl_location); - } - createTab_stmt.add(TBL_PROPERTIES, tbl_properties); - - outStream.write(createTab_stmt.render().getBytes(StandardCharsets.UTF_8)); - } catch (IOException e) { - LOG.info("show create table: ", e); - return 1; - } - - return 0; - } - - private String propertiesToString(Map props, List exclude) { - String prop_string = ""; - if (!props.isEmpty()) { - Map properties = new TreeMap(props); - List realProps = new ArrayList(); - for (String key : properties.keySet()) { - if (properties.get(key) != null && (exclude == null || !exclude.contains(key))) { - realProps.add(" '" + key + "'='" + - HiveStringUtils.escapeHiveCommand(properties.get(key)) + "'"); - } - } - prop_string += StringUtils.join(realProps, ", \n"); - } - return prop_string; - } - - public static StringBuilder appendSerdeParams( - StringBuilder builder, Map serdeParam) { - serdeParam = new TreeMap(serdeParam); - builder.append("WITH SERDEPROPERTIES ( \n"); - List serdeCols = new ArrayList(); - for (Entry entry : serdeParam.entrySet()) { - serdeCols.add(" '" + entry.getKey() + "'='" - + HiveStringUtils.escapeHiveCommand(entry.getValue()) + "'"); - } - builder.append(StringUtils.join(serdeCols, ", \n")).append(')'); - return builder; - } - - /** - * Write a list of the tables/views in the database to a file. - * - * @param db - * The database in context. - * @param showDesc - * A ShowTablesDesc for tables or views we're interested in. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int showTablesOrViews(Hive db, ShowTablesDesc showDesc) throws HiveException { - // get the tables/views for the desired pattern - populate the output stream - List tableNames = null; - List
tableObjects = null; - - TableType type = showDesc.getType(); // null for tables, VIRTUAL_VIEW for views, MATERIALIZED_VIEW for MVs - String dbName = showDesc.getDbName(); - String pattern = showDesc.getPattern(); // if null, all tables/views are returned - TableType typeFilter = showDesc.getTypeFilter(); - String resultsFile = showDesc.getResFile(); - boolean isExtended = showDesc.isExtended(); - - if (!db.databaseExists(dbName)) { - throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); - } - - LOG.debug("pattern: {}", pattern); - LOG.debug("typeFilter: {}", typeFilter); - if (type == null) { - if (isExtended) { - tableObjects = new ArrayList<>(); - tableObjects.addAll(db.getTableObjectsByType(dbName, pattern, typeFilter)); - LOG.debug("Found {} table(s) matching the SHOW EXTENDED TABLES statement.", tableObjects.size()); - } else { - tableNames = db.getTablesByType(dbName, pattern, typeFilter); - LOG.debug("Found {} table(s) matching the SHOW TABLES statement.", tableNames.size()); - } - } else if (type == TableType.MATERIALIZED_VIEW) { - tableObjects = new ArrayList<>(); - tableObjects.addAll(db.getMaterializedViewObjectsByPattern(dbName, pattern)); - LOG.debug("Found {} materialized view(s) matching the SHOW MATERIALIZED VIEWS statement.", tableObjects.size()); - } else if (type == TableType.VIRTUAL_VIEW) { - tableNames = db.getTablesByType(dbName, pattern, type); - LOG.debug("Found {} view(s) matching the SHOW VIEWS statement.", tableNames.size()); - } else { - throw new HiveException("Option not recognized in SHOW TABLES/VIEWS/MATERIALIZED VIEWS"); - } - - // write the results in the file - DataOutputStream outStream = null; - try { - Path resFile = new Path(resultsFile); - FileSystem fs = resFile.getFileSystem(conf); - outStream = fs.create(resFile); - // Sort by name and print - if (tableNames != null) { - SortedSet sortedSet = new TreeSet(tableNames); - formatter.showTables(outStream, sortedSet); - } else { - Collections.sort(tableObjects, Comparator.comparing(Table::getTableName)); - if (isExtended) { - formatter.showTablesExtended(outStream, tableObjects); - } else { - formatter.showMaterializedViews(outStream, tableObjects); - } - } - outStream.close(); - } catch (Exception e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database" + dbName); - } finally { - IOUtils.closeStream(outStream); - } - return 0; - } - /** * Write a list of the columns in the table to a file. * @@ -2949,38 +2450,6 @@ private int killQuery(Hive db, KillQueryDesc desc) throws HiveException { return 0; } - /** - * Lock the table/partition specified - * @param db - * - * @param lockTbl - * the table/partition to be locked along with the mode - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException { - Context ctx = driverContext.getCtx(); - HiveTxnManager txnManager = ctx.getHiveTxnManager(); - return txnManager.lockTable(db, lockTbl); - } - - /** - * Unlock the table/partition specified - * @param db - * - * @param unlockTbl - * the table/partition to be unlocked - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. 
- */ - private int unlockTable(Hive db, UnlockTableDesc unlockTbl) throws HiveException { - Context ctx = driverContext.getCtx(); - HiveTxnManager txnManager = ctx.getHiveTxnManager(); - return txnManager.unlockTable(db, unlockTbl); - } - /** * Shows a description of a function. * @param db @@ -3055,117 +2524,6 @@ private int describeFunction(Hive db, DescFunctionDesc descFunc) throws HiveExce return 0; } - /** - * Write the status of tables to a file. - * - * @param db - * The database in question. - * @param showTblStatus - * tables we are interested in - * @return Return 0 when execution succeeds and above 0 if it fails. - */ - private int showTableStatus(Hive db, ShowTableStatusDesc showTblStatus) throws HiveException { - // get the tables for the desired pattern - populate the output stream - List
<Table> tbls = new ArrayList<Table>
(); - Map part = showTblStatus.getPartSpec(); - Partition par = null; - if (part != null) { - Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus.getPattern()); - par = db.getPartition(tbl, part, false); - if (par == null) { - throw new HiveException("Partition " + part + " for table " - + showTblStatus.getPattern() + " does not exist."); - } - tbls.add(tbl); - } else { - LOG.debug("pattern: {}", showTblStatus.getPattern()); - List tblStr = db.getTablesForDb(showTblStatus.getDbName(), - showTblStatus.getPattern()); - SortedSet sortedTbls = new TreeSet(tblStr); - Iterator iterTbls = sortedTbls.iterator(); - while (iterTbls.hasNext()) { - // create a row per table name - String tblName = iterTbls.next(); - Table tbl = db.getTable(showTblStatus.getDbName(), tblName); - tbls.add(tbl); - } - LOG.info("Found {} table(s) matching the SHOW TABLE EXTENDED statement.", tblStr.size()); - } - - // write the results in the file - DataOutputStream outStream = getOutputStream(showTblStatus.getResFile()); - try { - formatter.showTableStatus(outStream, db, conf, tbls, part, par); - } catch (Exception e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show table status"); - } finally { - IOUtils.closeStream(outStream); - } - return 0; - } - - /** - * Write the properties of a table to a file. - * - * @param db - * The database in question. - * @param showTblPrpt - * This is the table we're interested in. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int showTableProperties(Hive db, ShowTblPropertiesDesc showTblPrpt) throws HiveException { - String tableName = showTblPrpt.getTableName(); - - // show table properties - populate the output stream - Table tbl = db.getTable(tableName, false); - try { - if (tbl == null) { - String errMsg = "Table " + tableName + " does not exist"; - writeToFile(errMsg, showTblPrpt.getResFile()); - return 0; - } - - LOG.info("DDLTask: show properties for {}", tableName); - - StringBuilder builder = new StringBuilder(); - String propertyName = showTblPrpt.getPropertyName(); - if (propertyName != null) { - String propertyValue = tbl.getProperty(propertyName); - if (propertyValue == null) { - String errMsg = "Table " + tableName + " does not have property: " + propertyName; - builder.append(errMsg); - } - else { - appendNonNull(builder, propertyName, true); - appendNonNull(builder, propertyValue); - } - } - else { - Map properties = new TreeMap(tbl.getParameters()); - for (Entry entry : properties.entrySet()) { - appendNonNull(builder, entry.getKey(), true); - appendNonNull(builder, entry.getValue()); - } - } - - LOG.info("DDLTask: written data for showing properties of {}", tableName); - writeToFile(builder.toString(), showTblPrpt.getResFile()); - - } catch (FileNotFoundException e) { - LOG.info("show table properties: ", e); - return 1; - } catch (IOException e) { - LOG.info("show table properties: ", e); - return 1; - } catch (Exception e) { - throw new HiveException(e); - } - - return 0; - } - private void writeToFile(String data, String file) throws IOException { Path resFile = new Path(file); FileSystem fs = resFile.getFileSystem(conf); @@ -3182,211 +2540,7 @@ private void writeToFile(String data, String file) throws IOException { } } - /** - * Write the description of a table to a file. - * - * @param db - * The database in question. - * @param descTbl - * This is the table we're interested in. 
- * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - * @throws MetaException - */ - private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, MetaException { - String colPath = descTbl.getColumnPath(); - String tableName = descTbl.getTableName(); - - // describe the table - populate the output stream - Table tbl = db.getTable(tableName, false); - if (tbl == null) { - throw new HiveException(ErrorMsg.INVALID_TABLE, tableName); - } - Partition part = null; - if (descTbl.getPartSpec() != null) { - part = db.getPartition(tbl, descTbl.getPartSpec(), false); - if (part == null) { - throw new HiveException(ErrorMsg.INVALID_PARTITION, - StringUtils.join(descTbl.getPartSpec().keySet(), ','), tableName); - } - tbl = part.getTable(); - } - - DataOutputStream outStream = getOutputStream(descTbl.getResFile()); - try { - LOG.debug("DDLTask: got data for {}", tableName); - - List cols = null; - List colStats = null; - - Deserializer deserializer = tbl.getDeserializer(true); - if (deserializer instanceof AbstractSerDe) { - String errorMsgs = ((AbstractSerDe) deserializer).getConfigurationErrors(); - if (errorMsgs != null && !errorMsgs.isEmpty()) { - throw new SQLException(errorMsgs); - } - } - - if (colPath.equals(tableName)) { - cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ? - tbl.getCols() : part.getCols(); - - if (!descTbl.isFormatted()) { - cols.addAll(tbl.getPartCols()); - } - - if (tbl.isPartitioned() && part == null) { - // No partitioned specified for partitioned table, lets fetch all. - Map tblProps = tbl.getParameters() == null ? new HashMap() : tbl.getParameters(); - Map valueMap = new HashMap<>(); - Map stateMap = new HashMap<>(); - for (String stat : StatsSetupConst.SUPPORTED_STATS) { - valueMap.put(stat, 0L); - stateMap.put(stat, true); - } - PartitionIterable parts = new PartitionIterable(db, tbl, null, conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); - int numParts = 0; - for (Partition partition : parts) { - Map props = partition.getParameters(); - Boolean state = StatsSetupConst.areBasicStatsUptoDate(props); - for (String stat : StatsSetupConst.SUPPORTED_STATS) { - stateMap.put(stat, stateMap.get(stat) && state); - if (props != null && props.get(stat) != null) { - valueMap.put(stat, valueMap.get(stat) + Long.parseLong(props.get(stat))); - } - } - numParts++; - } - for (String stat : StatsSetupConst.SUPPORTED_STATS) { - StatsSetupConst.setBasicStatsState(tblProps, Boolean.toString(stateMap.get(stat))); - tblProps.put(stat, valueMap.get(stat).toString()); - } - tblProps.put(StatsSetupConst.NUM_PARTITIONS, Integer.toString(numParts)); - tbl.setParameters(tblProps); - } - } else { - if (descTbl.isFormatted()) { - // when column name is specified in describe table DDL, colPath will - // will be table_name.column_name - String colName = colPath.split("\\.")[1]; - String[] dbTab = Utilities.getDbTableName(tableName); - List colNames = new ArrayList(); - colNames.add(colName.toLowerCase()); - if (null == part) { - if (tbl.isPartitioned()) { - Map tblProps = tbl.getParameters() == null ? 
new HashMap() : tbl.getParameters(); - if (tbl.isPartitionKey(colNames.get(0))) { - FieldSchema partCol = tbl.getPartColByName(colNames.get(0)); - cols = Collections.singletonList(partCol); - PartitionIterable parts = new PartitionIterable(db, tbl, null, conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); - ColumnInfo ci = new ColumnInfo(partCol.getName(),TypeInfoUtils.getTypeInfoFromTypeString(partCol.getType()),null,false); - ColStatistics cs = StatsUtils.getColStatsForPartCol(ci, parts, conf); - ColumnStatisticsData data = new ColumnStatisticsData(); - ColStatistics.Range r = cs.getRange(); - StatObjectConverter.fillColumnStatisticsData(partCol.getType(), data, r == null ? null : r.minValue, r == null ? null : r.maxValue, - r == null ? null : r.minValue, r == null ? null : r.maxValue, r == null ? null : r.minValue.toString(), r == null ? null : r.maxValue.toString(), - cs.getNumNulls(), cs.getCountDistint(), null, cs.getAvgColLen(), cs.getAvgColLen(), cs.getNumTrues(), cs.getNumFalses()); - ColumnStatisticsObj cso = new ColumnStatisticsObj(partCol.getName(), partCol.getType(), data); - colStats = Collections.singletonList(cso); - StatsSetupConst.setColumnStatsState(tblProps, colNames); - } else { - cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - List parts = db.getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), (short) -1); - AggrStats aggrStats = db.getAggrColStatsFor( - dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false); - colStats = aggrStats.getColStats(); - if (parts.size() == aggrStats.getPartsFound()) { - StatsSetupConst.setColumnStatsState(tblProps, colNames); - } else { - StatsSetupConst.removeColumnStatsState(tblProps, colNames); - } - } - tbl.setParameters(tblProps); - } else { - cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - colStats = db.getTableColumnStatistics( - dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, false); - } - } else { - List partitions = new ArrayList(); - partitions.add(part.getName()); - cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - colStats = db.getPartitionColumnStatistics(dbTab[0].toLowerCase(), - dbTab[1].toLowerCase(), partitions, colNames, false).get(part.getName()); - } - } else { - cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - } - } - PrimaryKeyInfo pkInfo = null; - ForeignKeyInfo fkInfo = null; - UniqueConstraint ukInfo = null; - NotNullConstraint nnInfo = null; - DefaultConstraint dInfo = null; - CheckConstraint cInfo = null; - StorageHandlerInfo storageHandlerInfo = null; - if (descTbl.isExt() || descTbl.isFormatted()) { - pkInfo = db.getPrimaryKeys(tbl.getDbName(), tbl.getTableName()); - fkInfo = db.getForeignKeys(tbl.getDbName(), tbl.getTableName()); - ukInfo = db.getUniqueConstraints(tbl.getDbName(), tbl.getTableName()); - nnInfo = db.getNotNullConstraints(tbl.getDbName(), tbl.getTableName()); - dInfo = db.getDefaultConstraints(tbl.getDbName(), tbl.getTableName()); - cInfo = db.getCheckConstraints(tbl.getDbName(), tbl.getTableName()); - storageHandlerInfo = db.getStorageHandlerInfo(tbl); - } - fixDecimalColumnTypeName(cols); - // Information for materialized views - if (tbl.isMaterializedView()) { - final String validTxnsList = db.getConf().get(ValidTxnList.VALID_TXNS_KEY); - if (validTxnsList != null) { - final List tablesUsed = - new ArrayList<>(tbl.getCreationMetadata().getTablesUsed()); - final ValidTxnWriteIdList currentTxnWriteIds = - SessionState.get().getTxnMgr().getValidWriteIds(tablesUsed, validTxnsList); 
- final long defaultTimeWindow = - HiveConf.getTimeVar(db.getConf(), HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW, - TimeUnit.MILLISECONDS); - tbl.setOutdatedForRewriting(Hive.isOutdatedMaterializedView(tbl, - currentTxnWriteIds, defaultTimeWindow, tablesUsed, false)); - } - } - // In case the query is served by HiveServer2, don't pad it with spaces, - // as HiveServer2 output is consumed by JDBC/ODBC clients. - boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); - formatter.describeTable(outStream, colPath, tableName, tbl, part, - cols, descTbl.isFormatted(), descTbl.isExt(), isOutputPadded, - colStats, pkInfo, fkInfo, ukInfo, nnInfo, dInfo, cInfo, - storageHandlerInfo); - - LOG.debug("DDLTask: written data for {}", tableName); - - } catch (SQLException e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR, tableName); - } finally { - IOUtils.closeStream(outStream); - } - - return 0; - } - - /** - * Fix the type name of a column of type decimal w/o precision/scale specified. This makes - * the describe table show "decimal(10,0)" instead of "decimal" even if the type stored - * in metastore is "decimal", which is possible with previous hive. - * - * @param cols columns that to be fixed as such - */ - private static void fixDecimalColumnTypeName(List cols) { - for (FieldSchema col : cols) { - if (serdeConstants.DECIMAL_TYPE_NAME.equals(col.getType())) { - col.setType(DecimalTypeInfo.getQualifiedName(HiveDecimal.USER_DEFAULT_PRECISION, - HiveDecimal.USER_DEFAULT_SCALE)); - } - } - } - - static String writeGrantInfo(List privileges, boolean testMode) { + private String writeGrantInfo(List privileges, boolean testMode) { if (privileges == null || privileges.isEmpty()) { return ""; } @@ -3425,23 +2579,7 @@ public int compare(HivePrivilegeInfo o1, HivePrivilegeInfo o2) { return builder.toString(); } - static String writeRoleGrantsInfo(List roleGrants, boolean testMode) { - if (roleGrants == null || roleGrants.isEmpty()) { - return ""; - } - StringBuilder builder = new StringBuilder(); - //sort the list to get sorted (deterministic) output (for ease of testing) - Collections.sort(roleGrants); - for (RolePrincipalGrant roleGrant : roleGrants) { - appendNonNull(builder, roleGrant.getRoleName(), true); - appendNonNull(builder, roleGrant.isGrantOption()); - appendNonNull(builder, testMode ? -1 : roleGrant.getGrantTime() * 1000L); - appendNonNull(builder, roleGrant.getGrantorName()); - } - return builder.toString(); - } - - static String writeRolesGrantedInfo(List roles, boolean testMode) { + private String writeRolesGrantedInfo(List roles, boolean testMode) { if (roles == null || roles.isEmpty()) { return ""; } @@ -3457,11 +2595,11 @@ static String writeRolesGrantedInfo(List roles, boolean testMode) return builder.toString(); } - static StringBuilder appendNonNull(StringBuilder builder, Object value) { + private StringBuilder appendNonNull(StringBuilder builder, Object value) { return appendNonNull(builder, value, false); } - static StringBuilder appendNonNull(StringBuilder builder, Object value, boolean firstColumn) { + private StringBuilder appendNonNull(StringBuilder builder, Object value, boolean firstColumn) { if (!firstColumn) { builder.append((char)separator); } else if (builder.length() > 0) { @@ -4181,35 +3319,26 @@ private int updateColumns(Table tbl, Partition part) } /** - * Drop a given table or some partitions. DropTableDesc is currently used for both. + * Drop a given partitions. * * @param db * The database in question. 
- * @param dropTbl - * This is the table we're dropping. + * @param dropPartition + * This is the partition we're dropping. * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException { + private void dropPartitions(Hive db, DropPartitionDesc dropPartition) throws HiveException { // We need to fetch the table before it is dropped so that it can be passed to // post-execution hook Table tbl = null; try { - tbl = db.getTable(dropTbl.getTableName()); + tbl = db.getTable(dropPartition.getTableName()); } catch (InvalidTableException e) { // drop table is idempotent } - if (dropTbl.getPartSpecs() == null) { - dropTable(db, tbl, dropTbl); - } else { - dropPartitions(db, tbl, dropTbl); - } - } - - private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException { - - ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); + ReplicationSpec replicationSpec = dropPartition.getReplicationSpec(); if (replicationSpec.isInReplicationScope()){ /** * ALTER TABLE DROP PARTITION ... FOR REPLICATION(x) behaves as a DROP PARTITION IF OLDER THAN x @@ -4231,7 +3360,7 @@ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws Hi return; } - for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()){ + for (DropPartitionDesc.PartSpec partSpec : dropPartition.getPartSpecs()){ List partitions = new ArrayList<>(); try { db.getPartitionsByExpr(tbl, partSpec.getPartSpec(), conf, partitions); @@ -4250,12 +3379,12 @@ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws Hi // ifExists is currently verified in DDLSemanticAnalyzer List droppedParts - = db.dropPartitions(dropTbl.getTableName(), - dropTbl.getPartSpecs(), + = db.dropPartitions(dropPartition.getTableName(), + dropPartition.getPartSpecs(), PartitionDropOptions.instance() .deleteData(true) .ifExists(true) - .purgeData(dropTbl.getIfPurge())); + .purgeData(dropPartition.getIfPurge())); for (Partition partition : droppedParts) { console.printInfo("Dropped the partition " + partition.getName()); // We have already locked the table, don't lock the partitions. 
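For illustration, a minimal sketch of the options object assembled by the dropPartitions call in the hunk above. PartitionDropOptions and its fluent setters are taken verbatim from that call; the ifPurge value and class name below are placeholders.

import org.apache.hadoop.hive.metastore.PartitionDropOptions;

public class PartitionDropOptionsSketch {
  public static void main(String[] args) {
    boolean ifPurge = false; // placeholder; the task reads this flag from DropPartitionDesc
    PartitionDropOptions options = PartitionDropOptions.instance()
        .deleteData(true)    // delete the partition data along with the metadata
        .ifExists(true)      // existence is already verified by DDLSemanticAnalyzer
        .purgeData(ifPurge); // bypass the trash when PURGE was requested
    System.out.println("Prepared drop options: deleteData=true, ifExists=true, purge=" + ifPurge);
  }
}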
@@ -4263,102 +3392,6 @@ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws Hi } } - private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException { - // This is a true DROP TABLE - if (tbl != null && dropTbl.getValidationRequired()) { - if (tbl.isView()) { - if (!dropTbl.getExpectView()) { - if (dropTbl.getIfExists()) { - return; - } - if (dropTbl.getExpectMaterializedView()) { - throw new HiveException("Cannot drop a view with DROP MATERIALIZED VIEW"); - } else { - throw new HiveException("Cannot drop a view with DROP TABLE"); - } - } - } else if (tbl.isMaterializedView()) { - if (!dropTbl.getExpectMaterializedView()) { - if (dropTbl.getIfExists()) { - return; - } - if (dropTbl.getExpectView()) { - throw new HiveException("Cannot drop a materialized view with DROP VIEW"); - } else { - throw new HiveException("Cannot drop a materialized view with DROP TABLE"); - } - } - } else { - if (dropTbl.getExpectView()) { - if (dropTbl.getIfExists()) { - return; - } - throw new HiveException( - "Cannot drop a base table with DROP VIEW"); - } else if (dropTbl.getExpectMaterializedView()) { - if (dropTbl.getIfExists()) { - return; - } - throw new HiveException( - "Cannot drop a base table with DROP MATERIALIZED VIEW"); - } - } - } - - ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); - if ((tbl!= null) && replicationSpec.isInReplicationScope()){ - /** - * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely - * matches a DROP TABLE IF OLDER THAN(x) semantic. - * - * Ideally, commands executed under the scope of replication need to be idempotent and resilient - * to repeats. What can happen, sometimes, is that a drone processing a replication task can - * have been abandoned for not returning in time, but still execute its task after a while, - * which should not result in it mucking up data that has been impressed later on. So, for eg., - * if we create partition P1, followed by droppping it, followed by creating it yet again, - * the replication of that drop should not drop the newer partition if it runs after the destination - * object is already in the newer state. - * - * Thus, we check the replicationSpec.allowEventReplacementInto to determine whether or not we can - * drop the object in question(will return false if object is newer than the event, true if not) - * - * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP - * TABLE will always drop the table, and the included partitions, DROP TABLE FOR REPLICATION must - * do one more thing - if it does not drop the table because the table is in a newer state, it must - * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL - * acts like a recursive DROP TABLE IF OLDER. - */ - if (!replicationSpec.allowEventReplacementInto(tbl.getParameters())){ - // Drop occured as part of replicating a drop, but the destination - // table was newer than the event being replicated. Ignore, but drop - // any partitions inside that are older. 
- if (tbl.isPartitioned()){ - - PartitionIterable partitions = new PartitionIterable(db,tbl,null, - conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); - - for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())){ - db.dropPartition(tbl.getDbName(),tbl.getTableName(),p.getValues(),true); - } - } - LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", dropTbl.getTableName()); - return; // table is newer, leave it be. - } - } - - // drop the table - // TODO: API w/catalog name - db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); - if (tbl != null) { - // Remove from cache if it is a materialized view - if (tbl.isMaterializedView()) { - HiveMaterializedViewsRegistry.get().dropMaterializedView(tbl); - } - // We have already locked the table in DDLSemanticAnalyzer, don't do it again here - addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); - } - } - /** * Update last_modified_by and last_modified_time parameters in parameter map. * @@ -4375,10 +3408,6 @@ private boolean updateModifiedParameters(Map params, HiveConf co return true; } - private void validateSerDe(String serdeName) throws HiveException { - validateSerDe(serdeName, conf); - } - /** * Check if the given serde is valid. */ @@ -4395,257 +3424,6 @@ public static void validateSerDe(String serdeName, HiveConf conf) throws HiveExc } } - /** - * Create a new table. - * - * @param db - * The database in question. - * @param crtTbl - * This is the table we're creating. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { - // create the table - Table tbl = crtTbl.toTable(conf); - List primaryKeys = crtTbl.getPrimaryKeys(); - List foreignKeys = crtTbl.getForeignKeys(); - List uniqueConstraints = crtTbl.getUniqueConstraints(); - List notNullConstraints = crtTbl.getNotNullConstraints(); - List defaultConstraints = crtTbl.getDefaultConstraints(); - List checkConstraints = crtTbl.getCheckConstraints(); - LOG.debug("creating table {} on {}",tbl.getFullyQualifiedName(),tbl.getDataLocation()); - - if (crtTbl.getReplicationSpec().isInReplicationScope() && (!crtTbl.getReplaceMode())){ - // if this is a replication spec, then replace-mode semantics might apply. - // if we're already asking for a table replacement, then we can skip this check. - // however, otherwise, if in replication scope, and we've not been explicitly asked - // to replace, we should check if the object we're looking at exists, and if so, - // trigger replace-mode semantics. - Table existingTable = db.getTable(tbl.getDbName(), tbl.getTableName(), false); - if (existingTable != null){ - if (crtTbl.getReplicationSpec().allowEventReplacementInto(existingTable.getParameters())){ - crtTbl.setReplaceMode(true); // we replace existing table. - ReplicationSpec.copyLastReplId(existingTable.getParameters(), tbl.getParameters()); - } else { - LOG.debug("DDLTask: Create Table is skipped as table {} is newer than update", - crtTbl.getTableName()); - return 0; // no replacement, the existing table state is newer than our update. 
- } - } - } - - // create the table - if (crtTbl.getReplaceMode()) { - ReplicationSpec replicationSpec = crtTbl.getReplicationSpec(); - long writeId = 0; - EnvironmentContext environmentContext = null; - if (replicationSpec != null && replicationSpec.isInReplicationScope()) { - if (replicationSpec.isMigratingToTxnTable()) { - // for migration we start the transaction and allocate write id in repl txn task for migration. - String writeIdPara = conf.get(ReplUtils.REPL_CURRENT_TBL_WRITE_ID); - if (writeIdPara == null) { - throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration"); - } - writeId = Long.parseLong(writeIdPara); - } else { - writeId = crtTbl.getReplWriteId(); - } - - // In case of replication statistics is obtained from the source, so do not update those - // on replica. Since we are not replicating statisics for transactional tables, do not do - // so for transactional tables right now. - if (!AcidUtils.isTransactionalTable(crtTbl)) { - environmentContext = new EnvironmentContext(); - environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); - } - } - - // replace-mode creates are really alters using CreateTableDesc. - db.alterTable(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), tbl, false, - environmentContext, true, writeId); - } else { - if ((foreignKeys != null && foreignKeys.size() > 0) || - (primaryKeys != null && primaryKeys.size() > 0) || - (uniqueConstraints != null && uniqueConstraints.size() > 0) || - (notNullConstraints != null && notNullConstraints.size() > 0) || - (checkConstraints!= null && checkConstraints.size() > 0) || - defaultConstraints != null && defaultConstraints.size() > 0) { - db.createTable(tbl, crtTbl.getIfNotExists(), primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); - } else { - db.createTable(tbl, crtTbl.getIfNotExists()); - } - Long mmWriteId = crtTbl.getInitialMmWriteId(); - if (crtTbl.isCTAS() || mmWriteId != null) { - Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName()); - if (crtTbl.isCTAS()) { - DataContainer dc = new DataContainer(createdTable.getTTable()); - queryState.getLineageState().setLineage( - createdTable.getPath(), dc, createdTable.getCols() - ); - } - } - } - addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); - return 0; - } - - - /** - * Create a new table like an existing table. - * - * @param db - * The database in question. - * @param crtTbl - * This is the table we're creating. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. 
- */ - private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws Exception { - // Get the existing table - Table oldtbl = db.getTable(crtTbl.getLikeTableName()); - Table tbl; - if (oldtbl.getTableType() == TableType.VIRTUAL_VIEW || - oldtbl.getTableType() == TableType.MATERIALIZED_VIEW) { - String targetTableName = crtTbl.getTableName(); - tbl=db.newTable(targetTableName); - - if (crtTbl.getTblProps() != null) { - tbl.getTTable().getParameters().putAll(crtTbl.getTblProps()); - } - - tbl.setTableType(TableType.MANAGED_TABLE); - - if (crtTbl.isExternal()) { - tbl.setProperty("EXTERNAL", "TRUE"); - tbl.setTableType(TableType.EXTERNAL_TABLE); - // partition discovery is on by default - tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); - } - - tbl.setFields(oldtbl.getCols()); - tbl.setPartCols(oldtbl.getPartCols()); - - if (crtTbl.getDefaultSerName() == null) { - LOG.info("Default to LazySimpleSerDe for table {}", targetTableName); - tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); - } else { - // let's validate that the serde exists - validateSerDe(crtTbl.getDefaultSerName()); - tbl.setSerializationLib(crtTbl.getDefaultSerName()); - } - - if (crtTbl.getDefaultSerdeProps() != null) { - Iterator> iter = crtTbl.getDefaultSerdeProps().entrySet() - .iterator(); - while (iter.hasNext()) { - Entry m = iter.next(); - tbl.setSerdeParam(m.getKey(), m.getValue()); - } - } - - tbl.setInputFormatClass(crtTbl.getDefaultInputFormat()); - tbl.setOutputFormatClass(crtTbl.getDefaultOutputFormat()); - - tbl.getTTable().getSd().setInputFormat( - tbl.getInputFormatClass().getName()); - tbl.getTTable().getSd().setOutputFormat( - tbl.getOutputFormatClass().getName()); - } else { - tbl=oldtbl; - - // find out database name and table name of target table - String targetTableName = crtTbl.getTableName(); - String[] names = Utilities.getDbTableName(targetTableName); - - tbl.setDbName(names[0]); - tbl.setTableName(names[1]); - - // using old table object, hence reset the owner to current user for new table. - tbl.setOwner(SessionState.getUserFromAuthenticator()); - - if (crtTbl.getLocation() != null) { - tbl.setDataLocation(new Path(crtTbl.getLocation())); - } else { - tbl.unsetDataLocation(); - } - - Class serdeClass = oldtbl.getDeserializerClass(); - - Map params = tbl.getParameters(); - // We should copy only those table parameters that are specified in the config. 
- SerDeSpec spec = AnnotationUtils.getAnnotation(serdeClass, SerDeSpec.class); - String paramsStr = HiveConf.getVar(conf, HiveConf.ConfVars.DDL_CTL_PARAMETERS_WHITELIST); - - Set retainer = new HashSet(); - // for non-native table, property storage_handler should be retained - retainer.add(META_TABLE_STORAGE); - if (spec != null && spec.schemaProps() != null) { - retainer.addAll(Arrays.asList(spec.schemaProps())); - } - if (paramsStr != null) { - retainer.addAll(Arrays.asList(paramsStr.split(","))); - } - if (!retainer.isEmpty()) { - params.keySet().retainAll(retainer); - } else { - params.clear(); - } - - if (crtTbl.getTblProps() != null) { - params.putAll(crtTbl.getTblProps()); - } - - if (crtTbl.isUserStorageFormat()) { - tbl.setInputFormatClass(crtTbl.getDefaultInputFormat()); - tbl.setOutputFormatClass(crtTbl.getDefaultOutputFormat()); - tbl.getTTable().getSd().setInputFormat( - tbl.getInputFormatClass().getName()); - tbl.getTTable().getSd().setOutputFormat( - tbl.getOutputFormatClass().getName()); - if (crtTbl.getDefaultSerName() == null) { - LOG.info("Default to LazySimpleSerDe for like table {}", targetTableName); - tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); - } else { - // let's validate that the serde exists - validateSerDe(crtTbl.getDefaultSerName()); - tbl.setSerializationLib(crtTbl.getDefaultSerName()); - } - } - - tbl.getTTable().setTemporary(crtTbl.isTemporary()); - tbl.getTTable().unsetId(); - - if (crtTbl.isExternal()) { - tbl.setProperty("EXTERNAL", "TRUE"); - tbl.setTableType(TableType.EXTERNAL_TABLE); - // partition discovery is on by default - tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); - } else { - tbl.getParameters().remove("EXTERNAL"); - } - } - - // If location is specified - ensure that it is a full qualified name - if (DDLTask.doesTableNeedLocation(tbl)) { - makeLocationQualified(tbl.getDbName(), tbl, conf); - } - - if (crtTbl.getLocation() == null && !tbl.isPartitioned() - && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), - MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); - } - - // create the table - db.createTable(tbl, crtTbl.getIfNotExists()); - addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); - return 0; - } - /** * Create a new view. 
* @@ -4725,50 +3503,6 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { return 0; } - private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException { - if (truncateTableDesc.getColumnIndexes() != null) { - ColumnTruncateWork truncateWork = new ColumnTruncateWork( - truncateTableDesc.getColumnIndexes(), truncateTableDesc.getInputDir(), - truncateTableDesc.getOutputDir()); - truncateWork.setListBucketingCtx(truncateTableDesc.getLbCtx()); - truncateWork.setMapperCannotSpanPartns(true); - DriverContext driverCxt = new DriverContext(); - ColumnTruncateTask taskExec = new ColumnTruncateTask(); - taskExec.initialize(queryState, null, driverCxt, null); - taskExec.setWork(truncateWork); - taskExec.setQueryPlan(this.getQueryPlan()); - subtask = taskExec; - int ret = taskExec.execute(driverCxt); - if (subtask.getException() != null) { - setException(subtask.getException()); - } - return ret; - } - - String tableName = truncateTableDesc.getTableName(); - Map partSpec = truncateTableDesc.getPartSpec(); - - ReplicationSpec replicationSpec = truncateTableDesc.getReplicationSpec(); - if (!allowOperationInReplicationScope(db, tableName, partSpec, replicationSpec)) { - // no truncate, the table is missing either due to drop/rename which follows the truncate. - // or the existing table is newer than our update. - if (LOG.isDebugEnabled()) { - LOG.debug("DDLTask: Truncate Table/Partition is skipped as table {} / partition {} is newer than update", - tableName, - (partSpec == null) ? "null" : FileUtils.makePartName(new ArrayList<>(partSpec.keySet()), new ArrayList<>(partSpec.values()))); - } - return 0; - } - - try { - db.truncateTable(tableName, partSpec, - replicationSpec != null && replicationSpec.isInReplicationScope() ? truncateTableDesc.getWriteId() : 0L); - } catch (Exception e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR); - } - return 0; - } - private int exchangeTablePartition(Hive db, AlterTableExchangePartition exchangePartition) throws HiveException { Map partitionSpecs = exchangePartition.getPartitionSpecs(); @@ -4803,32 +3537,6 @@ public String getName() { return "DDL"; } - /** - * Make location in specified sd qualified. - * - * @param databaseName - * Database name. - */ - public static void makeLocationQualified(String databaseName, Table table, HiveConf conf) throws HiveException { - Path path = null; - StorageDescriptor sd = table.getTTable().getSd(); - // If the table's location is currently unset, it is left unset, allowing the metastore to - // fill in the table's location. - // Note that the previous logic for some reason would make a special case if the DB was the - // default database, and actually attempt to generate a location. - // This seems incorrect and uncessary, since the metastore is just as able to fill in the - // default table location in the case of the default DB, as it is for non-default DBs. - if (sd.isSetLocation()) - { - path = new Path(sd.getLocation()); - } - - if (path != null) - { - sd.setLocation(Utilities.getQualifiedPath(conf, path)); - } - } - /** * Validate if the given table/partition is eligible for update * @@ -4868,21 +3576,6 @@ private boolean allowOperationInReplicationScope(Hive db, String tableName, return false; } - public static boolean doesTableNeedLocation(Table tbl) { - // TODO: If we are ok with breaking compatibility of existing 3rd party StorageHandlers, - // this method could be moved to the HiveStorageHandler interface. 
- boolean retval = true; - if (tbl.getStorageHandler() != null) { - // TODO: why doesn't this check class name rather than toString? - String sh = tbl.getStorageHandler().toString(); - retval = !sh.equals("org.apache.hadoop.hive.hbase.HBaseStorageHandler") - && !sh.equals(Constants.DRUID_HIVE_STORAGE_HANDLER_ID) - && !sh.equals(Constants.JDBC_HIVE_STORAGE_HANDLER_ID) - && !sh.equals("org.apache.hadoop.hive.accumulo.AccumuloStorageHandler"); - } - return retval; - } - private int remFirstIncPendFlag(Hive hive, ReplRemoveFirstIncLoadPendFlagDesc desc) throws HiveException, TException { String dbNameOrPattern = desc.getDatabaseName(); String tableNameOrPattern = desc.getTableName(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java index 3308797996..956c4ffabc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java @@ -617,7 +617,7 @@ public Throwable getException() { return exception; } - protected void setException(Throwable ex) { + public void setException(Throwable ex) { exception = ex; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index c1773c93cc..0add38b213 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; @@ -349,8 +349,8 @@ private Path locationOnReplicaWarehouse(Table table, AddPartitionDesc.OnePartiti Map> partSpecsExpr = ReplUtils.genPartSpecs(table, Collections.singletonList(partSpec)); if (partSpecsExpr.size() > 0) { - DropTableDesc dropPtnDesc = new DropTableDesc(table.getFullyQualifiedName(), - partSpecsExpr, null, true, event.replicationSpec()); + DropPartitionDesc dropPtnDesc = new DropPartitionDesc(table.getFullyQualifiedName(), partSpecsExpr, true, + event.replicationSpec()); dropPtnTask = TaskFactory.get( new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf ); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java index 3b0b67aeff..b335f199f8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; import org.apache.hadoop.hive.ql.exec.ReplCopyTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -45,8 +47,6 @@ import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import 
org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; @@ -325,6 +325,6 @@ static TableLocationTuple tableLocation(ImportTableDesc tblDesc, Database parent assert(table != null); DropTableDesc dropTblDesc = new DropTableDesc(table.getFullyQualifiedName(), table.getTableType(), true, false, event.replicationSpec()); - return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf); + return TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 3961baa82a..50a233d5de 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.Entity; import org.apache.hadoop.hive.ql.hooks.ReadEntity; @@ -68,7 +69,6 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.HadoopShims; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 4aea8722b6..800d80a067 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -41,13 +41,13 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; import org.apache.thrift.TException; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index 43dba73c6d..bb46bf9490 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -26,11 +26,11 @@ import org.apache.hadoop.hive.ql.Driver.LockedDriverState; import 
org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import java.util.List; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java index 0abec56654..0e148ed396 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hive.ql.Driver.LockedDriverState; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -38,8 +40,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; /** * An implementation HiveTxnManager that includes internal methods that all diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 17576ffe9e..33d157d4da 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -127,7 +127,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentMaterializationRule; import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; @@ -3363,7 +3363,7 @@ public boolean dropPartition(String dbName, String tableName, List partV public List dropPartitions(Table table, ListpartDirNames, boolean deleteData, boolean ifExists) throws HiveException { // partitions to be dropped in this batch - List partSpecs = new ArrayList<>(partDirNames.size()); + List partSpecs = new ArrayList<>(partDirNames.size()); // parts of the partition String[] parts = null; @@ -3413,7 +3413,7 @@ public boolean dropPartition(String dbName, String tableName, List partV } // Add the expression to partition specification - partSpecs.add(new DropTableDesc.PartSpec(expr, partSpecKey)); + partSpecs.add(new DropPartitionDesc.PartSpec(expr, partSpecKey)); // Increment dropKey to get a new key for hash map ++partSpecKey; @@ -3423,14 +3423,14 @@ public boolean dropPartition(String dbName, String tableName, List partV return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists); } - public List dropPartitions(String tblName, List partSpecs, + public List 
dropPartitions(String tblName, List partSpecs, boolean deleteData, boolean ifExists) throws HiveException { String[] names = Utilities.getDbTableName(tblName); return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists); } public List dropPartitions(String dbName, String tblName, - List partSpecs, boolean deleteData, + List partSpecs, boolean deleteData, boolean ifExists) throws HiveException { return dropPartitions(dbName, tblName, partSpecs, PartitionDropOptions.instance() @@ -3438,19 +3438,19 @@ public boolean dropPartition(String dbName, String tableName, List partV .ifExists(ifExists)); } - public List dropPartitions(String tblName, List partSpecs, + public List dropPartitions(String tblName, List partSpecs, PartitionDropOptions dropOptions) throws HiveException { String[] names = Utilities.getDbTableName(tblName); return dropPartitions(names[0], names[1], partSpecs, dropOptions); } public List dropPartitions(String dbName, String tblName, - List partSpecs, PartitionDropOptions dropOptions) throws HiveException { + List partSpecs, PartitionDropOptions dropOptions) throws HiveException { try { Table tbl = getTable(dbName, tblName); List> partExprs = new ArrayList<>(partSpecs.size()); - for (DropTableDesc.PartSpec partSpec : partSpecs) { + for (DropPartitionDesc.PartSpec partSpec : partSpecs) { partExprs.add(new org.apache.hadoop.hive.metastore.utils.ObjectPair<>(partSpec.getPrefixLength(), SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec()))); } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java index a3ae886c6e..f28d68fafe 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; @@ -490,7 +491,7 @@ public void showDatabases(DataOutputStream out, List databases) */ @Override public void showDatabaseDescription(DataOutputStream out, String database, String comment, - String location, String ownerName, String ownerType, Map params) + String location, String ownerName, PrincipalType ownerType, Map params) throws HiveException { MapBuilder builder = MapBuilder.create().put("database", database).put("comment", comment) .put("location", location); @@ -498,7 +499,7 @@ public void showDatabaseDescription(DataOutputStream out, String database, Strin builder.put("owner", ownerName); } if (null != ownerType) { - builder.put("ownerType", ownerType); + builder.put("ownerType", ownerType.name()); } if (null != params && !params.isEmpty()) { builder.put("params", params); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index 4180dc471d..c9dd8541c2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -42,6 +42,7 @@ 
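For orientation, a minimal sketch of a caller of the reworked dropPartitions overloads above; the Hive handle, predicate expression and prefix length are assumed to come from surrounding code, and this snippet is not part of the patch itself:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

class DropPartitionsCallerSketch {
  // Builds the PartSpec list the new overloads expect and drops the matching
  // partitions, deleting data and ignoring partitions that no longer exist.
  List<Partition> dropMatching(Hive db, String tableName, ExprNodeGenericFuncDesc expr,
      int prefixLength) throws HiveException {
    List<DropPartitionDesc.PartSpec> partSpecs = new ArrayList<>();
    partSpecs.add(new DropPartitionDesc.PartSpec(expr, prefixLength));
    return db.dropPartitions(tableName, partSpecs, true /* deleteData */, true /* ifExists */);
  }
}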
import org.apache.hadoop.hive.metastore.api.WMPoolTrigger; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc; import org.apache.hadoop.hive.ql.metadata.CheckConstraint; import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; @@ -53,7 +54,6 @@ import org.apache.hadoop.hive.ql.metadata.UniqueConstraint; import org.apache.hadoop.hive.ql.metadata.UniqueConstraint.UniqueConstraintCol; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo.ForeignKeyCol; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.serde2.io.DateWritableV2; import org.apache.hive.common.util.HiveStringUtils; @@ -764,7 +764,7 @@ public static MetaDataFormatter getFormatter(HiveConf conf) { if ("json".equals(conf.get(HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text"))) { return new JsonMetaDataFormatter(); } else { - return new TextMetaDataFormatter(conf.getIntVar(HiveConf.ConfVars.CLIPRETTYOUTPUTNUMCOLS), conf.getBoolVar(ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY)); + return new TextMetaDataFormatter(conf.getBoolVar(ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY)); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java index 80e3d8b200..b7e5ebe7c0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java @@ -20,15 +20,14 @@ import java.io.DataOutputStream; import java.io.OutputStream; -import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; -import javax.annotation.Nullable; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; @@ -136,9 +135,9 @@ public void showDatabases(DataOutputStream out, List databases) /** * Describe a database. 
*/ - public void showDatabaseDescription (DataOutputStream out, String database, String comment, - String location, String ownerName, String ownerType, Map params) - throws HiveException; + void showDatabaseDescription(DataOutputStream out, String database, String comment, String location, + String ownerName, PrincipalType ownerType, Map params) + throws HiveException; void showResourcePlans(DataOutputStream out, List resourcePlans) throws HiveException; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java index fbeb9c8f64..f7704bdd13 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java @@ -29,10 +29,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hive.common.util.HiveStringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,6 +43,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; @@ -73,14 +72,9 @@ private static final int separator = Utilities.tabCode; private static final int terminator = Utilities.newLineCode; - /** The number of columns to be used in pretty formatting metadata output. - * If -1, then the current terminal width is auto-detected and used. 
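With the interface change above, callers can hand the metastore enum straight through and let each formatter decide how to render it (both implementations in this patch call PrincipalType.name()). A hedged sketch of such a caller, assuming the standard metastore Database getters; the formatter and stream are placeholders:

import java.io.DataOutputStream;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter;

class DescribeDatabaseSketch {
  // Passes the owner type as the PrincipalType enum instead of a pre-rendered String.
  void describe(MetaDataFormatter formatter, DataOutputStream out, Database db) throws HiveException {
    formatter.showDatabaseDescription(out, db.getName(), db.getDescription(), db.getLocationUri(),
        db.getOwnerName(), db.getOwnerType(), db.getParameters());
  }
}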
- */ - private final int prettyOutputNumCols; private final boolean showPartColsSeparately; - public TextMetaDataFormatter(int prettyOutputNumCols, boolean partColsSeparately) { - this.prettyOutputNumCols = prettyOutputNumCols; + public TextMetaDataFormatter(boolean partColsSeparately) { this.showPartColsSeparately = partColsSeparately; } @@ -629,7 +623,7 @@ public void showDatabases(DataOutputStream outStream, List databases) */ @Override public void showDatabaseDescription(DataOutputStream outStream, String database, String comment, - String location, String ownerName, String ownerType, Map params) + String location, String ownerName, PrincipalType ownerType, Map params) throws HiveException { try { outStream.write(database.getBytes("UTF-8")); @@ -647,7 +641,7 @@ public void showDatabaseDescription(DataOutputStream outStream, String database, } outStream.write(separator); if (ownerType != null) { - outStream.write(ownerType.getBytes("UTF-8")); + outStream.write(ownerType.name().getBytes("UTF-8")); } outStream.write(separator); if (params != null && !params.isEmpty()) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java index cf54aa3709..3f5b0e3e36 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.optimizer; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.OperatorUtils; @@ -100,22 +101,23 @@ else if(work instanceof ExplainWork) { new QueryPlanPostProcessor(((ExplainWork)work).getRootTasks(), acidSinks, executionId); } else if(work instanceof ReplLoadWork || - work instanceof ReplStateLogWork || - work instanceof GenTezWork || - work instanceof GenSparkWork || - work instanceof ArchiveWork || - work instanceof ColumnStatsUpdateWork || - work instanceof BasicStatsWork || - work instanceof ConditionalWork || - work instanceof CopyWork || - work instanceof DDLWork || - work instanceof DependencyCollectionWork || - work instanceof ExplainSQRewriteWork || - work instanceof FetchWork || - work instanceof FunctionWork || - work instanceof MoveWork || - work instanceof BasicStatsNoJobWork || - work instanceof StatsWork) { + work instanceof ReplStateLogWork || + work instanceof GenTezWork || + work instanceof GenSparkWork || + work instanceof ArchiveWork || + work instanceof ColumnStatsUpdateWork || + work instanceof BasicStatsWork || + work instanceof ConditionalWork || + work instanceof CopyWork || + work instanceof DDLWork || + work instanceof DDLWork2 || + work instanceof DependencyCollectionWork || + work instanceof ExplainSQRewriteWork || + work instanceof FetchWork || + work instanceof FunctionWork || + work instanceof MoveWork || + work instanceof BasicStatsNoJobWork || + work instanceof StatsWork) { LOG.debug("Found " + work.getClass().getName() + " - no FileSinkOperation can be present. 
executionId=" + executionId); } else { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java index 4b2958af2b..960dd34a8c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java @@ -36,7 +36,10 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; import org.apache.hadoop.hive.ql.exec.StatsTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -47,9 +50,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.ExportWork; import org.apache.hadoop.hive.ql.session.SessionState; @@ -151,7 +152,7 @@ private void analyzeAcidExport(ASTNode ast) throws SemanticException { try { ReadEntity dbForTmpTable = new ReadEntity(db.getDatabase(exportTable.getDbName())); inputs.add(dbForTmpTable); //so the plan knows we are 'reading' this db - locks, security... - DDLTask createTableTask = (DDLTask) TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), ctlt), conf); + DDLTask2 createTableTask = (DDLTask2) TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), ctlt), conf); createTableTask.setConf(conf); //above get() doesn't set it createTableTask.execute(new DriverContext(new Context(conf))); newTable = db.getTable(newTableName); @@ -199,7 +200,7 @@ private void analyzeAcidExport(ASTNode ast) throws SemanticException { // {@link DDLSemanticAnalyzer#analyzeDropTable(ASTNode ast, TableType expectedType) ReplicationSpec replicationSpec = new ReplicationSpec(); DropTableDesc dropTblDesc = new DropTableDesc(newTableName, TableType.MANAGED_TABLE, false, true, replicationSpec); - Task dropTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), conf); + Task dropTask = TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), dropTblDesc), conf); exportTask.addDependentTask(dropTask); markReadEntityForUpdate(); if (ctx.isExplainPlan()) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 4a542aef85..baf635633d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -76,9 +76,19 @@ import org.apache.hadoop.hive.ql.ddl.database.DescDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.DropDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.ShowCreateDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.ddl.database.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; +import 
org.apache.hadoop.hive.ql.ddl.table.DescTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowCreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowTableStatusDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowTablesDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowTablePropertiesDesc; +import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -131,9 +141,8 @@ import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; @@ -145,7 +154,6 @@ import org.apache.hadoop.hive.ql.plan.KillQueryDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; @@ -155,21 +163,14 @@ import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; -import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; -import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde.serdeConstants; @@ -1445,8 +1446,7 @@ private void analyzeDropTable(ASTNode ast, TableType expectedType) boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null); DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectedType, ifExists, ifPurge, replicationSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - dropTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), dropTblDesc))); } private void analyzeTruncateTable(ASTNode ast) throws SemanticException { @@ -1493,7 +1493,7 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { setAcidDdlDesc(truncateTblDesc); } - DDLWork 
ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc); + DDLWork2 ddlWork = new DDLWork2(getInputs(), getOutputs(), truncateTblDesc); Task truncateTask = TaskFactory.get(ddlWork); // Is this a truncate column command @@ -2518,26 +2518,26 @@ private void analyzeDescribeTable(ASTNode ast) throws SemanticException { validateTable(tableName, partSpec); } - DescTableDesc descTblDesc = new DescTableDesc( - ctx.getResFile(), tableName, partSpec, colPath); - boolean showColStats = false; + boolean isFormatted = false; + boolean isExt = false; if (ast.getChildCount() == 2) { int descOptions = ast.getChild(1).getType(); - descTblDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED); - descTblDesc.setExt(descOptions == HiveParser.KW_EXTENDED); + isFormatted = descOptions == HiveParser.KW_FORMATTED; + isExt = descOptions == HiveParser.KW_EXTENDED; // in case of "DESCRIBE FORMATTED tablename column_name" statement, colPath // will contain tablename.column_name. If column_name is not specified // colPath will be equal to tableName. This is how we can differentiate // if we are describing a table or column - if (!colPath.equalsIgnoreCase(tableName) && descTblDesc.isFormatted()) { + if (!colPath.equalsIgnoreCase(tableName) && isFormatted) { showColStats = true; } } inputs.add(new ReadEntity(getTable(tableName))); - Task ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - descTblDesc)); + + DescTableDesc descTblDesc = new DescTableDesc(ctx.getResFile(), tableName, partSpec, colPath, isExt, isFormatted); + Task ddlTask = TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), descTblDesc)); rootTasks.add(ddlTask); String schema = DescTableDesc.getSchema(showColStats); setFetchTask(createFetchTask(schema)); @@ -2620,14 +2620,12 @@ private void analyzeShowPartitions(ASTNode ast) throws SemanticException { private void analyzeShowCreateDatabase(ASTNode ast) throws SemanticException { String dbName = getUnescapedName((ASTNode)ast.getChild(0)); - ShowCreateDatabaseDesc showCreateDbDesc = - new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString()); + ShowCreateDatabaseDesc showCreateDbDesc = new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString()); Database database = getDatabase(dbName); inputs.add(new ReadEntity(database)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showCreateDbDesc))); - setFetchTask(createFetchTask(showCreateDbDesc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showCreateDbDesc))); + setFetchTask(createFetchTask(ShowCreateDatabaseDesc.SCHEMA)); } @@ -2638,9 +2636,8 @@ private void analyzeShowCreateTable(ASTNode ast) throws SemanticException { Table tab = getTable(tableName); inputs.add(new ReadEntity(tab)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showCreateTblDesc))); - setFetchTask(createFetchTask(showCreateTblDesc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showCreateTblDesc))); + setFetchTask(createFetchTask(ShowCreateTableDesc.SCHEMA)); } private void analyzeShowDatabases(ASTNode ast) throws SemanticException { @@ -2686,8 +2683,7 @@ private void analyzeShowTables(ASTNode ast) throws SemanticException { showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames, tableTypeFilter, isExtended); inputs.add(new ReadEntity(getDatabase(dbName))); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showTblsDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), 
getOutputs(), showTblsDesc))); setFetchTask(createFetchTask(showTblsDesc.getSchema())); } @@ -2763,15 +2759,13 @@ private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { validateTable(tableNames, partSpec); } - showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile().toString(), dbName, - tableNames, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showTblStatusDesc))); - setFetchTask(createFetchTask(showTblStatusDesc.getSchema())); + showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile().toString(), dbName, tableNames, partSpec); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showTblStatusDesc))); + setFetchTask(createFetchTask(ShowTableStatusDesc.SCHEMA)); } private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { - ShowTblPropertiesDesc showTblPropertiesDesc; + ShowTablePropertiesDesc showTblPropertiesDesc; String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); String propertyName = null; if (ast.getChildCount() > 1) { @@ -2781,11 +2775,9 @@ private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { String tableNames = getDotName(qualified); validateTable(tableNames, null); - showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile().toString(), tableNames, - propertyName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showTblPropertiesDesc))); - setFetchTask(createFetchTask(showTblPropertiesDesc.getSchema())); + showTblPropertiesDesc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), tableNames, propertyName); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showTblPropertiesDesc))); + setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA)); } /** @@ -2920,8 +2912,7 @@ private void analyzeShowViews(ASTNode ast) throws SemanticException { assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); dbName = unescapeIdentifier(ast.getChild(1).getText()); validateDatabase(dbName); - showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - showViewsDesc.setType(TableType.VIRTUAL_VIEW); + showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.VIRTUAL_VIEW); break; case 3: // Uses a pattern and specifies a DB assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); @@ -2931,13 +2922,11 @@ private void analyzeShowViews(ASTNode ast) throws SemanticException { showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, viewNames, TableType.VIRTUAL_VIEW); break; default: // No pattern or DB - showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - showViewsDesc.setType(TableType.VIRTUAL_VIEW); + showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.VIRTUAL_VIEW); break; } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showViewsDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showViewsDesc))); setFetchTask(createFetchTask(showViewsDesc.getSchema())); } @@ -2960,8 +2949,7 @@ private void analyzeShowMaterializedViews(ASTNode ast) throws SemanticException assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); dbName = unescapeIdentifier(ast.getChild(1).getText()); validateDatabase(dbName); - showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - showMaterializedViewsDesc.setType(TableType.MATERIALIZED_VIEW); + showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.MATERIALIZED_VIEW); break; case 3: // Uses a pattern and 
specifies a DB assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); @@ -2972,13 +2960,11 @@ private void analyzeShowMaterializedViews(ASTNode ast) throws SemanticException ctx.getResFile(), dbName, materializedViewNames, TableType.MATERIALIZED_VIEW); break; default: // No pattern or DB - showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - showMaterializedViewsDesc.setType(TableType.MATERIALIZED_VIEW); + showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.MATERIALIZED_VIEW); break; } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showMaterializedViewsDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showMaterializedViewsDesc))); setFetchTask(createFetchTask(showMaterializedViewsDesc.getSchema())); } @@ -3005,10 +2991,8 @@ private void analyzeLockTable(ASTNode ast) } LockTableDesc lockTblDesc = new LockTableDesc(tableName, mode, partSpec, - HiveConf.getVar(conf, ConfVars.HIVEQUERYID)); - lockTblDesc.setQueryStr(this.ctx.getCmd()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - lockTblDesc))); + HiveConf.getVar(conf, ConfVars.HIVEQUERYID), ctx.getCmd()); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), lockTblDesc))); // Need to initialize the lock manager ctx.setNeedLockMgr(true); @@ -3107,8 +3091,7 @@ private void analyzeUnlockTable(ASTNode ast) } UnlockTableDesc unlockTblDesc = new UnlockTableDesc(tableName, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - unlockTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), unlockTblDesc))); // Need to initialize the lock manager ctx.setNeedLockMgr(true); @@ -3438,9 +3421,8 @@ private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists); - DropTableDesc dropTblDesc = - new DropTableDesc(getDotName(qualified), partSpecs, expectView ? 
TableType.VIRTUAL_VIEW : null, - mustPurge, replicationSpec); + DropPartitionDesc dropTblDesc = + new DropPartitionDesc(getDotName(qualified), partSpecs, mustPurge, replicationSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc))); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index b6b4f585a8..cb9584c1d1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -38,6 +38,8 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; import org.apache.hadoop.hive.ql.exec.ReplCopyTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -58,7 +60,6 @@ import org.apache.hadoop.hive.ql.plan.CopyWork; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType; @@ -565,7 +566,7 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, ReplicationSpec replicationSpec) { DropTableDesc dropTblDesc = new DropTableDesc(table.getTableName(), table.getTableType(), true, false, replicationSpec); - return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf()); + return TaskFactory.get(new DDLWork2(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf()); } private static Task alterTableTask(ImportTableDesc tableDesc, diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 7b30b59377..77e181863e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.QueryProperties; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.GroupByOperator; @@ -43,7 +44,6 @@ import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java index a2f6fbbcaa..0405ee8f02 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java @@ -30,8 +30,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.metadata.Table; 
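The same three-step pattern recurs throughout the analyzer and replication changes in this patch: build a desc from the new org.apache.hadoop.hive.ql.ddl packages, wrap it in DDLWork2, and hand it to TaskFactory. A condensed sketch with hypothetical entity sets and conf; the argument order follows the DropTableDesc calls visible in this patch:

import java.util.HashSet;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

class DropTableTaskSketch {
  // (tableName, expectedType, ifExists, ifPurge, replicationSpec)
  Task<?> dropTableTask(String tableName, HiveConf conf) {
    DropTableDesc desc = new DropTableDesc(tableName, TableType.MANAGED_TABLE,
        true /* ifExists */, false /* ifPurge */, new ReplicationSpec());
    return TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), desc), conf);
  }
}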
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; /** diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 05257c9aa7..18dbbb219b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -98,6 +98,10 @@ import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.cache.results.CacheUsage; import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc; +import org.apache.hadoop.hive.ql.ddl.table.PreInsertTableDesc; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnInfo; @@ -190,8 +194,6 @@ import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; @@ -8079,7 +8081,7 @@ private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, private void createPreInsertDesc(Table table, boolean overwrite) { PreInsertTableDesc preInsertTableDesc = new PreInsertTableDesc(table, overwrite); this.rootTasks - .add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), preInsertTableDesc))); + .add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), preInsertTableDesc))); } @@ -12488,10 +12490,10 @@ void analyzeInternal(ASTNode ast, PlannerContextFactory pcf) throws SemanticExce if (optionalTezTask.isPresent()) { final TezTask tezTask = optionalTezTask.get(); rootTasks.stream() - .filter(task -> task.getWork() instanceof DDLWork) - .map(task -> (DDLWork) task.getWork()) - .filter(ddlWork -> ddlWork.getPreInsertTableDesc() != null) - .map(ddlWork -> ddlWork.getPreInsertTableDesc()) + .filter(task -> task.getWork() instanceof DDLWork2) + .map(task -> (DDLWork2) task.getWork()) + .filter(ddlWork -> ddlWork.getDDLDesc() != null) + .map(ddlWork -> (PreInsertTableDesc)ddlWork.getDDLDesc()) .map(ddlPreInsertTask -> new InsertCommitHookDesc(ddlPreInsertTask.getTable(), ddlPreInsertTask.isOverwrite())) .forEach(insertCommitHookDesc -> tezTask.addDependentTask( @@ -13434,8 +13436,7 @@ ASTNode analyzeCreateTable( crtTblDesc.validate(conf); // outputs is empty, which means this create table happens in the current // database. - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - crtTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), crtTblDesc))); break; case ctt: // CREATE TRANSACTIONAL TABLE if (isExt) { @@ -13459,7 +13460,7 @@ ASTNode analyzeCreateTable( crtTranTblDesc.validate(conf); // outputs is empty, which means this create table happens in the current // database. 
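Because DDLWork2 carries a single generic desc rather than one typed field per operation, consumers such as the Tez commit-hook wiring above recover the concrete type by filtering and casting. A small sketch of that lookup, assuming the usual rootTasks list; not code from this patch:

import java.io.Serializable;
import java.util.List;
import java.util.stream.Stream;
import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.table.PreInsertTableDesc;
import org.apache.hadoop.hive.ql.exec.Task;

class PreInsertLookupSketch {
  // Yields every PreInsertTableDesc carried by a DDLWork2 root task.
  static Stream<PreInsertTableDesc> preInserts(List<Task<? extends Serializable>> rootTasks) {
    return rootTasks.stream()
        .filter(task -> task.getWork() instanceof DDLWork2)
        .map(task -> (DDLWork2) task.getWork())
        .map(DDLWork2::getDDLDesc)
        .filter(PreInsertTableDesc.class::isInstance)
        .map(PreInsertTableDesc.class::cast);
  }
}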
- rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTranTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), crtTranTblDesc))); break; case CTLT: // create table like @@ -13478,8 +13479,7 @@ ASTNode analyzeCreateTable( storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists, likeTableName, isUserStorageFormat); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - crtTblLikeDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), crtTblLikeDesc))); break; case CTAS: // create table as select diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 8a51e21898..0b6ff524b1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -32,6 +32,8 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.DDLTask; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; @@ -59,7 +61,6 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec; import org.apache.hadoop.hive.ql.plan.BasicStatsWork; import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.FetchWork; @@ -358,8 +359,7 @@ public void compile(final ParseContext pCtx, // generate a DDL task and make it a dependent task of the leaf CreateTableDesc crtTblDesc = pCtx.getCreateTable(); crtTblDesc.validate(conf); - Task crtTblTask = TaskFactory.get(new DDLWork( - inputs, outputs, crtTblDesc)); + Task crtTblTask = TaskFactory.get(new DDLWork2(inputs, outputs, crtTblDesc)); patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask, CollectionUtils.isEmpty(crtTblDesc.getPartColNames())); } else if (pCtx.getQueryProperties().isMaterializedView()) { // generate a DDL task and make it a dependent task of the leaf diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java index b95a35a688..5e88b6ebae 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import java.io.Serializable; @@ -43,8 +43,8 @@ Map> partSpecs = ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions()); if (partSpecs.size() > 0) { - DropTableDesc dropPtnDesc = new DropTableDesc(actualDbName + "." 
+ actualTblName, - partSpecs, null, true, context.eventOnlyReplicationSpec()); + DropPartitionDesc dropPtnDesc = new DropPartitionDesc(actualDbName + "." + actualTblName, partSpecs, true, + context.eventOnlyReplicationSpec()); Task dropPtnTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc), context.hiveConf ); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java index 62784e950d..edef74e618 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hive.ql.parse.repl.load.message; import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import java.io.Serializable; import java.util.Collections; @@ -39,8 +39,8 @@ actualDbName + "." + actualTblName, null, true, true, context.eventOnlyReplicationSpec(), false ); - Task dropTableTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, dropTableDesc), context.hiveConf + Task dropTableTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, dropTableDesc), context.hiveConf ); context.log.debug( "Added drop tbl task : {}:{}", dropTableTask.getId(), dropTableDesc.getTableName() diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java index dec6ed5ccc..05a9f9123f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java @@ -19,12 +19,12 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; import java.io.Serializable; import java.util.Iterator; @@ -59,8 +59,8 @@ actualDbName + "." 
+ actualTblName, partSpec, context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); - Task truncatePtnTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); + Task truncatePtnTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); context.log.debug("Added truncate ptn task : {}:{}:{}", truncatePtnTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId()); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, partSpec); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java index f037cbb08b..5ef66fafa4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java @@ -18,12 +18,12 @@ package org.apache.hadoop.hive.ql.parse.repl.load.message; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; import java.io.Serializable; import java.util.List; @@ -39,8 +39,8 @@ actualDbName + "." + actualTblName, null, context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); - Task truncateTableTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); + Task truncateTableTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); context.log.debug("Added truncate tbl task : {}:{}:{}", truncateTableTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId()); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 6527e52cae..2b653a5d21 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; -import org.apache.hadoop.hive.ql.parse.PreInsertTableDesc; import org.apache.hadoop.hive.ql.plan.Explain.Level; import java.io.Serializable; @@ -34,19 +33,12 @@ private static final long serialVersionUID = 1L; // TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates. 
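The TODO above is what the DDLWork2 migration in this patch works toward: instead of one field, one constructor and one @Explain getter per DDL operation, the wrapper holds a single desc. The real DDLWork2 source is outside this hunk; the following is only an illustrative shape under assumed names, not the actual class:

import java.io.Serializable;
import java.util.HashSet;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

// Not the actual DDLWork2 -- a sketch of the generic-wrapper idea.
class GenericDdlWorkSketch implements Serializable {
  private static final long serialVersionUID = 1L;

  private final HashSet<ReadEntity> inputs;
  private final HashSet<WriteEntity> outputs;
  private final Serializable ddlDesc; // one field replaces the dozens removed below

  GenericDdlWorkSketch(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, Serializable ddlDesc) {
    this.inputs = inputs;
    this.outputs = outputs;
    this.ddlDesc = ddlDesc;
  }

  Serializable getDDLDesc() {
    return ddlDesc;
  }

  HashSet<ReadEntity> getInputs() {
    return inputs;
  }

  HashSet<WriteEntity> getOutputs() {
    return outputs;
  }
}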
- private PreInsertTableDesc preInsertTableDesc; private InsertCommitHookDesc insertCommitHookDesc; private AlterMaterializedViewDesc alterMVDesc; - private CreateTableDesc createTblDesc; - private CreateTableLikeDesc createTblLikeDesc; private CreateViewDesc createVwDesc; - private DropTableDesc dropTblDesc; + private DropPartitionDesc dropPartitionDesc; private AlterTableDesc alterTblDesc; - private ShowTablesDesc showTblsDesc; private ShowColumnsDesc showColumnsDesc; - private ShowTblPropertiesDesc showTblPropertiesDesc; - private LockTableDesc lockTblDesc; - private UnlockTableDesc unlockTblDesc; private ShowFunctionsDesc showFuncsDesc; private ShowLocksDesc showLocksDesc; private ShowCompactionsDesc showCompactionsDesc; @@ -54,16 +46,11 @@ private AbortTxnsDesc abortTxnsDesc; private DescFunctionDesc descFunctionDesc; private ShowPartitionsDesc showPartsDesc; - private ShowCreateDatabaseDesc showCreateDbDesc; - private ShowCreateTableDesc showCreateTblDesc; - private DescTableDesc descTblDesc; private AddPartitionDesc addPartitionDesc; private RenamePartitionDesc renamePartitionDesc; private AlterTableSimpleDesc alterTblSimpleDesc; private MsckDesc msckDesc; - private ShowTableStatusDesc showTblStatusDesc; private AlterTableAlterPartDesc alterTableAlterPartDesc; - private TruncateTableDesc truncateTblDesc; private AlterTableExchangePartition alterTableExchangePartition; private KillQueryDesc killQueryDesc; @@ -115,12 +102,6 @@ public DDLWork(HashSet inputs, HashSet outputs) { this.outputs = outputs; } - public DDLWork(HashSet inputs, HashSet outputs, - TruncateTableDesc truncateTblDesc) { - this(inputs, outputs); - this.truncateTblDesc = truncateTblDesc; - } - public DDLWork(HashSet inputs, HashSet outputs, ShowConfDesc showConfDesc) { this(inputs, outputs); @@ -147,28 +128,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.alterMVDesc = alterMVDesc; } - /** - * @param createTblDesc - * create table descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - CreateTableDesc createTblDesc) { - this(inputs, outputs); - - this.createTblDesc = createTblDesc; - } - - /** - * @param createTblLikeDesc - * create table like descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - CreateTableLikeDesc createTblLikeDesc) { - this(inputs, outputs); - - this.createTblLikeDesc = createTblLikeDesc; - } - /** * @param createVwDesc * create view descriptor @@ -185,30 +144,10 @@ public DDLWork(HashSet inputs, HashSet outputs, * drop table descriptor */ public DDLWork(HashSet inputs, HashSet outputs, - DropTableDesc dropTblDesc) { - this(inputs, outputs); - - this.dropTblDesc = dropTblDesc; - } - - /** - * @param descTblDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - DescTableDesc descTblDesc) { - this(inputs, outputs); - - this.descTblDesc = descTblDesc; - } - - /** - * @param showTblsDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowTablesDesc showTblsDesc) { + DropPartitionDesc dropPartitionDesc) { this(inputs, outputs); - this.showTblsDesc = showTblsDesc; + this.dropPartitionDesc = dropPartitionDesc; } /** @@ -221,26 +160,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.showColumnsDesc = showColumnsDesc; } - /** - * @param lockTblDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - LockTableDesc lockTblDesc) { - this(inputs, outputs); - - this.lockTblDesc = lockTblDesc; - } - - /** - * @param unlockTblDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - UnlockTableDesc unlockTblDesc) { - this(inputs, outputs); - - 
this.unlockTblDesc = unlockTblDesc; - } - /** * @param showFuncsDesc */ @@ -299,26 +218,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.showPartsDesc = showPartsDesc; } - /** - * @param showCreateDbDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowCreateDatabaseDesc showCreateDbDesc) { - this(inputs, outputs); - - this.showCreateDbDesc = showCreateDbDesc; - } - - /** - * @param showCreateTblDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowCreateTableDesc showCreateTblDesc) { - this(inputs, outputs); - - this.showCreateTblDesc = showCreateTblDesc; - } - /** * @param addPartitionDesc * information about the partitions we want to add. @@ -360,28 +259,6 @@ public DDLWork(HashSet inputs, HashSet outputs, msckDesc = checkDesc; } - /** - * @param showTblStatusDesc - * show table status descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowTableStatusDesc showTblStatusDesc) { - this(inputs, outputs); - - this.showTblStatusDesc = showTblStatusDesc; - } - - /** - * @param showTblPropertiesDesc - * show table properties descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowTblPropertiesDesc showTblPropertiesDesc) { - this(inputs, outputs); - - this.showTblPropertiesDesc = showTblPropertiesDesc; - } - public DDLWork(HashSet inputs, HashSet outputs, RoleDDLDesc roleDDLDesc) { this(inputs, outputs); @@ -443,12 +320,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.insertCommitHookDesc = insertCommitHookDesc; } - public DDLWork(HashSet inputs, HashSet outputs, - PreInsertTableDesc preInsertTableDesc) { - this(inputs, outputs); - this.preInsertTableDesc = preInsertTableDesc; - } - public DDLWork(HashSet inputs, HashSet outputs, KillQueryDesc killQueryDesc) { this(inputs, outputs); @@ -533,22 +404,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.replSetFirstIncLoadFlagDesc = replSetFirstIncLoadFlagDesc; } - /** - * @return the createTblDesc - */ - @Explain(displayName = "Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public CreateTableDesc getCreateTblDesc() { - return createTblDesc; - } - - /** - * @return the createTblDesc - */ - @Explain(displayName = "Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public CreateTableLikeDesc getCreateTblLikeDesc() { - return createTblLikeDesc; - } - /** * @return the createTblDesc */ @@ -560,9 +415,9 @@ public CreateViewDesc getCreateViewDesc() { /** * @return the dropTblDesc */ - @Explain(displayName = "Drop Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public DropTableDesc getDropTblDesc() { - return dropTblDesc; + @Explain(displayName = "Drop Partition Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public DropPartitionDesc getDropPartitionDesc() { + return dropPartitionDesc; } /** @@ -582,14 +437,6 @@ public AlterMaterializedViewDesc getAlterMaterializedViewDesc() { return alterMVDesc; } - /** - * @return the showTblsDesc - */ - @Explain(displayName = "Show Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowTablesDesc getShowTblsDesc() { - return showTblsDesc; - } - /** * @return the showColumnsDesc */ @@ -629,22 +476,6 @@ public AbortTxnsDesc getAbortTxnsDesc() { return abortTxnsDesc; } - /** - * @return the lockTblDesc - */ - @Explain(displayName = "Lock Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public LockTableDesc getLockTblDesc() { - return 
lockTblDesc; - } - - /** - * @return the unlockTblDesc - */ - @Explain(displayName = "Unlock Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public UnlockTableDesc getUnlockTblDesc() { - return unlockTblDesc; - } - /** * @return the descFuncDesc */ @@ -666,28 +497,6 @@ public ShowPartitionsDesc getShowPartsDesc() { return showPartsDesc; } - @Explain(displayName = "Show Create Database Operator", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowCreateDatabaseDesc getShowCreateDbDesc() { - return showCreateDbDesc; - } - - /** - * @return the showCreateTblDesc - */ - @Explain(displayName = "Show Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowCreateTableDesc getShowCreateTblDesc() { - return showCreateTblDesc; - } - - /** - * @return the descTblDesc - */ - @Explain(displayName = "Describe Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public DescTableDesc getDescTblDesc() { - return descTblDesc; - } - /** * @return information about the partitions we want to add. */ @@ -717,17 +526,6 @@ public MsckDesc getMsckDesc() { return msckDesc; } - /** - * @return show table descriptor - */ - public ShowTableStatusDesc getShowTblStatusDesc() { - return showTblStatusDesc; - } - - public ShowTblPropertiesDesc getShowTblPropertiesDesc() { - return showTblPropertiesDesc; - } - public HashSet getInputs() { return inputs; } @@ -787,11 +585,6 @@ public AlterTableAlterPartDesc getAlterTableAlterPartDesc() { return alterTableAlterPartDesc; } - @Explain(displayName = "Truncate Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public TruncateTableDesc getTruncateTblDesc() { - return truncateTblDesc; - } - /** * @return information about the table partition to be exchanged */ @@ -815,11 +608,6 @@ public InsertCommitHookDesc getInsertCommitHookDesc() { return insertCommitHookDesc; } - @Explain(displayName = "Pre Insert operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public PreInsertTableDesc getPreInsertTableDesc() { - return preInsertTableDesc; - } - @Explain(displayName = "Create resource plan") public CreateResourcePlanDesc getCreateResourcePlanDesc() { return createResourcePlanDesc; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java new file mode 100644 index 0000000000..81fcc4689d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DropPartitionDesc. + */ +@Explain(displayName = "Drop Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropPartitionDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + + /** + * PartSpec. + */ + public static class PartSpec implements Serializable { + private static final long serialVersionUID = 1L; + + private ExprNodeGenericFuncDesc partSpec; + // TODO: see if we can get rid of this... used in one place to distinguish archived parts + private int prefixLength; + + public PartSpec(ExprNodeGenericFuncDesc partSpec, int prefixLength) { + this.partSpec = partSpec; + this.prefixLength = prefixLength; + } + + public ExprNodeGenericFuncDesc getPartSpec() { + return partSpec; + } + + public int getPrefixLength() { + return prefixLength; + } + } + + private final String tableName; + private final ArrayList<PartSpec> partSpecs; + private final boolean ifPurge; + private final ReplicationSpec replicationSpec; + + public DropPartitionDesc(String tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs, boolean ifPurge, + ReplicationSpec replicationSpec) { + this.tableName = tableName; + this.partSpecs = new ArrayList<PartSpec>(partSpecs.size()); + for (Map.Entry<Integer, List<ExprNodeGenericFuncDesc>> partSpec : partSpecs.entrySet()) { + int prefixLength = partSpec.getKey(); + for (ExprNodeGenericFuncDesc expr : partSpec.getValue()) { + this.partSpecs.add(new PartSpec(expr, prefixLength)); + } + } + this.ifPurge = ifPurge; + this.replicationSpec = replicationSpec == null ? new ReplicationSpec() : replicationSpec; + } + + @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + public ArrayList<PartSpec> getPartSpecs() { + return partSpecs; + } + + public boolean getIfPurge() { + return ifPurge; + } + + /** + * @return what kind of replication scope this drop is running under. + * This can result in a "DROP IF OLDER THAN" kind of semantic + */ + public ReplicationSpec getReplicationSpec(){ + return replicationSpec; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java deleted file mode 100644 index 5d22154744..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.ql.parse.ReplicationSpec; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * DropTableDesc. - * TODO: this is currently used for both drop table and drop partitions. - */ -@Explain(displayName = "Drop Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropTableDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - - public static class PartSpec { - public PartSpec(ExprNodeGenericFuncDesc partSpec, int prefixLength) { - this.partSpec = partSpec; - this.prefixLength = prefixLength; - } - public ExprNodeGenericFuncDesc getPartSpec() { - return partSpec; - } - public int getPrefixLength() { - return prefixLength; - } - private static final long serialVersionUID = 1L; - private ExprNodeGenericFuncDesc partSpec; - // TODO: see if we can get rid of this... used in one place to distinguish archived parts - private int prefixLength; - } - - String tableName; - ArrayList partSpecs; - TableType expectedType; - boolean ifExists; - boolean ifPurge; - ReplicationSpec replicationSpec; - boolean validationRequired; - - - public DropTableDesc() { - } - - /** - * @param tableName - * @param ifPurge - */ - public DropTableDesc( - String tableName, TableType expectedType, boolean ifExists, - boolean ifPurge, ReplicationSpec replicationSpec) { - this(tableName, expectedType, ifExists, ifPurge, replicationSpec, true); - } - - public DropTableDesc( - String tableName, TableType expectedType, boolean ifExists, - boolean ifPurge, ReplicationSpec replicationSpec, boolean validationRequired) { - this.tableName = tableName; - this.partSpecs = null; - this.expectedType = expectedType; - this.ifExists = ifExists; - this.ifPurge = ifPurge; - this.replicationSpec = replicationSpec; - this.validationRequired = validationRequired; - } - - public DropTableDesc(String tableName, Map> partSpecs, - TableType expectedType, boolean ifPurge, ReplicationSpec replicationSpec) { - this(tableName, partSpecs, expectedType, ifPurge, replicationSpec, true); - } - - public DropTableDesc(String tableName, Map> partSpecs, - TableType expectedType, boolean ifPurge, ReplicationSpec replicationSpec, boolean validationRequired) { - this.tableName = tableName; - this.partSpecs = new ArrayList(partSpecs.size()); - for (Map.Entry> partSpec : partSpecs.entrySet()) { - int prefixLength = partSpec.getKey(); - for (ExprNodeGenericFuncDesc expr : partSpec.getValue()) { - this.partSpecs.add(new PartSpec(expr, prefixLength)); - } - } - this.expectedType = expectedType; - this.ifPurge = ifPurge; - this.replicationSpec = replicationSpec; - this.validationRequired = validationRequired; - } - - /** - * @return the tableName - */ - @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { - return tableName; - } - - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @return the partSpecs - */ - public ArrayList getPartSpecs() { - return partSpecs; - } - - /** - * @return whether to expect a view being dropped - */ - public boolean getExpectView() { - return expectedType != null && expectedType == TableType.VIRTUAL_VIEW; - } - - /** - * @return whether to expect a 
materialized view being dropped - */ - public boolean getExpectMaterializedView() { - return expectedType != null && expectedType == TableType.MATERIALIZED_VIEW; - } - - /** - * @return whether IF EXISTS was specified - */ - public boolean getIfExists() { - return ifExists; - } - - /** - * @param ifExists - * set whether IF EXISTS was specified - */ - public void setIfExists(boolean ifExists) { - this.ifExists = ifExists; - } - - /** - * @return whether Purge was specified - */ - public boolean getIfPurge() { - return ifPurge; - } - - /** - * @param ifPurge - * set whether Purge was specified - */ - public void setIfPurge(boolean ifPurge) { - this.ifPurge = ifPurge; - } - - /** - * @return what kind of replication scope this drop is running under. - * This can result in a "DROP IF OLDER THAN" kind of semantic - */ - public ReplicationSpec getReplicationSpec(){ - if (replicationSpec == null){ - this.replicationSpec = new ReplicationSpec(); - } - return this.replicationSpec; - } - - /** - * @return whether the table type validation is needed (false in repl case) - */ - public boolean getValidationRequired(){ - return this.validationRequired; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index 017e1c7f9b..381c3b54a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -28,6 +28,8 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -323,9 +325,9 @@ public String getDatabaseName() { HiveConf conf) { switch (getDescType()) { case TABLE: - return TaskFactory.get(new DDLWork(inputs, outputs, createTblDesc), conf); + return TaskFactory.get(new DDLWork2(inputs, outputs, createTblDesc), conf); case VIEW: - return TaskFactory.get(new DDLWork(inputs, outputs, createViewDesc), conf); + return TaskFactory.get(new DDLWork(inputs, outputs, createViewDesc), conf); } return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index 46761ffaec..3abdc4859f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -21,6 +21,7 @@ import java.io.Serializable; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 33a5371d1e..b668e40594 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.RowSchema; import 
org.apache.hadoop.hive.ql.exec.TableScanOperator; diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index 198f7fd351..10192859a7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -60,9 +60,8 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.txn.CompactionInfo; -import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.DriverUtils; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.table.ShowCreateTableOperation; import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter; import org.apache.hadoop.hive.ql.io.AcidInputFormat; import org.apache.hadoop.hive.ql.io.AcidOutputFormat; @@ -652,7 +651,7 @@ private String buildMmCompactionCtQuery( String sh = t.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE); assert sh == null; // Not supposed to be a compactable table. if (!serdeParams.isEmpty()) { - DDLTask.appendSerdeParams(query, serdeParams); + ShowCreateTableOperation.appendSerdeParams(query, serdeParams); } query.append("STORED AS INPUTFORMAT '").append( HiveStringUtils.escapeHiveCommand(sd.getInputFormat())).append("' OUTPUTFORMAT '").append( diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java index 1ad0225381..ae22b7f47a 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java @@ -22,9 +22,9 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.QueryPlan; -import org.apache.hadoop.hive.ql.exec.DDLTask; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Assert; import org.junit.Test; @@ -149,9 +149,9 @@ private String getColumnType(String query) { } QueryPlan plan = driver.getPlan(); - DDLTask task = (DDLTask) plan.getRootTasks().get(0); - DDLWork work = task.getWork(); - CreateTableDesc spec = work.getCreateTblDesc(); + DDLTask2 task = (DDLTask2) plan.getRootTasks().get(0); + DDLWork2 work = task.getWork(); + CreateTableDesc spec = (CreateTableDesc)work.getDDLDesc(); FieldSchema fs = spec.getCols().get(0); return fs.getType(); } diff --git ql/src/test/queries/clientpositive/db_ddl_explain.q ql/src/test/queries/clientpositive/db_ddl_explain.q new file mode 100644 index 0000000000..7ad0bdddf2 --- /dev/null +++ ql/src/test/queries/clientpositive/db_ddl_explain.q @@ -0,0 +1,20 @@ +EXPLAIN CREATE DATABASE d; +CREATE DATABASE d; + +EXPLAIN SHOW DATABASES; +SHOW DATABASES; + +EXPLAIN DESCRIBE DATABASE d; +DESCRIBE DATABASE d; + +EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis'); +ALTER DATABASE d SET dbproperties('test'='yesthisis'); + +EXPLAIN SHOW CREATE DATABASE d; +SHOW CREATE DATABASE d; + +EXPLAIN USE d; +USE d; + +EXPLAIN DROP DATABASE d; +DROP DATABASE d; diff --git ql/src/test/results/clientnegative/authorization_explain.q.out 
ql/src/test/results/clientnegative/authorization_explain.q.out index 792de42cc0..29542a1e0b 100644 --- ql/src/test/results/clientnegative/authorization_explain.q.out +++ ql/src/test/results/clientnegative/authorization_explain.q.out @@ -11,14 +11,13 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key int, value string - if not exists: true - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.authorization_explain + Create Table + columns: key int, value string + if not exists: true + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.authorization_explain PREHOOK: query: create table if not exists authorization_explain (key int, value string) PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientnegative/avro_decimal.q.out ql/src/test/results/clientnegative/avro_decimal.q.out index 9d00d6ee32..077b2d8c6a 100644 --- ql/src/test/results/clientnegative/avro_decimal.q.out +++ ql/src/test/results/clientnegative/avro_decimal.q.out @@ -19,4 +19,4 @@ TBLPROPERTIES ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@avro_dec -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.avro.AvroSerdeException Invalid precision or scale for decimal type) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.avro.AvroSerdeException Invalid precision or scale for decimal type) diff --git ql/src/test/results/clientnegative/constraint_duplicate_name.q.out ql/src/test/results/clientnegative/constraint_duplicate_name.q.out index 8a154f631e..b94ca1c1a6 100644 --- ql/src/test/results/clientnegative/constraint_duplicate_name.q.out +++ ql/src/test/results/clientnegative/constraint_duplicate_name.q.out @@ -10,4 +10,4 @@ PREHOOK: query: create table t1(j int constraint c1 default 4) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:Constraint name already exists: c1) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidObjectException(message:Constraint name already exists: c1) diff --git ql/src/test/results/clientnegative/create_external_acid.q.out ql/src/test/results/clientnegative/create_external_acid.q.out index 123fe5a276..85b0458578 100644 --- ql/src/test/results/clientnegative/create_external_acid.q.out +++ ql/src/test/results/clientnegative/create_external_acid.q.out @@ -2,4 +2,4 @@ PREHOOK: query: create external table acid_external (a int, b varchar(128)) clus PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_external -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:default.acid_external cannot be declared transactional because it's an external table) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
MetaException(message:default.acid_external cannot be declared transactional because it's an external table) diff --git ql/src/test/results/clientnegative/create_not_acid.q.out ql/src/test/results/clientnegative/create_not_acid.q.out index e5aad6182c..3172f182d5 100644 --- ql/src/test/results/clientnegative/create_not_acid.q.out +++ ql/src/test/results/clientnegative/create_not_acid.q.out @@ -2,4 +2,4 @@ PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) TBLPROPERTI PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_notbucketed -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:The table must be stored using an ACID compliant format (such as ORC): default.acid_notbucketed) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:The table must be stored using an ACID compliant format (such as ORC): default.acid_notbucketed) diff --git ql/src/test/results/clientnegative/create_table_wrong_regex.q.out ql/src/test/results/clientnegative/create_table_wrong_regex.q.out index 931f2a7294..694dc73e0f 100644 --- ql/src/test/results/clientnegative/create_table_wrong_regex.q.out +++ ql/src/test/results/clientnegative/create_table_wrong_regex.q.out @@ -8,6 +8,6 @@ PREHOOK: query: create table aa ( test STRING ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@aa -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.util.regex.PatternSyntaxException: Unclosed character class near index 7 +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.util.regex.PatternSyntaxException: Unclosed character class near index 7 [^\](.*) ^ diff --git ql/src/test/results/clientnegative/create_view_failure2.q.out ql/src/test/results/clientnegative/create_view_failure2.q.out index ad5d5fe624..c62dc3292e 100644 --- ql/src/test/results/clientnegative/create_view_failure2.q.out +++ ql/src/test/results/clientnegative/create_view_failure2.q.out @@ -17,4 +17,4 @@ PREHOOK: query: CREATE TABLE xxx4(key int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@xxx4 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. AlreadyExistsException(message:Table hive.default.xxx4 already exists) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. AlreadyExistsException(message:Table hive.default.xxx4 already exists) diff --git ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out index b3d1d9fc20..01baf1b3dc 100644 --- ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out +++ ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out @@ -10,4 +10,4 @@ PREHOOK: query: create table t2(x int, constraint pk1 primary key (x) disable) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:Constraint name already exists: pk1) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
InvalidObjectException(message:Constraint name already exists: pk1) diff --git ql/src/test/results/clientnegative/create_with_fk_constraint.q.out ql/src/test/results/clientnegative/create_with_fk_constraint.q.out index 6598d6c153..c54c724060 100644 --- ql/src/test/results/clientnegative/create_with_fk_constraint.q.out +++ ql/src/test/results/clientnegative/create_with_fk_constraint.q.out @@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:Child column not found: x) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidObjectException(message:Child column not found: x) diff --git ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out index fae276955a..4ec45a6675 100644 --- ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out +++ ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out @@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING PRIMARY KEY DISABLE, b STRING, CON PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;) diff --git ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out index 1644d5a688..04365f0096 100644 --- ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out +++ ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out @@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING UNIQUE DISABLE, b STRING, CONSTRAI PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;) diff --git ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out index ce0f94728f..ca1304eb5b 100644 --- ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out +++ ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out @@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Foreign key references a:string; but no corresponding primary key or unique key exists. Possible keys: [a:int;]) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
MetaException(message:Foreign key references a:string; but no corresponding primary key or unique key exists. Possible keys: [a:int;]) diff --git ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out index 998c643af3..32d6284971 100644 --- ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out +++ ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out @@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Foreign key references a:string;b:string; but no corresponding primary key or unique key exists. Possible keys: [b:int;a:string;, a:string;b:int;]) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Foreign key references a:string;b:string; but no corresponding primary key or unique key exists. Possible keys: [b:int;a:string;, a:string;b:int;]) diff --git ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out index 3fad08c584..d2bcea06fb 100644 --- ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out +++ ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out @@ -12,4 +12,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@drop_notablelock PREHOOK: query: lock table drop_notablelock shared PREHOOK: type: LOCKTABLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager diff --git ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out index 2d9a20f253..8e7dc6a0c5 100644 --- ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out +++ ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out @@ -12,4 +12,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@drop_notableunlock PREHOOK: query: unlock table drop_notableunlock PREHOOK: type: UNLOCKTABLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager diff --git ql/src/test/results/clientnegative/deletejar.q.out ql/src/test/results/clientnegative/deletejar.q.out index d52186be89..ff77603fbf 100644 --- ql/src/test/results/clientnegative/deletejar.q.out +++ ql/src/test/results/clientnegative/deletejar.q.out @@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE DELETEJAR(KEY STRING, VALUE STRING) ROW FORMAT SERD PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DELETEJAR -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
Cannot validate serde: org.apache.hadoop.hive.serde2.TestSerDe +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot validate serde: org.apache.hadoop.hive.serde2.TestSerDe diff --git ql/src/test/results/clientnegative/describe_xpath1.q.out ql/src/test/results/clientnegative/describe_xpath1.q.out index 322e6e8213..ca8e5d076f 100644 --- ql/src/test/results/clientnegative/describe_xpath1.q.out +++ ql/src/test/results/clientnegative/describe_xpath1.q.out @@ -1,4 +1,4 @@ PREHOOK: query: describe src_thrift $elem$ PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3] +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3] diff --git ql/src/test/results/clientnegative/describe_xpath2.q.out ql/src/test/results/clientnegative/describe_xpath2.q.out index c1f2ec1cc8..f1099c94cb 100644 --- ql/src/test/results/clientnegative/describe_xpath2.q.out +++ ql/src/test/results/clientnegative/describe_xpath2.q.out @@ -1,4 +1,4 @@ PREHOOK: query: describe src_thrift $key$ PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3] +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3] diff --git ql/src/test/results/clientnegative/describe_xpath3.q.out ql/src/test/results/clientnegative/describe_xpath3.q.out index a30063316e..d29d0930a0 100644 --- ql/src/test/results/clientnegative/describe_xpath3.q.out +++ ql/src/test/results/clientnegative/describe_xpath3.q.out @@ -1,4 +1,4 @@ PREHOOK: query: describe src_thrift lint.abc PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Error in getting fields from serde.Unknown type for abc +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Error in getting fields from serde.Unknown type for abc diff --git ql/src/test/results/clientnegative/describe_xpath4.q.out ql/src/test/results/clientnegative/describe_xpath4.q.out index b569eca5d3..ec81c9c6b5 100644 --- ql/src/test/results/clientnegative/describe_xpath4.q.out +++ ql/src/test/results/clientnegative/describe_xpath4.q.out @@ -1,4 +1,4 @@ PREHOOK: query: describe src_thrift mStringString.abc PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Error in getting fields from serde.Unknown type for abc +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
Error in getting fields from serde.Unknown type for abc diff --git ql/src/test/results/clientnegative/drop_table_failure2.q.out ql/src/test/results/clientnegative/drop_table_failure2.q.out index f0097cda17..c3d94a77fe 100644 --- ql/src/test/results/clientnegative/drop_table_failure2.q.out +++ ql/src/test/results/clientnegative/drop_table_failure2.q.out @@ -13,4 +13,4 @@ PREHOOK: query: DROP TABLE xxx6 PREHOOK: type: DROPTABLE PREHOOK: Input: default@xxx6 PREHOOK: Output: default@xxx6 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot drop a view with DROP TABLE +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop a view with DROP TABLE diff --git ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out index 88e3b7dcde..efc080e4e1 100644 --- ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out +++ ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out @@ -32,4 +32,4 @@ PREHOOK: query: drop table mytable PREHOOK: type: DROPTABLE PREHOOK: Input: default@mytable PREHOOK: Output: default@mytable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot drop table since it is used by at least one materialized view definition. Please drop any materialized view that uses the table before dropping it +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop table since it is used by at least one materialized view definition. Please drop any materialized view that uses the table before dropping it diff --git ql/src/test/results/clientnegative/drop_view_failure1.q.out ql/src/test/results/clientnegative/drop_view_failure1.q.out index a1a44988be..87d91ed94f 100644 --- ql/src/test/results/clientnegative/drop_view_failure1.q.out +++ ql/src/test/results/clientnegative/drop_view_failure1.q.out @@ -10,4 +10,4 @@ PREHOOK: query: DROP VIEW xxx1 PREHOOK: type: DROPVIEW PREHOOK: Input: default@xxx1 PREHOOK: Output: default@xxx1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot drop a base table with DROP VIEW +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop a base table with DROP VIEW diff --git ql/src/test/results/clientnegative/druid_address.q.out ql/src/test/results/clientnegative/druid_address.q.out index 66b7e142db..c26eff3a8d 100644 --- ql/src/test/results/clientnegative/druid_address.q.out +++ ql/src/test/results/clientnegative/druid_address.q.out @@ -4,4 +4,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid broker address not specified in configuration) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid broker address not specified in configuration) diff --git ql/src/test/results/clientnegative/druid_buckets.q.out ql/src/test/results/clientnegative/druid_buckets.q.out index 94e4f708b4..ad381f2044 100644 --- ql/src/test/results/clientnegative/druid_buckets.q.out +++ ql/src/test/results/clientnegative/druid_buckets.q.out @@ -5,4 +5,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:CLUSTERED BY may not be specified for Druid) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:CLUSTERED BY may not be specified for Druid) diff --git ql/src/test/results/clientnegative/druid_case.q.out ql/src/test/results/clientnegative/druid_case.q.out index 457028bb12..b18f44f60c 100644 --- ql/src/test/results/clientnegative/druid_case.q.out +++ ql/src/test/results/clientnegative/druid_case.q.out @@ -4,4 +4,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name anonymous in the table definition. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name anonymous in the table definition. diff --git ql/src/test/results/clientnegative/druid_datasource.q.out ql/src/test/results/clientnegative/druid_datasource.q.out index 177ffaa4d4..de170fdca2 100644 --- ql/src/test/results/clientnegative/druid_datasource.q.out +++ ql/src/test/results/clientnegative/druid_datasource.q.out @@ -4,4 +4,4 @@ TBLPROPERTIES ("property" = "localhost") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid data source not specified; use druid.datasource in table properties) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid data source not specified; use druid.datasource in table properties) diff --git ql/src/test/results/clientnegative/druid_datasource2.q.out ql/src/test/results/clientnegative/druid_datasource2.q.out index 2f783fe39a..71371c3f56 100644 --- ql/src/test/results/clientnegative/druid_datasource2.q.out +++ ql/src/test/results/clientnegative/druid_datasource2.q.out @@ -4,4 +4,4 @@ TBLPROPERTIES ("property" = "localhost", "druid.datasource" = "mydatasource") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.druid_table_1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
MetaException(message:Table default.druid_table_1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) diff --git ql/src/test/results/clientnegative/druid_location.q.out ql/src/test/results/clientnegative/druid_location.q.out index 5727e8c723..7ee82627d9 100644 --- ql/src/test/results/clientnegative/druid_location.q.out +++ ql/src/test/results/clientnegative/druid_location.q.out @@ -6,4 +6,4 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:LOCATION may not be specified for Druid) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:LOCATION may not be specified for Druid) diff --git ql/src/test/results/clientnegative/druid_partitions.q.out ql/src/test/results/clientnegative/druid_partitions.q.out index 6fb55c1991..81325a8088 100644 --- ql/src/test/results/clientnegative/druid_partitions.q.out +++ ql/src/test/results/clientnegative/druid_partitions.q.out @@ -5,4 +5,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:PARTITIONED BY may not be specified for Druid) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:PARTITIONED BY may not be specified for Druid) diff --git ql/src/test/results/clientnegative/external1.q.out ql/src/test/results/clientnegative/external1.q.out index f2bc9c6ea2..8d47ccde89 100644 --- ql/src/test/results/clientnegative/external1.q.out +++ ql/src/test/results/clientnegative/external1.q.out @@ -3,4 +3,4 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@external1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme" +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme" diff --git ql/src/test/results/clientnegative/insert_sorted.q.out ql/src/test/results/clientnegative/insert_sorted.q.out index bb3c7e3bf4..fef40ee11a 100644 --- ql/src/test/results/clientnegative/insert_sorted.q.out +++ ql/src/test/results/clientnegative/insert_sorted.q.out @@ -20,4 +20,4 @@ PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_insertsort -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.) 
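Note on the DDLTask2 migration reflected in the error messages above: as the TestHiveDecimalParse hunk earlier in this patch shows, DDLWork2 no longer offers one typed getter per DDL operation; it carries a single desc that callers fetch via getDDLDesc() and downcast. A minimal sketch of that pattern, assuming only the classes named in this patch (DDLTask2, DDLWork2, CreateTableDesc) and an already-compiled CREATE TABLE plan; the helper class and method names are illustrative, not part of the patch.

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.ddl.DDLTask2;
import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;

public class DdlPlanInspector {
  /** Returns the declared type of the first column of a compiled CREATE TABLE plan. */
  public static String firstColumnType(QueryPlan plan) {
    // The root task of a CREATE TABLE statement is now a DDLTask2 wrapping a DDLWork2.
    DDLTask2 task = (DDLTask2) plan.getRootTasks().get(0);
    DDLWork2 work = task.getWork();
    // DDLWork2 carries a generic desc; callers downcast to the concrete type they expect.
    CreateTableDesc desc = (CreateTableDesc) work.getDDLDesc();
    FieldSchema firstColumn = desc.getCols().get(0);
    return firstColumn.getType();
  }
}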
diff --git ql/src/test/results/clientnegative/lockneg1.q.out ql/src/test/results/clientnegative/lockneg1.q.out index 3a96cda24b..cbcefa0dd5 100644 --- ql/src/test/results/clientnegative/lockneg1.q.out +++ ql/src/test/results/clientnegative/lockneg1.q.out @@ -31,4 +31,4 @@ POSTHOOK: type: LOCKTABLE PREHOOK: query: LOCK TABLE tstsrc EXCLUSIVE PREHOOK: type: LOCKTABLE Unable to acquire EXPLICIT, EXCLUSIVE lock default@tstsrc after 1 attempts. -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2 diff --git ql/src/test/results/clientnegative/lockneg2.q.out ql/src/test/results/clientnegative/lockneg2.q.out index 31e9087e74..3e988b9f01 100644 --- ql/src/test/results/clientnegative/lockneg2.q.out +++ ql/src/test/results/clientnegative/lockneg2.q.out @@ -22,4 +22,4 @@ POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: UNLOCK TABLE tstsrc PREHOOK: type: UNLOCKTABLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Table tstsrc is not locked +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Table tstsrc is not locked diff --git ql/src/test/results/clientnegative/lockneg3.q.out ql/src/test/results/clientnegative/lockneg3.q.out index e4f635731d..1403c73a67 100644 --- ql/src/test/results/clientnegative/lockneg3.q.out +++ ql/src/test/results/clientnegative/lockneg3.q.out @@ -26,4 +26,4 @@ POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpar POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: UNLOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') PREHOOK: type: UNLOCKTABLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Table tstsrcpart is not locked +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Table tstsrcpart is not locked diff --git ql/src/test/results/clientnegative/materialized_view_drop.q.out ql/src/test/results/clientnegative/materialized_view_drop.q.out index da95afb05a..f059e6de34 100644 --- ql/src/test/results/clientnegative/materialized_view_drop.q.out +++ ql/src/test/results/clientnegative/materialized_view_drop.q.out @@ -39,4 +39,4 @@ PREHOOK: query: drop materialized view cmv_basetable PREHOOK: type: DROP_MATERIALIZED_VIEW PREHOOK: Input: default@cmv_basetable PREHOOK: Output: default@cmv_basetable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot drop a base table with DROP MATERIALIZED VIEW +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop a base table with DROP MATERIALIZED VIEW diff --git ql/src/test/results/clientnegative/materialized_view_drop2.q.out ql/src/test/results/clientnegative/materialized_view_drop2.q.out index d4f243c5db..e3c705344d 100644 --- ql/src/test/results/clientnegative/materialized_view_drop2.q.out +++ ql/src/test/results/clientnegative/materialized_view_drop2.q.out @@ -31,4 +31,4 @@ PREHOOK: query: drop view cmv_mat_view PREHOOK: type: DROPVIEW PREHOOK: Input: default@cmv_mat_view PREHOOK: Output: default@cmv_mat_view -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
Cannot drop a materialized view with DROP VIEW +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop a materialized view with DROP VIEW diff --git ql/src/test/results/clientnegative/nested_complex_neg.q.out ql/src/test/results/clientnegative/nested_complex_neg.q.out index a6f9ac5dac..200eee1255 100644 --- ql/src/test/results/clientnegative/nested_complex_neg.q.out +++ ql/src/test/results/clientnegative/nested_complex_neg.q.out @@ -7,4 +7,4 @@ simple_string string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nestedcomplex -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Number of levels of nesting supported for LazySimpleSerde is 7 Unable to work with level 23. Use hive.serialization.extend.nesting.levels serde property for tables using LazySimpleSerde.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Number of levels of nesting supported for LazySimpleSerde is 7 Unable to work with level 23. Use hive.serialization.extend.nesting.levels serde property for tables using LazySimpleSerde.) diff --git ql/src/test/results/clientnegative/serde_regex.q.out ql/src/test/results/clientnegative/serde_regex.q.out index 1047a82869..faa5d7b12e 100644 --- ql/src/test/results/clientnegative/serde_regex.q.out +++ ql/src/test/results/clientnegative/serde_regex.q.out @@ -22,4 +22,4 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@serde_regex -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.serde2.RegexSerDe doesn't allow column [9] named strct with type struct) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.serde2.RegexSerDe doesn't allow column [9] named strct with type struct) diff --git ql/src/test/results/clientnegative/serde_regex3.q.out ql/src/test/results/clientnegative/serde_regex3.q.out index 33d647b163..22146382a9 100644 --- ql/src/test/results/clientnegative/serde_regex3.q.out +++ ql/src/test/results/clientnegative/serde_regex3.q.out @@ -19,4 +19,4 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@serde_regex -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException This table does not have serde property "input.regex"!) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException This table does not have serde property "input.regex"!) 
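Note on the drop-partition split: partition drops now use the dedicated DropPartitionDesc added earlier in this patch instead of riding on the deleted DropTableDesc. A minimal construction sketch, assuming the constructor keeps the shape shown in the new file (a map from partition-spec prefix length to the partition predicates carrying that prefix, with null accepted for ReplicationSpec); the helper method and its arguments are illustrative only.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

public class DropPartitionDescExample {
  /** Wraps one partition predicate into the map shape the DropPartitionDesc constructor expects. */
  public static DropPartitionDesc singlePredicateDrop(String tableName,
      ExprNodeGenericFuncDesc partitionPredicate, int prefixLength, boolean purge) {
    // Key: the prefix length recorded for each predicate (kept to distinguish archived parts).
    Map<Integer, List<ExprNodeGenericFuncDesc>> specs = new HashMap<>();
    List<ExprNodeGenericFuncDesc> exprs = new ArrayList<>();
    exprs.add(partitionPredicate);
    specs.put(prefixLength, exprs);
    // Passing null for ReplicationSpec makes the desc fall back to a default ReplicationSpec.
    return new DropPartitionDesc(tableName, specs, purge, null);
  }
}

The desc then flattens the map into one PartSpec entry per expression, which getPartSpecs() returns.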
diff --git ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out index d7b9965d44..bacbda1019 100644 --- ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out +++ ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out @@ -2,4 +2,4 @@ PREHOOK: query: create table `c/b/o_t1`(key string, value string, c_int int, c_f PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@c/b/o_t1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: [c/b/o_t1]: is not a valid table name +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. org.apache.hadoop.hive.ql.metadata.HiveException: [c/b/o_t1]: is not a valid table name diff --git ql/src/test/results/clientnegative/strict_managed_tables1.q.out ql/src/test/results/clientnegative/strict_managed_tables1.q.out index a659644797..04b8fb430f 100644 --- ql/src/test/results/clientnegative/strict_managed_tables1.q.out +++ ql/src/test/results/clientnegative/strict_managed_tables1.q.out @@ -26,4 +26,4 @@ PREHOOK: query: create table strict_managed_tables1_tab4 (c1 string, c2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@strict_managed_tables1_tab4 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.strict_managed_tables1_tab4 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Table default.strict_managed_tables1_tab4 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) diff --git ql/src/test/results/clientnegative/strict_managed_tables4.q.out ql/src/test/results/clientnegative/strict_managed_tables4.q.out index 0c7576ff22..0bff565962 100644 --- ql/src/test/results/clientnegative/strict_managed_tables4.q.out +++ ql/src/test/results/clientnegative/strict_managed_tables4.q.out @@ -28,4 +28,4 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@strict_managed_tables6_tab2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.strict_managed_tables6_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Table default.strict_managed_tables6_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) 
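Note on the EXPLAIN output changes that follow: the clientpositive diffs below (ambiguitycheck, annotate_stats_table, create_union_table, ctas, ...) drop the "Create Table Operator:" / "Describe Table Operator:" wrapper line and keep only the desc heading, which is consistent with the annotated getters being removed from DDLWork earlier in this patch. The sketch below is a hypothetical desc, not code from the patch; it only mirrors the annotation placement the new DropPartitionDesc uses, a class-level @Explain for the heading plus getter-level @Explain entries for the indented fields.

import java.io.Serializable;

import org.apache.hadoop.hive.ql.plan.Explain;
import org.apache.hadoop.hive.ql.plan.Explain.Level;

// Hypothetical desc for illustration: the class-level annotation supplies the single
// EXPLAIN heading, so no separate "... Operator:" wrapper line is printed for it.
@Explain(displayName = "Example DDL", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
public class ExampleDdlDesc implements Serializable {
  private static final long serialVersionUID = 1L;

  private final String tableName;

  public ExampleDdlDesc(String tableName) {
    this.tableName = tableName;
  }

  // Getter-level annotations become the indented "key: value" lines under the heading.
  @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public String getTableName() {
    return tableName;
  }
}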
diff --git ql/src/test/results/clientnegative/strict_managed_tables5.q.out ql/src/test/results/clientnegative/strict_managed_tables5.q.out index 0e29fbdcf9..4d9b5d0dcb 100644 --- ql/src/test/results/clientnegative/strict_managed_tables5.q.out +++ ql/src/test/results/clientnegative/strict_managed_tables5.q.out @@ -16,4 +16,4 @@ STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@strict_managed_tables5_tab2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.strict_managed_tables5_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Table default.strict_managed_tables5_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) diff --git ql/src/test/results/clientpositive/ambiguitycheck.q.out ql/src/test/results/clientpositive/ambiguitycheck.q.out index aff57522e3..efbd0d8947 100644 --- ql/src/test/results/clientpositive/ambiguitycheck.q.out +++ ql/src/test/results/clientpositive/ambiguitycheck.q.out @@ -828,10 +828,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Describe Table Operator: - Describe Table + Describe Table #### A masked pattern was here #### - table: default.src + table: default.src Stage: Stage-1 Fetch Operator diff --git ql/src/test/results/clientpositive/annotate_stats_table.q.out ql/src/test/results/clientpositive/annotate_stats_table.q.out index d7f7b22121..1e68e78080 100644 --- ql/src/test/results/clientpositive/annotate_stats_table.q.out +++ ql/src/test/results/clientpositive/annotate_stats_table.q.out @@ -367,13 +367,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-7 - Create Table Operator: - Create Table - columns: _c0 int - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmp_n0 + Create Table + columns: _c0 int + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_n0 Stage: Stage-2 Map Reduce diff --git ql/src/test/results/clientpositive/create_union_table.q.out ql/src/test/results/clientpositive/create_union_table.q.out index f773f34711..17b5fc0ab4 100644 --- ql/src/test/results/clientpositive/create_union_table.q.out +++ ql/src/test/results/clientpositive/create_union_table.q.out @@ -13,13 +13,12 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: mydata uniontype,struct>, strct struct - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.abc + Create Table + columns: mydata uniontype,struct>, strct struct + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.abc PREHOOK: query: create table abc(mydata uniontype,struct>, strct struct) diff --git 
ql/src/test/results/clientpositive/ctas.q.out ql/src/test/results/clientpositive/ctas.q.out index 7c7378af45..c4168b13cc 100644 --- ql/src/test/results/clientpositive/ctas.q.out +++ ql/src/test/results/clientpositive/ctas.q.out @@ -113,13 +113,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-5 - Create Table Operator: - Create Table - columns: k string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_CTAS1 + Create Table + columns: k string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_CTAS1 Stage: Stage-3 Stats Work @@ -316,13 +315,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-5 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_ctas2 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas2 Stage: Stage-3 Stats Work @@ -519,13 +517,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-5 - Create Table Operator: - Create Table - columns: half_key double, conb string - input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat - output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat - serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.nzhang_ctas3 + Create Table + columns: half_key double, conb string + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.nzhang_ctas3 Stage: Stage-3 Stats Work @@ -786,14 +783,13 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-5 - Create Table Operator: - Create Table - columns: key string, value string - field delimiter: , - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_ctas4 + Create Table + columns: key string, value string + field delimiter: , + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas4 Stage: Stage-3 Stats Work @@ -991,16 +987,15 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-5 - Create Table Operator: - Create Table - columns: key string, value string - field delimiter: , - input format: org.apache.hadoop.mapred.TextInputFormat - line delimiter: + Create Table + columns: key string, value string + field delimiter: , + input format: org.apache.hadoop.mapred.TextInputFormat + line delimiter: - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_ctas5 + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas5 Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/ctas_colname.q.out ql/src/test/results/clientpositive/ctas_colname.q.out index 9db4ddd0d8..b875615c43 100644 --- ql/src/test/results/clientpositive/ctas_colname.q.out +++ ql/src/test/results/clientpositive/ctas_colname.q.out @@ -76,13 +76,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value string, _c1 double, _c2 string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.summary + Create Table + columns: key string, value string, _c1 double, _c2 string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.summary Stage: Stage-2 Stats Work @@ -287,13 +286,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value string, rr int - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.x4 + Create Table + columns: key string, value string, rr int + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.x4 Stage: Stage-2 Stats Work @@ -530,13 +528,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-5 - Create Table Operator: - Create Table - columns: key string, value string, lead1 string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.x5 + Create Table + columns: key string, value string, lead1 string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.x5 Stage: Stage-3 Stats Work @@ -731,13 +728,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key string, value string, _c1 double - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.x6 + Create Table + columns: key string, value string, _c1 double + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.x6 Stage: Stage-2 Stats Work @@ -940,13 +936,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-4 - Create Table Operator: - Create Table - columns: _col0 string, _col1 string, _c1 bigint - input format: 
org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.x7 + Create Table + columns: _col0 string, _col1 string, _c1 bigint + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.x7 Stage: Stage-2 Stats Work @@ -1426,13 +1421,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-4 - Create Table Operator: - Create Table - columns: _col0 string, _col1 string, _c1 bigint - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.x8 + Create Table + columns: _col0 string, _col1 string, _c1 bigint + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.x8 Stage: Stage-2 Stats Work @@ -1615,13 +1609,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-4 - Create Table Operator: - Create Table - columns: _c0 string, key string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.x9 + Create Table + columns: _c0 string, key string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.x9 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/ctas_uses_database_location.q.out ql/src/test/results/clientpositive/ctas_uses_database_location.q.out index eb3872eb83..ddc8368e10 100644 --- ql/src/test/results/clientpositive/ctas_uses_database_location.q.out +++ ql/src/test/results/clientpositive/ctas_uses_database_location.q.out @@ -97,13 +97,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: db1.table_db1 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: db1.table_db1 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/db_ddl_explain.q.out ql/src/test/results/clientpositive/db_ddl_explain.q.out new file mode 100644 index 0000000000..8e85d75fac --- /dev/null +++ ql/src/test/results/clientpositive/db_ddl_explain.q.out @@ -0,0 +1,171 @@ +PREHOOK: query: EXPLAIN CREATE DATABASE d +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:d +POSTHOOK: query: EXPLAIN CREATE DATABASE d +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:d +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Create Database + name: d + +PREHOOK: query: CREATE DATABASE d +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:d +POSTHOOK: 
query: CREATE DATABASE d +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:d +PREHOOK: query: EXPLAIN SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: EXPLAIN SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Databases + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +d +default +PREHOOK: query: EXPLAIN DESCRIBE DATABASE d +PREHOOK: type: DESCDATABASE +PREHOOK: Input: database:d +POSTHOOK: query: EXPLAIN DESCRIBE DATABASE d +POSTHOOK: type: DESCDATABASE +POSTHOOK: Input: database:d +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Describe Database + database: d + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: DESCRIBE DATABASE d +PREHOOK: type: DESCDATABASE +PREHOOK: Input: database:d +POSTHOOK: query: DESCRIBE DATABASE d +POSTHOOK: type: DESCDATABASE +POSTHOOK: Input: database:d +d location/in/test hive_test_user USER +PREHOOK: query: EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis') +PREHOOK: type: ALTERDATABASE +PREHOOK: Output: database:d +POSTHOOK: query: EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis') +POSTHOOK: type: ALTERDATABASE +POSTHOOK: Output: database:d +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Database + name: d + properties: + test yesthisis + +PREHOOK: query: ALTER DATABASE d SET dbproperties('test'='yesthisis') +PREHOOK: type: ALTERDATABASE +PREHOOK: Output: database:d +POSTHOOK: query: ALTER DATABASE d SET dbproperties('test'='yesthisis') +POSTHOOK: type: ALTERDATABASE +POSTHOOK: Output: database:d +PREHOOK: query: EXPLAIN SHOW CREATE DATABASE d +PREHOOK: type: SHOW_CREATEDATABASE +PREHOOK: Input: database:d +POSTHOOK: query: EXPLAIN SHOW CREATE DATABASE d +POSTHOOK: type: SHOW_CREATEDATABASE +POSTHOOK: Input: database:d +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Create Database + database name: d + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SHOW CREATE DATABASE d +PREHOOK: type: SHOW_CREATEDATABASE +PREHOOK: Input: database:d +POSTHOOK: query: SHOW CREATE DATABASE d +POSTHOOK: type: SHOW_CREATEDATABASE +POSTHOOK: Input: database:d +CREATE DATABASE `d` +LOCATION +#### A masked pattern was here #### +WITH DBPROPERTIES ( + 'test'='yesthisis') +PREHOOK: query: EXPLAIN USE d +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:d +POSTHOOK: query: EXPLAIN USE d +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:d +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Switch Database + name: d + +PREHOOK: query: USE d +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:d +POSTHOOK: query: USE d +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:d +PREHOOK: query: EXPLAIN DROP DATABASE d +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:d +PREHOOK: Output: database:d +POSTHOOK: query: EXPLAIN DROP DATABASE d +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:d +POSTHOOK: Output: database:d +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Drop Database + database: d + if 
exists: false + +PREHOOK: query: DROP DATABASE d +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:d +PREHOOK: Output: database:d +POSTHOOK: query: DROP DATABASE d +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:d +POSTHOOK: Output: database:d diff --git ql/src/test/results/clientpositive/drop_deleted_partitions.q.out ql/src/test/results/clientpositive/drop_deleted_partitions.q.out index 85f4f53bc4..e2c4443055 100644 --- ql/src/test/results/clientpositive/drop_deleted_partitions.q.out +++ ql/src/test/results/clientpositive/drop_deleted_partitions.q.out @@ -42,8 +42,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Drop Table Operator: - Drop Table + Drop Partition Operator: + Drop Partition table: dmp.mp PREHOOK: query: alter table dmp.mp drop partition (b='1') diff --git ql/src/test/results/clientpositive/drop_multi_partitions.q.out ql/src/test/results/clientpositive/drop_multi_partitions.q.out index 6b597496dd..53978e8cc9 100644 --- ql/src/test/results/clientpositive/drop_multi_partitions.q.out +++ ql/src/test/results/clientpositive/drop_multi_partitions.q.out @@ -57,8 +57,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Drop Table Operator: - Drop Table + Drop Partition Operator: + Drop Partition table: dmp.mp_n0 PREHOOK: query: alter table dmp.mp_n0 drop partition (b='1') diff --git ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out index d25b35028c..7506ca8bea 100644 --- ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out @@ -166,17 +166,16 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: __time timestamp with local time zone, cstring1 string, cstring2 string, cdouble double, cfloat float, ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cboolean1 boolean, cboolean2 boolean - storage handler: org.apache.hadoop.hive.druid.DruidStorageHandler - name: default.druid_partitioned_table - table properties: - druid.query.granularity MINUTE - druid.segment.granularity HOUR - druid.segment.targetShardsPerGranularity 6 - external.table.purge true - isExternal: true + Create Table + columns: __time timestamp with local time zone, cstring1 string, cstring2 string, cdouble double, cfloat float, ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cboolean1 boolean, cboolean2 boolean + storage handler: org.apache.hadoop.hive.druid.DruidStorageHandler + name: default.druid_partitioned_table + table properties: + druid.query.granularity MINUTE + druid.segment.granularity HOUR + druid.segment.targetShardsPerGranularity 6 + external.table.purge true + isExternal: true Stage: Stage-3 Stats Work @@ -353,8 +352,7 @@ STAGE PLANS: COLUMN_STATS_ACCURATE Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-2 Tez @@ -498,8 +496,7 @@ STAGE PLANS: COLUMN_STATS_ACCURATE Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task Stage: Stage-2 Tez diff --git ql/src/test/results/clientpositive/druid/druidmini_mv.q.out ql/src/test/results/clientpositive/druid/druidmini_mv.q.out index 96690af8be..8bd8809708 100644 --- ql/src/test/results/clientpositive/druid/druidmini_mv.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_mv.q.out @@ -480,8 +480,7 @@ STAGE PLANS: Materialized View Work Stage: Stage-1 - Pre Insert operator: - Pre-Insert task + Pre-Insert task 
Stage: Stage-2 Tez diff --git ql/src/test/results/clientpositive/explain_ddl.q.out ql/src/test/results/clientpositive/explain_ddl.q.out index a71925f33c..fa98560e18 100644 --- ql/src/test/results/clientpositive/explain_ddl.q.out +++ ql/src/test/results/clientpositive/explain_ddl.q.out @@ -125,13 +125,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.M1 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.M1 Stage: Stage-2 Stats Work @@ -254,13 +253,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.M1 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.M1 Stage: Stage-2 Stats Work @@ -387,13 +385,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.M1 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.M1 Stage: Stage-2 Stats Work @@ -516,13 +513,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.V1_n0 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.V1_n0 Stage: Stage-2 Stats Work @@ -597,13 +593,12 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - default input format: org.apache.hadoop.mapred.TextInputFormat - default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - like: src - name: default.M1 + Create Table + default input format: org.apache.hadoop.mapred.TextInputFormat + default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + like: 
src + name: default.M1 PREHOOK: query: EXPLAIN CREATE TABLE M1 LIKE M1 PREHOOK: type: CREATETABLE @@ -618,13 +613,12 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - default input format: org.apache.hadoop.mapred.TextInputFormat - default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - like: M1 - name: default.M1 + Create Table + default input format: org.apache.hadoop.mapred.TextInputFormat + default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + like: M1 + name: default.M1 PREHOOK: query: EXPLAIN DROP TABLE M1 PREHOOK: type: DROPTABLE @@ -639,9 +633,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Drop Table Operator: - Drop Table - table: M1 + Drop Table + table: M1 PREHOOK: query: select count(*) from M1 where key > 0 PREHOOK: type: QUERY @@ -793,9 +786,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Truncate Table Operator: - Truncate Table or Partition - TableName: M1 + Truncate Table or Partition + TableName: M1 PREHOOK: query: select count(*) from M1 where key > 0 PREHOOK: type: QUERY diff --git ql/src/test/results/clientpositive/fileformat_sequencefile.q.out ql/src/test/results/clientpositive/fileformat_sequencefile.q.out index 8610fa0577..37555e5439 100644 --- ql/src/test/results/clientpositive/fileformat_sequencefile.q.out +++ ql/src/test/results/clientpositive/fileformat_sequencefile.q.out @@ -17,12 +17,11 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key int, value string - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.mapred.SequenceFileOutputFormat - name: default.dest1_n85 + Create Table + columns: key int, value string + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.mapred.SequenceFileOutputFormat + name: default.dest1_n85 PREHOOK: query: CREATE TABLE dest1_n85(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' diff --git ql/src/test/results/clientpositive/fileformat_text.q.out ql/src/test/results/clientpositive/fileformat_text.q.out index 387f8076ec..6d89660571 100644 --- ql/src/test/results/clientpositive/fileformat_text.q.out +++ ql/src/test/results/clientpositive/fileformat_text.q.out @@ -17,12 +17,11 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key int, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - name: default.dest1_n107 + Create Table + columns: key int, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: default.dest1_n107 PREHOOK: query: CREATE TABLE dest1_n107(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' diff --git ql/src/test/results/clientpositive/groupby_duplicate_key.q.out ql/src/test/results/clientpositive/groupby_duplicate_key.q.out index 432ff08640..d6d1472ca6 100644 --- ql/src/test/results/clientpositive/groupby_duplicate_key.q.out +++ ql/src/test/results/clientpositive/groupby_duplicate_key.q.out @@ -162,13 +162,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-4 - Create Table Operator: - Create 
Table - columns: key string, dummy1 string, dummy2 string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dummy_n6 + Create Table + columns: key string, dummy1 string, dummy2 string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dummy_n6 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/input1.q.out ql/src/test/results/clientpositive/input1.q.out index 63f8af018c..623b973037 100644 --- ql/src/test/results/clientpositive/input1.q.out +++ ql/src/test/results/clientpositive/input1.q.out @@ -20,9 +20,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Describe Table Operator: - Describe Table - table: TEST1_n6 + Describe Table + table: TEST1_n6 Stage: Stage-1 Fetch Operator diff --git ql/src/test/results/clientpositive/input10.q.out ql/src/test/results/clientpositive/input10.q.out index bbdff6ef8e..977cf333a2 100644 --- ql/src/test/results/clientpositive/input10.q.out +++ ql/src/test/results/clientpositive/input10.q.out @@ -20,9 +20,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Describe Table Operator: - Describe Table - table: TEST10 + Describe Table + table: TEST10 Stage: Stage-1 Fetch Operator diff --git ql/src/test/results/clientpositive/input15.q.out ql/src/test/results/clientpositive/input15.q.out index 2dbf6fb1e4..ecfacf2787 100644 --- ql/src/test/results/clientpositive/input15.q.out +++ ql/src/test/results/clientpositive/input15.q.out @@ -13,14 +13,13 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key int, value string - field delimiter: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.TEST15 + Create Table + columns: key int, value string + field delimiter: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.TEST15 PREHOOK: query: CREATE TABLE TEST15(key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientpositive/input2.q.out ql/src/test/results/clientpositive/input2.q.out index aada917ec5..28f7da4f02 100644 --- ql/src/test/results/clientpositive/input2.q.out +++ ql/src/test/results/clientpositive/input2.q.out @@ -84,10 +84,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Show Table Operator: - Show Tables - database name: default - pattern: TEST2* + Show Tables + database name: default + pattern: TEST2* Stage: Stage-1 Fetch Operator diff --git ql/src/test/results/clientpositive/inputddl1.q.out ql/src/test/results/clientpositive/inputddl1.q.out index a95e9f145d..2c51ce11cb 100644 --- ql/src/test/results/clientpositive/inputddl1.q.out +++ ql/src/test/results/clientpositive/inputddl1.q.out @@ -13,13 +13,12 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key int, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde 
name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.INPUTDDL1 + Create Table + columns: key int, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.INPUTDDL1 PREHOOK: query: CREATE TABLE INPUTDDL1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientpositive/inputddl2.q.out ql/src/test/results/clientpositive/inputddl2.q.out index a5ec1c9816..b6cfbd630d 100644 --- ql/src/test/results/clientpositive/inputddl2.q.out +++ ql/src/test/results/clientpositive/inputddl2.q.out @@ -13,14 +13,13 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key int, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - partition columns: ds string, country string - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.INPUTDDL2 + Create Table + columns: key int, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + partition columns: ds string, country string + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.INPUTDDL2 PREHOOK: query: CREATE TABLE INPUTDDL2(key INT, value STRING) PARTITIONED BY(ds STRING, country STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientpositive/inputddl3.q.out ql/src/test/results/clientpositive/inputddl3.q.out index 639f0954fa..c57ca60746 100644 --- ql/src/test/results/clientpositive/inputddl3.q.out +++ ql/src/test/results/clientpositive/inputddl3.q.out @@ -13,14 +13,13 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key int, value string - field delimiter: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.INPUTDDL3 + Create Table + columns: key int, value string + field delimiter: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.INPUTDDL3 PREHOOK: query: CREATE TABLE INPUTDDL3(key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientpositive/inputddl6.q.out ql/src/test/results/clientpositive/inputddl6.q.out index e14807c3ab..47cc61f10e 100644 --- ql/src/test/results/clientpositive/inputddl6.q.out +++ ql/src/test/results/clientpositive/inputddl6.q.out @@ -91,11 +91,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Describe Table Operator: - Describe Table - partition: - ds 2008-04-09 - table: INPUTDDL6 + Describe Table + partition: + ds 2008-04-09 + table: INPUTDDL6 Stage: Stage-1 Fetch Operator diff --git ql/src/test/results/clientpositive/llap/ctas.q.out ql/src/test/results/clientpositive/llap/ctas.q.out index c761b9dd66..c42a494d0c 100644 --- ql/src/test/results/clientpositive/llap/ctas.q.out +++ ql/src/test/results/clientpositive/llap/ctas.q.out @@ -123,13 +123,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create 
Table - columns: k string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_CTAS1 + Create Table + columns: k string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_CTAS1 Stage: Stage-3 Stats Work @@ -319,13 +318,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_ctas2 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas2 Stage: Stage-3 Stats Work @@ -515,13 +513,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: half_key double, conb string - input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat - output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat - serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.nzhang_ctas3 + Create Table + columns: half_key double, conb string + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.nzhang_ctas3 Stage: Stage-3 Stats Work @@ -775,14 +772,13 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value string - field delimiter: , - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_ctas4 + Create Table + columns: key string, value string + field delimiter: , + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas4 Stage: Stage-3 Stats Work @@ -973,16 +969,15 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value string - field delimiter: , - input format: org.apache.hadoop.mapred.TextInputFormat - line delimiter: + Create Table + columns: key string, value string + field delimiter: , + input format: org.apache.hadoop.mapred.TextInputFormat + line delimiter: - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_ctas5 + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas5 Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out index 
adf8011b3a..7edfc385ae 100644 --- ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out +++ ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out @@ -142,13 +142,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: ds string, date string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart_date_n2 + Create Table + columns: ds string, date string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart_date_n2 Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/explainuser_1.q.out ql/src/test/results/clientpositive/llap/explainuser_1.q.out index 1ee459b5d6..1ea8fdcbb2 100644 --- ql/src/test/results/clientpositive/llap/explainuser_1.q.out +++ ql/src/test/results/clientpositive/llap/explainuser_1.q.out @@ -7,8 +7,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc_merge_test_part_n1 Stage-0 - Create Table Operator: - name:default.src_orc_merge_test_part_n1 + Create Table{"name:":"default.src_orc_merge_test_part_n1"} PREHOOK: query: create table src_orc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as orc PREHOOK: type: CREATETABLE @@ -3350,8 +3349,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@abcd_n1 Stage-0 - Create Table Operator: - name:default.abcd_n1 + Create Table{"name:":"default.abcd_n1"} PREHOOK: query: create table abcd_n1 (a int, b int, c int, d int) PREHOOK: type: CREATETABLE @@ -3438,8 +3436,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_rc_merge_test_n0 Stage-0 - Create Table Operator: - name:default.src_rc_merge_test_n0 + Create Table{"name:":"default.src_rc_merge_test_n0"} PREHOOK: query: create table src_rc_merge_test_n0(key int, value string) stored as rcfile PREHOOK: type: CREATETABLE @@ -3466,8 +3463,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tgt_rc_merge_test_n0 Stage-0 - Create Table Operator: - name:default.tgt_rc_merge_test_n0 + Create Table{"name:":"default.tgt_rc_merge_test_n0"} PREHOOK: query: create table tgt_rc_merge_test_n0(key int, value string) stored as rcfile PREHOOK: type: CREATETABLE @@ -3697,8 +3693,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_Tmp_n1 Stage-0 - Create Table Operator: - name:default.nzhang_Tmp_n1 + Create Table{"name:":"default.nzhang_Tmp_n1"} PREHOOK: query: create table nzhang_Tmp_n1(a int, b string) PREHOOK: type: CREATETABLE @@ -3728,8 +3723,7 @@ Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE) Stage-3 Stats Work{} Stage-4 - Create Table Operator: - name:default.nzhang_CTAS1_n1 + Create Table{"name:":"default.nzhang_CTAS1_n1"} Stage-0 Move Operator Stage-1 @@ -3796,8 +3790,7 @@ Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE) Stage-3 Stats Work{} Stage-4 - Create Table Operator: - name:default.nzhang_ctas3_n1 + Create Table{"name:":"default.nzhang_ctas3_n1"} Stage-0 Move Operator Stage-1 @@ -3862,8 +3855,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dtt Stage-0 - Create 
Table Operator: - name:default.acid_dtt + Create Table{"name:":"default.acid_dtt"} PREHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientpositive/llap/partition_ctas.q.out ql/src/test/results/clientpositive/llap/partition_ctas.q.out index 3e290b3766..d0a706cb9d 100644 --- ql/src/test/results/clientpositive/llap/partition_ctas.q.out +++ ql/src/test/results/clientpositive/llap/partition_ctas.q.out @@ -92,14 +92,13 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - partition columns: key string - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partition_ctas_1 + Create Table + columns: value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + partition columns: key string + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partition_ctas_1 Stage: Stage-0 Move Operator diff --git ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out index 4f1a47917a..298f7f39cc 100644 --- ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out +++ ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out @@ -129,13 +129,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: key int, value string, part int - input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat - output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat - serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.rcfile_createas1b + Create Table + columns: key int, value string, part int + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.rcfile_createas1b Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/semijoin_reddedup.q.out ql/src/test/results/clientpositive/llap/semijoin_reddedup.q.out index 1fca347c63..01d2bccd86 100644 --- ql/src/test/results/clientpositive/llap/semijoin_reddedup.q.out +++ ql/src/test/results/clientpositive/llap/semijoin_reddedup.q.out @@ -496,16 +496,15 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: c_name string, c_custkey bigint, o_orderkey bigint, o_orderdate string, o_totalprice double, _c5 double - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: tpch_test.q18_large_volume_customer_cached - table properties: - transactional true - transactional_properties default + Create Table + columns: c_name string, c_custkey bigint, o_orderkey bigint, o_orderdate string, o_totalprice double, _c5 double + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: tpch_test.q18_large_volume_customer_cached + table properties: + transactional true + 
transactional_properties default Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/temp_table.q.out ql/src/test/results/clientpositive/llap/temp_table.q.out index 45be750db3..af147bfd68 100644 --- ql/src/test/results/clientpositive/llap/temp_table.q.out +++ ql/src/test/results/clientpositive/llap/temp_table.q.out @@ -48,15 +48,14 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat -#### A masked pattern was here #### - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.foo - isTemporary: true + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + isTemporary: true Stage: Stage-3 Stats Work @@ -128,15 +127,14 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat -#### A masked pattern was here #### - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bar - isTemporary: true + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bar + isTemporary: true Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/tez_dml.q.out ql/src/test/results/clientpositive/llap/tez_dml.q.out index 1e8ab44cae..843237e6fe 100644 --- ql/src/test/results/clientpositive/llap/tez_dml.q.out +++ ql/src/test/results/clientpositive/llap/tez_dml.q.out @@ -101,13 +101,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: value string, cnt bigint - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmp_src + Create Table + columns: value string, cnt bigint + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_src Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out index c54a89bca3..ee9a5de0a2 100644 --- ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out +++ ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out @@ -381,13 +381,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: contact_event_id string, ce_create_dt string, ce_end_dt string, contact_type string, cnctevs_cd string, contact_mode string, cntvnst_stts_cd string, total_transfers int, ce_notes array, svcrqst_id string, svcrqct_cds array, svcrtyp_cd string, cmpltyp_cd string, src string, cnctmd_cd 
string, notes array - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.ct_events1_test + Create Table + columns: contact_event_id string, ce_create_dt string, ce_end_dt string, contact_type string, cnctevs_cd string, contact_mode string, cntvnst_stts_cd string, total_transfers int, ce_notes array, svcrqst_id string, svcrqct_cds array, svcrtyp_cd string, cmpltyp_cd string, src string, cnctmd_cd string, notes array + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ct_events1_test Stage: Stage-3 Stats Work @@ -1387,13 +1386,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: contact_event_id string, ce_create_dt string, ce_end_dt string, contact_type string, cnctevs_cd string, contact_mode string, cntvnst_stts_cd string, total_transfers int, ce_notes array, svcrqst_id string, svcrqct_cds array, svcrtyp_cd string, cmpltyp_cd string, src string, cnctmd_cd string, notes array - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.ct_events1_test + Create Table + columns: contact_event_id string, ce_create_dt string, ce_end_dt string, contact_type string, cnctevs_cd string, contact_mode string, cntvnst_stts_cd string, total_transfers int, ce_notes array, svcrqst_id string, svcrqct_cds array, svcrtyp_cd string, cmpltyp_cd string, src string, cnctmd_cd string, notes array + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ct_events1_test Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out index 7f2cc85392..ce5e63e9d9 100644 --- ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out +++ ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out @@ -6918,13 +6918,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: counts bigint, key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmp_unionall_n0 + Create Table + columns: counts bigint, key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_unionall_n0 Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/union_top_level.q.out ql/src/test/results/clientpositive/llap/union_top_level.q.out index 8fc40fc9cd..0f97fc44d1 100644 --- ql/src/test/results/clientpositive/llap/union_top_level.q.out +++ ql/src/test/results/clientpositive/llap/union_top_level.q.out @@ -630,13 +630,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value int - input format: 
org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.union_top + Create Table + columns: key string, value int + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.union_top Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/vector_char_varchar_1.q.out ql/src/test/results/clientpositive/llap/vector_char_varchar_1.q.out index e919a7087f..cb6ce791f9 100644 --- ql/src/test/results/clientpositive/llap/vector_char_varchar_1.q.out +++ ql/src/test/results/clientpositive/llap/vector_char_varchar_1.q.out @@ -121,13 +121,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: _c0 int, _c1 string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.varchar_ctas_1 + Create Table + columns: _c0 int, _c1 string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.varchar_ctas_1 Stage: Stage-3 Stats Work @@ -294,13 +293,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: _c0 int, _c1 string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.char_ctas_1 + Create Table + columns: _c0 int, _c1 string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.char_ctas_1 Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out index ad1757c1fc..b90e707748 100644 --- ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out +++ ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out @@ -754,13 +754,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: k decimal(11,5), v int - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.DECIMAL_6_3 + Create Table + columns: k decimal(11,5), v int + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.DECIMAL_6_3 Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out index 41d12bae9e..e9544f3138 100644 --- ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out +++ ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out @@ -933,14 +933,13 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - 
Create Table - columns: ctinyint tinyint, cdouble double, r int - field delimiter: , - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.sD + Create Table + columns: ctinyint tinyint, cdouble double, r int + field delimiter: , + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.sD Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out index 21d4e1b905..7a1ca6304a 100644 --- ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out +++ ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out @@ -165,13 +165,12 @@ STAGE PLANS: Dependency Collection Stage: Stage-4 - Create Table Operator: - Create Table - columns: ds string, date string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart_date_n8 + Create Table + columns: ds string, date string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart_date_n8 Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/merge3.q.out ql/src/test/results/clientpositive/merge3.q.out index 00280e7f49..644f9263e1 100644 --- ql/src/test/results/clientpositive/merge3.q.out +++ ql/src/test/results/clientpositive/merge3.q.out @@ -224,13 +224,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.merge_src2 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.merge_src2 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/nonReservedKeyWords.q.out ql/src/test/results/clientpositive/nonReservedKeyWords.q.out index 7d27c348f3..cec82b8784 100644 --- ql/src/test/results/clientpositive/nonReservedKeyWords.q.out +++ ql/src/test/results/clientpositive/nonReservedKeyWords.q.out @@ -11,13 +11,12 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: col string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.CACHE + Create Table + columns: col string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.CACHE PREHOOK: query: explain CREATE TABLE 
DAYOFWEEK (col STRING) PREHOOK: type: CREATETABLE @@ -32,13 +31,12 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: col string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.DAYOFWEEK + Create Table + columns: col string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.DAYOFWEEK PREHOOK: query: explain CREATE TABLE VIEWS (col STRING) PREHOOK: type: CREATETABLE @@ -53,11 +51,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: col string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.VIEWS + Create Table + columns: col string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.VIEWS diff --git ql/src/test/results/clientpositive/nonmr_fetch.q.out ql/src/test/results/clientpositive/nonmr_fetch.q.out index 6d439afdf5..64875f4059 100644 --- ql/src/test/results/clientpositive/nonmr_fetch.q.out +++ ql/src/test/results/clientpositive/nonmr_fetch.q.out @@ -1363,13 +1363,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcx + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcx Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/nullformat.q.out ql/src/test/results/clientpositive/nullformat.q.out index d14c570285..9bb52f1391 100644 --- ql/src/test/results/clientpositive/nullformat.q.out +++ ql/src/test/results/clientpositive/nullformat.q.out @@ -47,13 +47,12 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: a string, b string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.null_tab1 + Create Table + columns: a string, b string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.null_tab1 PREHOOK: query: CREATE TABLE null_tab1(a STRING, b STRING) ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientpositive/nullformatCTAS.q.out ql/src/test/results/clientpositive/nullformatCTAS.q.out index 1d9edbcb41..974257935c 100644 --- ql/src/test/results/clientpositive/nullformatCTAS.q.out +++ 
ql/src/test/results/clientpositive/nullformatCTAS.q.out @@ -119,13 +119,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: a string, b string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.null_tab3 + Create Table + columns: a string, b string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.null_tab3 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/orc_createas1.q.out ql/src/test/results/clientpositive/orc_createas1.q.out index aa6ba1b687..b5eeed193a 100644 --- ql/src/test/results/clientpositive/orc_createas1.q.out +++ ql/src/test/results/clientpositive/orc_createas1.q.out @@ -131,13 +131,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orc_createas1b + Create Table + columns: key string, value string + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orc_createas1b Stage: Stage-2 Stats Work @@ -342,13 +341,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key int, value string, part int - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orc_createas1c + Create Table + columns: key int, value string, part int + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orc_createas1c Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/parallel_orderby.q.out ql/src/test/results/clientpositive/parallel_orderby.q.out index 158e6ee749..9bd5b73b2a 100644 --- ql/src/test/results/clientpositive/parallel_orderby.q.out +++ ql/src/test/results/clientpositive/parallel_orderby.q.out @@ -97,13 +97,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.total_ordered + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.total_ordered Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/serde_opencsv.q.out ql/src/test/results/clientpositive/serde_opencsv.q.out index 1f80eeb4ff..7ce4ca2d1d 100644 --- ql/src/test/results/clientpositive/serde_opencsv.q.out +++ 
ql/src/test/results/clientpositive/serde_opencsv.q.out @@ -41,17 +41,16 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: words string, int1 int, tinyint1 tinyint, smallint1 smallint, bigint1 bigint, boolean1 boolean, float1 float, double1 double - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.OpenCSVSerde - serde properties: - escapeChar \ - quoteChar ' - separatorChar , - name: default.serde_opencsv + Create Table + columns: words string, int1 int, tinyint1 tinyint, smallint1 smallint, bigint1 bigint, boolean1 boolean, float1 float, double1 double + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.OpenCSVSerde + serde properties: + escapeChar \ + quoteChar ' + separatorChar , + name: default.serde_opencsv PREHOOK: query: CREATE TABLE serde_opencsv( words STRING, diff --git ql/src/test/results/clientpositive/serde_regex.q.out ql/src/test/results/clientpositive/serde_regex.q.out index 1d00a498e3..9fe26f079d 100644 --- ql/src/test/results/clientpositive/serde_regex.q.out +++ ql/src/test/results/clientpositive/serde_regex.q.out @@ -41,15 +41,14 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: host string, identity string, user string, time string, request string, status string, size int, referer string, agent string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.RegexSerDe - serde properties: - input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))? - name: default.serde_regex + Create Table + columns: host string, identity string, user string, time string, request string, status string, size int, referer string, agent string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.RegexSerDe + serde properties: + input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))? 
+ name: default.serde_regex PREHOOK: query: CREATE TABLE serde_regex( host STRING, @@ -160,15 +159,14 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key decimal(38,18), value int - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.RegexSerDe - serde properties: - input.regex ([^ ]*) ([^ ]*) - name: default.serde_regex1 + Create Table + columns: key decimal(38,18), value int + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.RegexSerDe + serde properties: + input.regex ([^ ]*) ([^ ]*) + name: default.serde_regex1 PREHOOK: query: CREATE TABLE serde_regex1( key decimal(38,18), diff --git ql/src/test/results/clientpositive/show_tables.q.out ql/src/test/results/clientpositive/show_tables.q.out index 18d7c599cf..82cc06f8b8 100644 --- ql/src/test/results/clientpositive/show_tables.q.out +++ ql/src/test/results/clientpositive/show_tables.q.out @@ -28,10 +28,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Show Table Operator: - Show Tables - database name: default - pattern: shtb_* + Show Tables + database name: default + pattern: shtb_* Stage: Stage-1 Fetch Operator @@ -61,10 +60,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Show Table Operator: - Show Tables - database name: default - pattern: shtb_test1_n0|shtb_test2_n0 + Show Tables + database name: default + pattern: shtb_test1_n0|shtb_test2_n0 Stage: Stage-1 Fetch Operator diff --git ql/src/test/results/clientpositive/show_tablestatus.q.out ql/src/test/results/clientpositive/show_tablestatus.q.out index 3fbc93f13f..f875778430 100644 --- ql/src/test/results/clientpositive/show_tablestatus.q.out +++ ql/src/test/results/clientpositive/show_tablestatus.q.out @@ -10,6 +10,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 + Show Table Status + database: default + pattern: src Stage: Stage-1 Fetch Operator diff --git ql/src/test/results/clientpositive/skewjoin_noskew.q.out ql/src/test/results/clientpositive/skewjoin_noskew.q.out index f22cee003f..ab02b5c001 100644 --- ql/src/test/results/clientpositive/skewjoin_noskew.q.out +++ ql/src/test/results/clientpositive/skewjoin_noskew.q.out @@ -162,13 +162,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-9 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.noskew + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.noskew Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out index 28b7c8bd4b..e55c9f172d 100644 --- ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out +++ ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out @@ -228,13 +228,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: key string, value string - input format: 
org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.result_n1 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.result_n1 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/smb_mapjoin9.q.out ql/src/test/results/clientpositive/smb_mapjoin9.q.out index cc78847174..f286fbb530 100644 --- ql/src/test/results/clientpositive/smb_mapjoin9.q.out +++ ql/src/test/results/clientpositive/smb_mapjoin9.q.out @@ -319,13 +319,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-9 - Create Table Operator: - Create Table - columns: k1 int, value string, ds string, k2 int - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.smb_mapjoin9_results + Create Table + columns: k1 int, value string, ds string, k2 int + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.smb_mapjoin9_results Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/spark/ctas.q.out ql/src/test/results/clientpositive/spark/ctas.q.out index 907ace2c1a..b1fa6ecc1b 100644 --- ql/src/test/results/clientpositive/spark/ctas.q.out +++ ql/src/test/results/clientpositive/spark/ctas.q.out @@ -94,13 +94,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: k string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_CTAS1 + Create Table + columns: k string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_CTAS1 Stage: Stage-2 Stats Work @@ -251,13 +250,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_ctas2 + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas2 Stage: Stage-2 Stats Work @@ -408,13 +406,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: half_key double, conb string - input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat - output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat - serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.nzhang_ctas3 + Create Table + columns: half_key 
double, conb string + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.nzhang_ctas3 Stage: Stage-2 Stats Work @@ -629,14 +626,13 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: key string, value string - field delimiter: , - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_ctas4 + Create Table + columns: key string, value string + field delimiter: , + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas4 Stage: Stage-2 Stats Work @@ -788,16 +784,15 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: key string, value string - field delimiter: , - input format: org.apache.hadoop.mapred.TextInputFormat - line delimiter: + Create Table + columns: key string, value string + field delimiter: , + input format: org.apache.hadoop.mapred.TextInputFormat + line delimiter: - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_ctas5 + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas5 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/spark/parallel_orderby.q.out ql/src/test/results/clientpositive/spark/parallel_orderby.q.out index edb7793bac..aba4911144 100644 --- ql/src/test/results/clientpositive/spark/parallel_orderby.q.out +++ ql/src/test/results/clientpositive/spark/parallel_orderby.q.out @@ -84,13 +84,12 @@ STAGE PLANS: destination: hdfs://### HDFS PATH ### Stage: Stage-3 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.total_ordered + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.total_ordered Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out index 7afa720626..a92a5839c3 100644 --- ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out +++ ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out @@ -166,13 +166,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-7 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.noskew + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + 
output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.noskew Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out index d3c55a36a7..fdb15d0af3 100644 --- ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out +++ ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out @@ -111,13 +111,12 @@ STAGE PLANS: destination: hdfs://### HDFS PATH ### Stage: Stage-3 - Create Table Operator: - Create Table - columns: ds string, date string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart_date_n4 + Create Table + columns: ds string, date string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart_date_n4 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out index 0422dc21f4..b8403f4e39 100644 --- ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out +++ ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out @@ -7,8 +7,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc_merge_test_part_n0 Stage-0 - Create Table Operator: - name:default.src_orc_merge_test_part_n0 + Create Table{"name:":"default.src_orc_merge_test_part_n0"} PREHOOK: query: create table src_orc_merge_test_part_n0(key int, value string) partitioned by (ds string, ts string) stored as orc PREHOOK: type: CREATETABLE @@ -3248,8 +3247,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@abcd_n0 Stage-0 - Create Table Operator: - name:default.abcd_n0 + Create Table{"name:":"default.abcd_n0"} PREHOOK: query: create table abcd_n0 (a int, b int, c int, d int) PREHOOK: type: CREATETABLE @@ -3336,8 +3334,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_rc_merge_test Stage-0 - Create Table Operator: - name:default.src_rc_merge_test + Create Table{"name:":"default.src_rc_merge_test"} PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile PREHOOK: type: CREATETABLE @@ -3364,8 +3361,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tgt_rc_merge_test Stage-0 - Create Table Operator: - name:default.tgt_rc_merge_test + Create Table{"name:":"default.tgt_rc_merge_test"} PREHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile PREHOOK: type: CREATETABLE @@ -3597,8 +3593,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_Tmp_n0 Stage-0 - Create Table Operator: - name:default.nzhang_Tmp_n0 + Create Table{"name:":"default.nzhang_Tmp_n0"} PREHOOK: query: create table nzhang_Tmp_n0(a int, b string) PREHOOK: type: CREATETABLE @@ -3627,8 +3622,7 @@ Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT) Stage-2 Stats Work{} Stage-3 - Create Table Operator: - name:default.nzhang_CTAS1_n0 + Create Table{"name:":"default.nzhang_CTAS1_n0"} 
Stage-0 Move Operator Stage-1 @@ -3683,8 +3677,7 @@ Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT) Stage-2 Stats Work{} Stage-3 - Create Table Operator: - name:default.nzhang_ctas3_n0 + Create Table{"name:":"default.nzhang_ctas3_n0"} Stage-0 Move Operator Stage-1 @@ -3738,8 +3731,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dtt Stage-0 - Create Table Operator: - name:default.acid_dtt + Create Table{"name:":"default.acid_dtt"} PREHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out index 6fbab4641d..495e9dc9ef 100644 --- ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out +++ ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out @@ -179,13 +179,12 @@ STAGE PLANS: destination: hdfs://### HDFS PATH ### Stage: Stage-3 - Create Table Operator: - Create Table - columns: ds string, date string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart_date_n0 + Create Table + columns: ds string, date string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart_date_n0 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/spark/temp_table.q.out ql/src/test/results/clientpositive/spark/temp_table.q.out index 8ec449b9c3..e1c91ea5a1 100644 --- ql/src/test/results/clientpositive/spark/temp_table.q.out +++ ql/src/test/results/clientpositive/spark/temp_table.q.out @@ -49,15 +49,14 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat -#### A masked pattern was here #### - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.foo - isTemporary: true + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + isTemporary: true Stage: Stage-2 Stats Work @@ -124,15 +123,14 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat -#### A masked pattern was here #### - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bar - isTemporary: true + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + 
name: default.bar + isTemporary: true Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/spark/union25.q.out ql/src/test/results/clientpositive/spark/union25.q.out index 6c80376bd0..dc6035128b 100644 --- ql/src/test/results/clientpositive/spark/union25.q.out +++ ql/src/test/results/clientpositive/spark/union25.q.out @@ -200,13 +200,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: counts bigint, key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmp_unionall + Create Table + columns: counts bigint, key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_unionall Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/spark/union_top_level.q.out ql/src/test/results/clientpositive/spark/union_top_level.q.out index 51fae271ff..06f1bae0bd 100644 --- ql/src/test/results/clientpositive/spark/union_top_level.q.out +++ ql/src/test/results/clientpositive/spark/union_top_level.q.out @@ -526,13 +526,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: key string, value int - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.union_top + Create Table + columns: key string, value int + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.union_top Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/symlink_text_input_format.q.out ql/src/test/results/clientpositive/symlink_text_input_format.q.out index fd8800c964..2ac2e0d3c6 100644 --- ql/src/test/results/clientpositive/symlink_text_input_format.q.out +++ ql/src/test/results/clientpositive/symlink_text_input_format.q.out @@ -17,12 +17,11 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - name: default.symlink_text_input_format + Create Table + columns: key string, value string + input format: org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: default.symlink_text_input_format PREHOOK: query: CREATE TABLE symlink_text_input_format (key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat' PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientpositive/temp_table_truncate.q.out ql/src/test/results/clientpositive/temp_table_truncate.q.out index ba7133b45a..7c470f4414 100644 --- ql/src/test/results/clientpositive/temp_table_truncate.q.out +++ ql/src/test/results/clientpositive/temp_table_truncate.q.out @@ -81,9 +81,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Truncate Table 
Operator: - Truncate Table or Partition - TableName: tmp_src + Truncate Table or Partition + TableName: tmp_src PREHOOK: query: TRUNCATE TABLE tmp_src PREHOOK: type: TRUNCATETABLE @@ -111,9 +110,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Truncate Table Operator: - Truncate Table or Partition - TableName: tmp_srcpart + Truncate Table or Partition + TableName: tmp_srcpart PREHOOK: query: TRUNCATE TABLE tmp_srcpart PREHOOK: type: TRUNCATETABLE diff --git ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out index 77395ade1d..38da086cac 100644 --- ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out +++ ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out @@ -96,8 +96,7 @@ Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) Stage-3 Stats Work{} Stage-4 - Create Table Operator: - name:default.t_n28 + Create Table{"name:":"default.t_n28"} Stage-0 Move Operator Stage-1 diff --git ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out index 4d34541689..eca2bf9e09 100644 --- ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out +++ ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out @@ -74,8 +74,7 @@ Stage-1 Fetch Operator limit:-1 Stage-0 - Show Table Operator: - database name:default + Show Tables{"database name:":"default"} #### A masked pattern was here #### PREHOOK: type: CREATEDATABASE @@ -94,6 +93,7 @@ POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:newDB POSTHOOK: Output: hdfs://### HDFS PATH ### Stage-0 + Create Database{"name:":"newDB"} #### A masked pattern was here #### PREHOOK: type: CREATEDATABASE @@ -119,6 +119,7 @@ Stage-1 Fetch Operator limit:-1 Stage-0 + Describe Database{"database:":"newDB"} PREHOOK: query: describe database extended newDB PREHOOK: type: DESCDATABASE @@ -140,6 +141,7 @@ POSTHOOK: query: explain analyze use newDB POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:newdb Stage-0 + Switch Database{"name:":"newDB"} PREHOOK: query: use newDB PREHOOK: type: SWITCHDATABASE @@ -192,8 +194,7 @@ POSTHOOK: type: DROPTABLE POSTHOOK: Input: newdb@tab_n2 POSTHOOK: Output: newdb@tab_n2 Stage-0 - Drop Table Operator: - table:tab_n2 + Drop Table{"table:":"tab_n2"} PREHOOK: query: drop table tab_n2 PREHOOK: type: DROPTABLE @@ -216,6 +217,7 @@ POSTHOOK: query: explain analyze use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default Stage-0 + Switch Database{"name:":"default"} PREHOOK: query: use default PREHOOK: type: SWITCHDATABASE @@ -404,8 +406,7 @@ Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) Stage-3 Stats Work{} Stage-4 - Create Table Operator: - name:default.src_autho_test_n4 + Create Table{"name:":"default.src_autho_test_n4"} Stage-0 Move Operator Stage-1 @@ -632,8 +633,7 @@ PREHOOK: type: DROPVIEW POSTHOOK: query: explain analyze drop view v_n5 POSTHOOK: type: DROPVIEW Stage-0 - Drop Table Operator: - table:v_n5 + Drop Table{"table:":"v_n5"} PREHOOK: query: create view v_n5 as with cte as (select * from src order by key limit 5) select * from cte diff --git ql/src/test/results/clientpositive/tez/explainuser_3.q.out ql/src/test/results/clientpositive/tez/explainuser_3.q.out index 2b2027c754..3cb41ad1f7 100644 --- ql/src/test/results/clientpositive/tez/explainuser_3.q.out +++ ql/src/test/results/clientpositive/tez/explainuser_3.q.out @@ -106,8 +106,7 @@ Stage-1 Fetch Operator limit:-1 Stage-0 - Show Table Operator: - database name:default + Show Tables{"database 
name:":"default"} #### A masked pattern was here #### PREHOOK: type: CREATEDATABASE @@ -118,6 +117,7 @@ POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:newDB POSTHOOK: Output: hdfs://### HDFS PATH ### Stage-0 + Create Database{"name:":"newDB"} #### A masked pattern was here #### PREHOOK: type: CREATEDATABASE @@ -137,6 +137,7 @@ Stage-1 Fetch Operator limit:-1 Stage-0 + Describe Database{"database:":"newDB"} PREHOOK: query: describe database extended newDB PREHOOK: type: DESCDATABASE @@ -152,6 +153,7 @@ POSTHOOK: query: explain use newDB POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:newdb Stage-0 + Switch Database{"name:":"newDB"} PREHOOK: query: use newDB PREHOOK: type: SWITCHDATABASE @@ -188,8 +190,7 @@ POSTHOOK: type: DROPTABLE POSTHOOK: Input: newdb@tab_n1 POSTHOOK: Output: newdb@tab_n1 Stage-0 - Drop Table Operator: - table:tab_n1 + Drop Table{"table:":"tab_n1"} PREHOOK: query: drop table tab_n1 PREHOOK: type: DROPTABLE @@ -206,6 +207,7 @@ POSTHOOK: query: explain use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default Stage-0 + Switch Database{"name:":"default"} PREHOOK: query: use default PREHOOK: type: SWITCHDATABASE @@ -332,8 +334,7 @@ Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) Stage-3 Stats Work{} Stage-4 - Create Table Operator: - name:default.src_autho_test_n3 + Create Table{"name:":"default.src_autho_test_n3"} Stage-0 Move Operator Stage-1 @@ -510,8 +511,7 @@ PREHOOK: type: DROPVIEW POSTHOOK: query: explain drop view v_n1 POSTHOOK: type: DROPVIEW Stage-0 - Drop Table Operator: - table:v_n1 + Drop Table{"table:":"v_n1"} PREHOOK: query: explain create view v_n1 as with cte as (select * from src order by key limit 5) select * from cte diff --git ql/src/test/results/clientpositive/truncate_table.q.out ql/src/test/results/clientpositive/truncate_table.q.out index 6ce5f3a8ae..ba35012bac 100644 --- ql/src/test/results/clientpositive/truncate_table.q.out +++ ql/src/test/results/clientpositive/truncate_table.q.out @@ -125,9 +125,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Truncate Table Operator: - Truncate Table or Partition - TableName: src_truncate + Truncate Table or Partition + TableName: src_truncate PREHOOK: query: TRUNCATE TABLE src_truncate PREHOOK: type: TRUNCATETABLE @@ -163,12 +162,11 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Truncate Table Operator: - Truncate Table or Partition - Partition Spec: - ds 2008-04-08 - hr 11 - TableName: srcpart_truncate + Truncate Table or Partition + Partition Spec: + ds 2008-04-08 + hr 11 + TableName: srcpart_truncate PREHOOK: query: TRUNCATE TABLE srcpart_truncate partition (ds='2008-04-08', hr='11') PREHOOK: type: TRUNCATETABLE @@ -208,12 +206,11 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Truncate Table Operator: - Truncate Table or Partition - Partition Spec: - ds - hr 12 - TableName: srcpart_truncate + Truncate Table or Partition + Partition Spec: + ds + hr 12 + TableName: srcpart_truncate PREHOOK: query: TRUNCATE TABLE srcpart_truncate partition (ds, hr='12') PREHOOK: type: TRUNCATETABLE @@ -261,9 +258,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Truncate Table Operator: - Truncate Table or Partition - TableName: srcpart_truncate + Truncate Table or Partition + TableName: srcpart_truncate PREHOOK: query: TRUNCATE TABLE srcpart_truncate PREHOOK: type: TRUNCATETABLE diff --git ql/src/test/results/clientpositive/union25.q.out ql/src/test/results/clientpositive/union25.q.out index 2423f5cbb4..eb459d41f3 100644 --- ql/src/test/results/clientpositive/union25.q.out 
+++ ql/src/test/results/clientpositive/union25.q.out @@ -219,13 +219,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-7 - Create Table Operator: - Create Table - columns: counts bigint, key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmp_unionall + Create Table + columns: counts bigint, key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_unionall Stage: Stage-3 Stats Work diff --git ql/src/test/results/clientpositive/vector_decimal_6.q.out ql/src/test/results/clientpositive/vector_decimal_6.q.out index f351f3e395..26bc66df69 100644 --- ql/src/test/results/clientpositive/vector_decimal_6.q.out +++ ql/src/test/results/clientpositive/vector_decimal_6.q.out @@ -612,13 +612,12 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-3 - Create Table Operator: - Create Table - columns: k decimal(11,5), v int - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.DECIMAL_6_3 + Create Table + columns: k decimal(11,5), v int + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.DECIMAL_6_3 Stage: Stage-2 Stats Work diff --git ql/src/test/results/clientpositive/vector_tablesample_rows.q.out ql/src/test/results/clientpositive/vector_tablesample_rows.q.out index ff0982414b..a2abd783bf 100644 --- ql/src/test/results/clientpositive/vector_tablesample_rows.q.out +++ ql/src/test/results/clientpositive/vector_tablesample_rows.q.out @@ -383,15 +383,14 @@ STAGE PLANS: #### A masked pattern was here #### Stage: Stage-8 - Create Table Operator: - Create Table - columns: _c0 int - input format: org.apache.hadoop.mapred.TextInputFormat + Create Table + columns: _c0 int + input format: org.apache.hadoop.mapred.TextInputFormat #### A masked pattern was here #### - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dual - isTemporary: true + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dual + isTemporary: true Stage: Stage-2 Stats Work