diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java index e84dfdc931..a237db87d2 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hadoop.hive.ql.session.SessionState; @@ -98,12 +97,7 @@ protected void authorizeDDL(HiveSemanticAnalyzerHookContext context, hive = context.getHive(); for (Task task : rootTasks) { - if (task.getWork() instanceof DDLWork) { - DDLWork work = (DDLWork) task.getWork(); - if (work != null) { - authorizeDDLWork(context, hive, work); - } - } else if (task.getWork() instanceof DDLWork2) { + if (task.getWork() instanceof DDLWork2) { DDLWork2 work = (DDLWork2) task.getWork(); if (work != null) { authorizeDDLWork2(context, hive, work); @@ -119,15 +113,6 @@ protected void authorizeDDL(HiveSemanticAnalyzerHookContext context, } } - /** - * Authorized the given DDLWork. Does nothing by default. Override this - * and delegate to the relevant method in HiveAuthorizationProvider obtained by - * getAuthProvider(). - */ - protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context, - Hive hive, DDLWork work) throws HiveException { - } - /** * Authorized the given DDLWork2. It is only for the interim time while DDLTask and DDLWork are being refactored. */ diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/CacheMetadataDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/CacheMetadataDesc.java new file mode 100644 index 0000000000..174b077374 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/CacheMetadataDesc.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ANALYZE TABLE ... CACHE METADATA commands. 
+ */ +@Explain(displayName = "Cache Metadata", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CacheMetadataDesc implements DDLDesc { + static { + DDLTask2.registerOperation(CacheMetadataDesc.class, CacheMetadataOperation.class); + } + + private final String dbName; + private final String tableName; + private final String partitionName; + private final boolean isAllPartitions; + + public CacheMetadataDesc(String dbName, String tableName, String partitionName) { + this(dbName, tableName, partitionName, false); + } + + public CacheMetadataDesc(String dbName, String tableName, boolean isAllPartitions) { + this(dbName, tableName, null, isAllPartitions); + } + + private CacheMetadataDesc(String dbName, String tableName, String partitionName, boolean isAllPartitions) { + this.dbName = dbName; + this.tableName = tableName; + this.partitionName = partitionName; + this.isAllPartitions = isAllPartitions; + } + + @Explain(displayName = "db name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDbName() { + return dbName; + } + + @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + @Explain(displayName = "partition name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPartitionName() { + return partitionName; + } + + @Explain(displayName = "all partitions", displayOnlyOnTrue =true, + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean isAllPartitions() { + return isAllPartitions; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/CacheMetadataOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/CacheMetadataOperation.java new file mode 100644 index 0000000000..98dda31e51 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/CacheMetadataOperation.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of caching the metadata. 
+ */ +public class CacheMetadataOperation extends DDLOperation { + private final CacheMetadataDesc desc; + + public CacheMetadataOperation(DDLOperationContext context, CacheMetadataDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().cacheFileMetadata(desc.getDbName(), desc.getTableName(), desc.getPartitionName(), + desc.isAllPartitions()); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/InsertCommitHookDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/InsertCommitHookDesc.java similarity index 56% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/InsertCommitHookDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/InsertCommitHookDesc.java index 8136506381..f58e20c221 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/InsertCommitHookDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/InsertCommitHookDesc.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +16,27 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.misc; +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for Inserting Commit Hooks. + */ +@Explain(displayName = "Commit Insert Hook", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class InsertCommitHookDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(InsertCommitHookDesc.class, InsertCommitHookOperation.class); + } -@Explain(displayName = "Commit-Insert-Hook", explainLevels = { Explain.Level.USER, - Explain.Level.DEFAULT, Explain.Level.EXTENDED }) -public class InsertCommitHookDesc extends DDLDesc { private final Table table; private final boolean overwrite; @@ -35,6 +49,8 @@ public Table getTable() { return table; } + @Explain(displayName = "is overwrite", displayOnlyOnTrue = true, + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public boolean isOverwrite() { return overwrite; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/InsertCommitHookOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/InsertCommitHookOperation.java new file mode 100644 index 0000000000..c32fa31be1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/InsertCommitHookOperation.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook; +import org.apache.hadoop.hive.metastore.HiveMetaHook; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; + +/** + * Operation process of inserting a commit hook. + */ +public class InsertCommitHookOperation extends DDLOperation { + private final InsertCommitHookDesc desc; + + public InsertCommitHookOperation(DDLOperationContext context, InsertCommitHookDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws MetaException { + HiveMetaHook hook = desc.getTable().getStorageHandler().getMetaHook(); + if (hook == null || !(hook instanceof DefaultHiveMetaHook)) { + return 0; + } + DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook; + + try { + hiveMetaHook.commitInsertTable(desc.getTable().getTTable(), desc.isOverwrite()); + } catch (Throwable t) { + hiveMetaHook.rollbackInsertTable(desc.getTable().getTTable(), desc.isOverwrite()); + throw t; + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckDesc.java new file mode 100644 index 0000000000..5e008319cf --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckDesc.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.misc; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for MSCK [REPAIR] TABLE ... [ADD|DROP|SYNC PARTITIONS] commands. 
+ */ +@Explain(displayName = "Metastore Check", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class MsckDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(MsckDesc.class, MsckOperation.class); + } + + private final String tableName; + private final ArrayList> partitionsSpecs; + private final String resFile; + private final boolean repairPartitions; + private final boolean addPartitions; + private final boolean dropPartitions; + + public MsckDesc(String tableName, List> partitionSpecs, Path resFile, + boolean repairPartitions, boolean addPartitions, boolean dropPartitions) { + this.tableName = tableName; + this.partitionsSpecs = new ArrayList>(partitionSpecs.size()); + for (Map partSpec : partitionSpecs) { + this.partitionsSpecs.add(new LinkedHashMap<>(partSpec)); + } + this.resFile = resFile.toString(); + this.repairPartitions = repairPartitions; + this.addPartitions = addPartitions; + this.dropPartitions = dropPartitions; + } + + @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + @Explain(displayName = "partitions specs", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public ArrayList> getPartitionsSpecs() { + return partitionsSpecs; + } + + public String getResFile() { + return resFile; + } + + @Explain(displayName = "repair partition", displayOnlyOnTrue = true, + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean isRepairPartitions() { + return repairPartitions; + } + + @Explain(displayName = "add partition", displayOnlyOnTrue = true, + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean isAddPartitions() { + return addPartitions; + } + + @Explain(displayName = "drop partition", displayOnlyOnTrue = true, + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean isDropPartitions() { + return dropPartitions; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckOperation.java new file mode 100644 index 0000000000..5edc798851 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckOperation.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.exec.Utilities; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.Msck; +import org.apache.hadoop.hive.metastore.MsckInfo; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Operation process of metastore check. + * + * MetastoreCheck, see if the data in the metastore matches what is on the dfs. Current version checks for tables + * and partitions that are either missing on disk on in the metastore. + */ +public class MsckOperation extends DDLOperation { + private final MsckDesc desc; + + public MsckOperation(DDLOperationContext context, MsckDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + try { + Msck msck = new Msck(false, false); + msck.init(context.getDb().getConf()); + + String[] names = Utilities.getDbTableName(desc.getTableName()); + MsckInfo msckInfo = new MsckInfo(SessionState.get().getCurrentCatalog(), names[0], names[1], + desc.getPartitionsSpecs(), desc.getResFile(), desc.isRepairPartitions(), desc.isAddPartitions(), + desc.isDropPartitions(), -1); + return msck.repair(msckInfo); + } catch (MetaException e) { + LOG.error("Unable to create msck instance.", e); + return 1; + } catch (SemanticException e) { + LOG.error("Msck failed.", e); + return 1; + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ReplRemoveFirstIncLoadPendFlagDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ReplRemoveFirstIncLoadPendFlagDesc.java similarity index 69% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ReplRemoveFirstIncLoadPendFlagDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ReplRemoveFirstIncLoadPendFlagDesc.java index afa0a09af2..33d9dd7e2c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ReplRemoveFirstIncLoadPendFlagDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ReplRemoveFirstIncLoadPendFlagDesc.java @@ -16,39 +16,34 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; import java.io.Serializable; /** - * ReplRemoveFirstIncLoadPendFlagDesc. -- Remove the flag from db/table property if its already present. - * + * Remove the flag from db property if its already present. */ @Explain(displayName = "Set First Incr Load Pend Flag", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ReplRemoveFirstIncLoadPendFlagDesc extends DDLDesc implements Serializable { - +public class ReplRemoveFirstIncLoadPendFlagDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String databaseName; - String tableName; - /** - * For serialization only. 
- */ - public ReplRemoveFirstIncLoadPendFlagDesc() { + static { + DDLTask2.registerOperation(ReplRemoveFirstIncLoadPendFlagDesc.class, ReplRemoveFirstIncLoadPendFlagOperation.class); } + private final String databaseName; + public ReplRemoveFirstIncLoadPendFlagDesc(String databaseName) { - super(); this.databaseName = databaseName; } - @Explain(displayName="db_name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="db name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getDatabaseName() { return databaseName; } - - public void setDatabaseName(String databaseName) { - this.databaseName = databaseName; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ReplRemoveFirstIncLoadPendFlagOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ReplRemoveFirstIncLoadPendFlagOperation.java new file mode 100644 index 0000000000..dadfedb70d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ReplRemoveFirstIncLoadPendFlagOperation.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; + +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; + +/** + * Operation process of removing the REPL_FIRST_INC_PENDING_FLAG parameter from some tables or databases. + */ +public class ReplRemoveFirstIncLoadPendFlagOperation extends DDLOperation { + private final ReplRemoveFirstIncLoadPendFlagDesc desc; + + public ReplRemoveFirstIncLoadPendFlagOperation(DDLOperationContext context, ReplRemoveFirstIncLoadPendFlagDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws Exception { + String dbNameOrPattern = desc.getDatabaseName(); + + // Flag is set only in database for db level load. + for (String dbName : Utils.matchesDb(context.getDb(), dbNameOrPattern)) { + Database database = context.getDb().getMSC().getDatabase(dbName); + Map parameters = database.getParameters(); + String incPendPara = parameters != null ? 
parameters.get(ReplUtils.REPL_FIRST_INC_PENDING_FLAG) : null; + if (incPendPara != null) { + parameters.remove(ReplUtils.REPL_FIRST_INC_PENDING_FLAG); + context.getDb().getMSC().alterDatabase(dbName, database); + } + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowConfDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ShowConfDesc.java similarity index 66% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowConfDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ShowConfDesc.java index 9efecb1845..f9a8d2df71 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowConfDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ShowConfDesc.java @@ -15,27 +15,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.misc; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; import java.io.Serializable; -public class ShowConfDesc extends DDLDesc implements Serializable { +/** + * DDL task description for SHOW CONF commands. + */ +@Explain(displayName = "Show Configuration", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowConfDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - private Path resFile; - private String confName; - - private static final String schema = "default,type,desc#string,string,string"; - - public String getSchema() { - return schema; + static { + DDLTask2.registerOperation(ShowConfDesc.class, ShowConfOperation.class); } - public ShowConfDesc() { - } + public static final String SCHEMA = "default,type,desc#string,string,string"; + + private Path resFile; + private String confName; public ShowConfDesc(Path resFile, String confName) { this.resFile = resFile; @@ -47,16 +51,8 @@ public Path getResFile() { return resFile; } - public void setResFile(Path resFile) { - this.resFile = resFile; - } - - @Explain(displayName = "conf name", explainLevels = { Level.EXTENDED }) + @Explain(displayName = "conf name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getConfName() { return confName; } - - public void setConfName(String confName) { - this.confName = confName; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ShowConfOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ShowConfOperation.java new file mode 100644 index 0000000000..714d0e4b07 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/ShowConfOperation.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of showing some configuration. + */ +public class ShowConfOperation extends DDLOperation { + private final ShowConfDesc desc; + + public ShowConfOperation(DDLOperationContext context, ShowConfDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + ConfVars conf = HiveConf.getConfVars(desc.getConfName()); + if (conf == null) { + throw new HiveException("invalid configuration name " + desc.getConfName()); + } + String description = conf.getDescription(); + String defaultValue = conf.getDefaultValue(); + + try (DataOutputStream output = DDLUtils.getOutputStream(desc.getResFile(), context)) { + if (defaultValue != null) { + output.write(defaultValue.getBytes("UTF-8")); + } + output.write(Utilities.tabCode); + output.write(conf.typeString().getBytes("UTF-8")); + output.write(Utilities.tabCode); + if (description != null) { + output.write(description.replaceAll(" *\n *", " ").getBytes("UTF-8")); + } + output.write(Utilities.newLineCode); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/package-info.java new file mode 100644 index 0000000000..9b7f261e59 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** DDL operation descriptions and operations which can not be classified elsewhere. 
*/ +package org.apache.hadoop.hive.ql.ddl.misc; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewDesc.java index 595da9c5ea..2ec8c32178 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewDesc.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.ddl.DDLTask2; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; @@ -370,7 +370,7 @@ public Table toTable(HiveConf conf) throws HiveException { } else { // let's validate that the serde exists serDeClassName = getSerde(); - DDLTask.validateSerDe(serDeClassName, conf); + DDLUtils.validateSerDe(serDeClassName, conf); } tbl.setSerializationLib(serDeClassName); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java deleted file mode 100644 index 89dca2e7bd..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ /dev/null @@ -1,300 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.exec; - -import java.io.DataOutputStream; -import java.io.Serializable; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook; -import org.apache.hadoop.hive.metastore.HiveMetaHook; -import org.apache.hadoop.hive.metastore.Msck; -import org.apache.hadoop.hive.metastore.MsckInfo; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.ql.DriverContext; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; -import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; -import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc; -import org.apache.hadoop.hive.ql.plan.MsckDesc; -import org.apache.hadoop.hive.ql.plan.ReplRemoveFirstIncLoadPendFlagDesc; -import org.apache.hadoop.hive.ql.plan.ShowConfDesc; -import org.apache.hadoop.hive.ql.plan.api.StageType; -import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.serde2.Deserializer; -import org.apache.hive.common.util.ReflectionUtil; -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DDLTask implementation. 
- * - **/ -public class DDLTask extends Task implements Serializable { - private static final long serialVersionUID = 1L; - private static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.DDLTask"); - - private static final int separator = Utilities.tabCode; - private static final int terminator = Utilities.newLineCode; - - @Override - public boolean requireLock() { - return this.work != null && this.work.getNeedLock(); - } - - public DDLTask() { - super(); - } - - @Override - public int execute(DriverContext driverContext) { - if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) { - return 0; - } - - // Create the db - Hive db; - try { - db = Hive.get(conf); - - MsckDesc msckDesc = work.getMsckDesc(); - if (msckDesc != null) { - return msck(db, msckDesc); - } - - ShowConfDesc showConf = work.getShowConfDesc(); - if (showConf != null) { - return showConf(db, showConf); - } - - CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc(); - if (cacheMetadataDesc != null) { - return cacheMetadata(db, cacheMetadataDesc); - } - InsertCommitHookDesc insertCommitHookDesc = work.getInsertCommitHookDesc(); - if (insertCommitHookDesc != null) { - return insertCommitWork(db, insertCommitHookDesc); - } - - if (work.getReplSetFirstIncLoadFlagDesc() != null) { - return remFirstIncPendFlag(db, work.getReplSetFirstIncLoadFlagDesc()); - } - } catch (Throwable e) { - failed(e); - return 1; - } - assert false; - return 0; - } - - private int insertCommitWork(Hive db, InsertCommitHookDesc insertCommitHookDesc) throws MetaException { - boolean failed = true; - HiveMetaHook hook = insertCommitHookDesc.getTable().getStorageHandler().getMetaHook(); - if (hook == null || !(hook instanceof DefaultHiveMetaHook)) { - return 0; - } - DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook; - try { - hiveMetaHook.commitInsertTable(insertCommitHookDesc.getTable().getTTable(), - insertCommitHookDesc.isOverwrite() - ); - failed = false; - } finally { - if (failed) { - hiveMetaHook.rollbackInsertTable(insertCommitHookDesc.getTable().getTTable(), - insertCommitHookDesc.isOverwrite() - ); - } - } - return 0; - } - - private int cacheMetadata(Hive db, CacheMetadataDesc desc) throws HiveException { - db.cacheFileMetadata(desc.getDbName(), desc.getTableName(), - desc.getPartName(), desc.isAllParts()); - return 0; - } - - private void failed(Throwable e) { - while (e.getCause() != null && e.getClass() == RuntimeException.class) { - e = e.getCause(); - } - setException(e); - LOG.error("Failed", e); - } - - private int showConf(Hive db, ShowConfDesc showConf) throws Exception { - ConfVars conf = HiveConf.getConfVars(showConf.getConfName()); - if (conf == null) { - throw new HiveException("invalid configuration name " + showConf.getConfName()); - } - String description = conf.getDescription(); - String defaultValue = conf.getDefaultValue(); - DataOutputStream output = getOutputStream(showConf.getResFile()); - try { - if (defaultValue != null) { - output.write(defaultValue.getBytes()); - } - output.write(separator); - output.write(conf.typeString().getBytes()); - output.write(separator); - if (description != null) { - output.write(description.replaceAll(" *\n *", " ").getBytes()); - } - output.write(terminator); - } finally { - output.close(); - } - return 0; - } - - private DataOutputStream getOutputStream(Path outputFile) throws HiveException { - try { - FileSystem fs = outputFile.getFileSystem(conf); - return fs.create(outputFile); - } catch (Exception e) { - throw new HiveException(e); - } 
- } - - /** - * MetastoreCheck, see if the data in the metastore matches what is on the - * dfs. Current version checks for tables and partitions that are either - * missing on disk on in the metastore. - * - * @param db - * The database in question. - * @param msckDesc - * Information about the tables and partitions we want to check for. - * @return Returns 0 when execution succeeds and above 0 if it fails. - */ - private int msck(Hive db, MsckDesc msckDesc) { - Msck msck; - try { - msck = new Msck( false, false); - msck.init(db.getConf()); - String[] names = Utilities.getDbTableName(msckDesc.getTableName()); - MsckInfo msckInfo = new MsckInfo(SessionState.get().getCurrentCatalog(), names[0], - names[1], msckDesc.getPartSpecs(), msckDesc.getResFile(), - msckDesc.isRepairPartitions(), msckDesc.isAddPartitions(), msckDesc.isDropPartitions(), -1); - return msck.repair(msckInfo); - } catch (MetaException e) { - LOG.error("Unable to create msck instance.", e); - return 1; - } catch (SemanticException e) { - LOG.error("Msck failed.", e); - return 1; - } - } - - /** - * There are many places where "duplicate" Read/WriteEnity objects are added. The way this was - * initially implemented, the duplicate just replaced the previous object. - * (work.getOutputs() is a Set and WriteEntity#equals() relies on name) - * This may be benign for ReadEntity and perhaps was benign for WriteEntity before WriteType was - * added. Now that WriteEntity has a WriteType it replaces it with one with possibly different - * {@link org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType}. It's hard to imagine - * how this is desirable. - * - * As of HIVE-14993, WriteEntity with different WriteType must be considered different. - * So WriteEntity created in DDLTask cause extra output in golden files, but only because - * DDLTask sets a different WriteType for the same Entity. - * - * In the spirit of bug-for-bug compatibility, this method ensures we only add new - * WriteEntity if it's really new. - * - * @return {@code true} if item was added - */ - static boolean addIfAbsentByName(WriteEntity newWriteEntity, Set outputs) { - for(WriteEntity writeEntity : outputs) { - if(writeEntity.getName().equalsIgnoreCase(newWriteEntity.getName())) { - LOG.debug("Ignoring request to add {} because {} is present", - newWriteEntity.toStringDetail(), writeEntity.toStringDetail()); - return false; - } - } - outputs.add(newWriteEntity); - return true; - } - - /** - * Check if the given serde is valid. - */ - public static void validateSerDe(String serdeName, HiveConf conf) throws HiveException { - try { - - Deserializer d = ReflectionUtil.newInstance(conf.getClassByName(serdeName). - asSubclass(Deserializer.class), conf); - if (d != null) { - LOG.debug("Found class for {}", serdeName); - } - } catch (Exception e) { - throw new HiveException("Cannot validate serde: " + serdeName, e); - } - } - - @Override - public StageType getType() { - return StageType.DDL; - } - - @Override - public String getName() { - return "DDL"; - } - - private int remFirstIncPendFlag(Hive hive, ReplRemoveFirstIncLoadPendFlagDesc desc) throws HiveException, TException { - String dbNameOrPattern = desc.getDatabaseName(); - Map parameters; - - // Flag is set only in database for db level load. - for (String dbName : Utils.matchesDb(hive, dbNameOrPattern)) { - Database database = hive.getMSC().getDatabase(dbName); - parameters = database.getParameters(); - String incPendPara = parameters != null ? 
parameters.get(ReplUtils.REPL_FIRST_INC_PENDING_FLAG) : null; - if (incPendPara != null) { - parameters.remove(ReplUtils.REPL_FIRST_INC_PENDING_FLAG); - hive.getMSC().alterDatabase(dbName, database); - } - } - return 0; - } - - /* - uses the authorizer from SessionState will need some more work to get this to run in parallel, - however this should not be a bottle neck so might not need to parallelize this. - */ - @Override - public boolean canExecuteInParallel() { - return false; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 04d12075a5..695d08bbe2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask; import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer; @@ -423,7 +424,7 @@ public int execute(DriverContext driverContext) { resetStatisticsProps(table), tbd.getWriteId(), tbd.getStmtId(), tbd.isInsertOverwrite()); if (work.getOutputs() != null) { - DDLTask.addIfAbsentByName(new WriteEntity(table, + DDLUtils.addIfAbsentByName(new WriteEntity(table, getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs()); } } else { @@ -535,7 +536,7 @@ private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd, DataContainer dc = new DataContainer(table.getTTable(), partn.getTPartition()); // add this partition to post-execution hook if (work.getOutputs() != null) { - DDLTask.addIfAbsentByName(new WriteEntity(partn, + DDLUtils.addIfAbsentByName(new WriteEntity(partn, getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs()); } return dc; @@ -602,7 +603,7 @@ private DataContainer handleDynParts(Hive db, Table table, LoadTableDesc tbd, WriteEntity enty = new WriteEntity(partn, getWriteType(tbd, work.getLoadTableWork().getWriteType())); if (work.getOutputs() != null) { - DDLTask.addIfAbsentByName(enty, work.getOutputs()); + DDLUtils.addIfAbsentByName(enty, work.getOutputs()); } // Need to update the queryPlan's output as well so that post-exec hook get executed. 
// This is only needed for dynamic partitioning since for SP the the WriteEntity is diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index 6ac695d502..c4e537042a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.ConditionalWork; import org.apache.hadoop.hive.ql.plan.CopyWork; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.ExplainSQRewriteWork; import org.apache.hadoop.hive.ql.plan.ExplainWork; @@ -89,7 +88,6 @@ public TaskTuple(Class workClass, Class> taskClass) { taskvec.add(new TaskTuple(FetchWork.class, FetchTask.class)); taskvec.add(new TaskTuple(CopyWork.class, CopyTask.class)); taskvec.add(new TaskTuple(ReplCopyWork.class, ReplCopyTask.class)); - taskvec.add(new TaskTuple(DDLWork.class, DDLTask.class)); taskvec.add(new TaskTuple(DDLWork2.class, DDLTask2.class)); taskvec.add(new TaskTuple( MaterializedViewDesc.class, diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index f6975d3362..f3f5a2b1d7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.ddl.DDLWork2; import org.apache.hadoop.hive.ql.ddl.database.AlterDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.misc.ReplRemoveFirstIncLoadPendFlagDesc; import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -47,10 +48,8 @@ import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker; import org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger; import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.ReplTxnWork; -import org.apache.hadoop.hive.ql.plan.ReplRemoveFirstIncLoadPendFlagDesc; import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.slf4j.Logger; @@ -160,7 +159,7 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, } ReplRemoveFirstIncLoadPendFlagDesc desc = new ReplRemoveFirstIncLoadPendFlagDesc(dbName); - Task updateIncPendTask = TaskFactory.get(new DDLWork(inputs, outputs, desc), conf); + Task updateIncPendTask = TaskFactory.get(new DDLWork2(inputs, outputs, desc), conf); taskChainTail.addDependentTask(updateIncPendTask); taskChainTail = updateIncPendTask; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java index 3f5b0e3e36..6b97d662f8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; import org.apache.hadoop.hive.ql.plan.ConditionalWork; 
import org.apache.hadoop.hive.ql.plan.CopyWork; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.ExplainSQRewriteWork; import org.apache.hadoop.hive.ql.plan.ExplainWork; @@ -109,7 +108,6 @@ else if(work instanceof ReplLoadWork || work instanceof BasicStatsWork || work instanceof ConditionalWork || work instanceof CopyWork || - work instanceof DDLWork || work instanceof DDLWork2 || work instanceof DependencyCollectionWork || work instanceof ExplainSQRewriteWork || diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index f13593e36c..e6990eeb12 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -84,6 +84,9 @@ import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.function.DescFunctionDesc; import org.apache.hadoop.hive.ql.ddl.function.ShowFunctionsDesc; +import org.apache.hadoop.hive.ql.ddl.misc.CacheMetadataDesc; +import org.apache.hadoop.hive.ql.ddl.misc.MsckDesc; +import org.apache.hadoop.hive.ql.ddl.misc.ShowConfDesc; import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc; import org.apache.hadoop.hive.ql.ddl.privilege.ShowGrantDesc; import org.apache.hadoop.hive.ql.ddl.privilege.ShowPrincipalsDesc; @@ -185,10 +188,8 @@ import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactory; import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl; import org.apache.hadoop.hive.ql.plan.BasicStatsWork; -import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -197,9 +198,7 @@ import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; -import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.ShowConfDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.ValidationUtility; @@ -653,7 +652,7 @@ private void analyzeCacheMetadata(ASTNode ast) throws SemanticException { desc = new CacheMetadataDesc(tbl.getDbName(), tbl.getTableName(), tbl.isPartitioned()); inputs.add(new ReadEntity(tbl)); } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeAlterTableUpdateStats(ASTNode ast, String tblName, Map partSpec) @@ -788,7 +787,7 @@ private void analyzeShowRolePrincipals(ASTNode ast) throws SemanticException { private void analyzeShowRoles(ASTNode ast) throws SemanticException { @SuppressWarnings("unchecked") - Task roleDDLTask = (Task) hiveAuthorizationTaskFactory + Task roleDDLTask = (Task) hiveAuthorizationTaskFactory .createShowRolesTask(ast, ctx.getResFile(), getInputs(), getOutputs()); if (roleDDLTask != null) { @@ -2859,9 +2858,8 @@ private void analyzeShowDbLocks(ASTNode ast) 
throws SemanticException { private void analyzeShowConf(ASTNode ast) throws SemanticException { String confName = stripQuotes(ast.getChild(0).getText()); ShowConfDesc showConfDesc = new ShowConfDesc(ctx.getResFile(), confName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showConfDesc))); - setFetchTask(createFetchTask(showConfDesc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showConfDesc))); + setFetchTask(createFetchTask(ShowConfDesc.SCHEMA)); } private void analyzeShowViews(ASTNode ast) throws SemanticException { @@ -3789,10 +3787,8 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException { } else { outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED)); } - MsckDesc checkDesc = new MsckDesc(tableName, specs, ctx.getResFile(), - repair, addPartitions, dropPartitions); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - checkDesc))); + MsckDesc checkDesc = new MsckDesc(tableName, specs, ctx.getResFile(), repair, addPartitions, dropPartitions); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), checkDesc))); } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 1a2a1e8f47..b97ff31dbd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -101,6 +101,7 @@ import org.apache.hadoop.hive.ql.cache.results.CacheUsage; import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache; import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.misc.InsertCommitHookDesc; import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableLikeDesc; import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableUnsetPropertiesDesc; @@ -196,7 +197,6 @@ import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowSpec; import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowType; import org.apache.hadoop.hive.ql.plan.AggregationDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc; @@ -211,7 +211,6 @@ import org.apache.hadoop.hive.ql.plan.ForwardDesc; import org.apache.hadoop.hive.ql.plan.GroupByDesc; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc; import org.apache.hadoop.hive.ql.plan.JoinCondDesc; import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.LateralViewForwardDesc; @@ -12578,10 +12577,9 @@ void analyzeInternal(ASTNode ast, PlannerContextFactory pcf) throws SemanticExce .map(task -> (DDLWork2) task.getWork()) .filter(ddlWork -> ddlWork.getDDLDesc() instanceof PreInsertTableDesc) .map(ddlWork -> (PreInsertTableDesc)ddlWork.getDDLDesc()) - .map(ddlPreInsertTask -> new InsertCommitHookDesc(ddlPreInsertTask.getTable(), - ddlPreInsertTask.isOverwrite())) + .map(desc -> new InsertCommitHookDesc(desc.getTable(), desc.isOverwrite())) .forEach(insertCommitHookDesc -> tezTask.addDependentTask( - TaskFactory.get(new DDLWork(getInputs(), getOutputs(), insertCommitHookDesc), conf))); + TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), insertCommitHookDesc), conf))); } LOG.info("Completed plan generation"); 
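Note on the pattern the new ql/ddl/misc files follow (illustrative sketch, not part of the patch): each DDLDesc implementation registers its DDLOperation counterpart with DDLTask2 in a static initializer, and the operation is constructed with a DDLOperationContext plus its desc and does the actual work in execute(). ExampleDesc and ExampleOperation below are made-up names, and the sketch assumes DDLTask2 resolves the operation class from that registry when the task runs; the DDLTask2 and DDLOperationContext sources are not shown in this patch.

package org.apache.hadoop.hive.ql.ddl.misc;

import org.apache.hadoop.hive.ql.ddl.DDLDesc;
import org.apache.hadoop.hive.ql.ddl.DDLOperation;
import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
import org.apache.hadoop.hive.ql.ddl.DDLTask2;

/** Illustrative desc, shaped like the misc descs introduced in this patch. */
public class ExampleDesc implements DDLDesc {
  static {
    // Pair the desc with its operation; DDLTask2 keeps the desc-class to operation-class mapping.
    DDLTask2.registerOperation(ExampleDesc.class, ExampleOperation.class);
  }

  private final String name;

  public ExampleDesc(String name) {
    this.name = name;
  }

  public String getName() {
    return name;
  }
}

/** Illustrative operation, constructed with the context and its desc, then executed. */
class ExampleOperation extends DDLOperation {
  private final ExampleDesc desc;

  public ExampleOperation(DDLOperationContext context, ExampleDesc desc) {
    super(context);
    this.desc = desc;
  }

  @Override
  public int execute() throws Exception {
    // Real operations reach the metastore through context.getDb(); 0 means success, non-zero means failure.
    return 0;
  }
}

A semantic analyzer would then schedule such a command the same way the modified analyzers in this patch do, for example rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), new ExampleDesc("x")))).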
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CacheMetadataDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CacheMetadataDesc.java deleted file mode 100644 index e061adb757..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CacheMetadataDesc.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - - -@SuppressWarnings("serial") -public class CacheMetadataDesc extends DDLDesc { - private final String dbName, tableName, partName; - private final boolean isAllParts; - - public CacheMetadataDesc(String dbName, String tableName, String partName) { - this(dbName, tableName, partName, false); - } - - public CacheMetadataDesc(String dbName, String tableName, boolean isAllParts) { - this(dbName, tableName, null, isAllParts); - } - - private CacheMetadataDesc(String dbName, String tableName, String partName, boolean isAllParts) { - super(); - this.dbName = dbName; - this.tableName = tableName; - this.partName = partName; - this.isAllParts = isAllParts; - } - - public boolean isAllParts() { - return isAllParts; - } - - public String getPartName() { - return partName; - } - - public String getDbName() { - return dbName; - } - - public String getTableName() { - return tableName; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java deleted file mode 100644 index 639aaba9e8..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.plan; - -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -import java.io.Serializable; -import java.util.HashSet; - -/** - * DDLWork. 
- * - */ -public class DDLWork implements Serializable { - private static final long serialVersionUID = 1L; - - // TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates. - private InsertCommitHookDesc insertCommitHookDesc; - private MsckDesc msckDesc; - - private ShowConfDesc showConfDesc; - - private ReplRemoveFirstIncLoadPendFlagDesc replSetFirstIncLoadFlagDesc; - - boolean needLock = false; - - /** - * ReadEntitites that are passed to the hooks. - */ - protected HashSet inputs; - /** - * List of WriteEntities that are passed to the hooks. - */ - protected HashSet outputs; - private CacheMetadataDesc cacheMetadataDesc; - - public DDLWork() { - } - - public DDLWork(HashSet inputs, HashSet outputs) { - this.inputs = inputs; - this.outputs = outputs; - } - - public DDLWork(HashSet inputs, HashSet outputs, - ShowConfDesc showConfDesc) { - this(inputs, outputs); - this.showConfDesc = showConfDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - MsckDesc checkDesc) { - this(inputs, outputs); - - msckDesc = checkDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - CacheMetadataDesc cacheMetadataDesc) { - this(inputs, outputs); - this.cacheMetadataDesc = cacheMetadataDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - InsertCommitHookDesc insertCommitHookDesc - ) { - this(inputs, outputs); - this.insertCommitHookDesc = insertCommitHookDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - ReplRemoveFirstIncLoadPendFlagDesc replSetFirstIncLoadFlagDesc) { - this(inputs, outputs); - this.replSetFirstIncLoadFlagDesc = replSetFirstIncLoadFlagDesc; - } - - /** - * @return Metastore check description - */ - public MsckDesc getMsckDesc() { - return msckDesc; - } - - public HashSet getInputs() { - return inputs; - } - - public HashSet getOutputs() { - return outputs; - } - - public boolean getNeedLock() { - return needLock; - } - - public void setNeedLock(boolean needLock) { - this.needLock = needLock; - } - - /** - * @return information about the metadata to be cached - */ - public CacheMetadataDesc getCacheMetadataDesc() { - return this.cacheMetadataDesc; - } - - public ShowConfDesc getShowConfDesc() { - return showConfDesc; - } - - @Explain(displayName = "Insert operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public InsertCommitHookDesc getInsertCommitHookDesc() { - return insertCommitHookDesc; - } - - public ReplRemoveFirstIncLoadPendFlagDesc getReplSetFirstIncLoadFlagDesc() { - return replSetFirstIncLoadFlagDesc; - } - - public void setReplSetFirstIncLoadFlagDesc(ReplRemoveFirstIncLoadPendFlagDesc replSetFirstIncLoadFlagDesc) { - this.replSetFirstIncLoadFlagDesc = replSetFirstIncLoadFlagDesc; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/MsckDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/MsckDesc.java deleted file mode 100644 index 5d2307c168..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/MsckDesc.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.fs.Path; - -/** - * MsckDesc. - * - */ -public class MsckDesc extends DDLWork implements Serializable { - - private String tableName; - private ArrayList> partSpecs; - private String resFile; - private boolean repairPartitions; - private boolean addPartitions; - private boolean dropPartitions; - - /** - * For serialization use only. - */ - public MsckDesc() { - } - - /** - * Description of a msck command. - * - * @param tableName - * Table to check, can be null. - * @param partSpecs - * Partition specification, can be null. - * @param resFile - * Where to save the output of the command - * @param repairPartitions - * remove stale / add new partitions found during the check - * @param addPartitions - * find partitions that are missing from metastore, and add them when repairPartitions - * is set to true - * @param dropPartitions - * find stale partitions in metastore, and drop them when repairPartitions - * is set to true - */ - public MsckDesc(String tableName, List> partSpecs, - Path resFile, boolean repairPartitions, boolean addPartitions, boolean dropPartitions) { - super(); - this.tableName = tableName; - this.partSpecs = new ArrayList>(partSpecs.size()); - for (Map partSpec : partSpecs) { - this.partSpecs.add(new LinkedHashMap<>(partSpec)); - } - this.resFile = resFile.toString(); - this.repairPartitions = repairPartitions; - this.addPartitions = addPartitions; - this.dropPartitions = dropPartitions; - } - - /** - * @return the table to check - */ - public String getTableName() { - return tableName; - } - - /** - * @param tableName - * the table to check - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @return partitions to check. - */ - public ArrayList> getPartSpecs() { - return partSpecs; - } - - /** - * @param partSpecs - * partitions to check. 
- */ - public void setPartSpecs(ArrayList> partSpecs) { - this.partSpecs = partSpecs; - } - - /** - * @return file to save command output to - */ - public String getResFile() { - return resFile; - } - - /** - * @param resFile - * file to save command output to - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return remove stale / add new partitions found during the check - */ - public boolean isRepairPartitions() { - return repairPartitions; - } - - /** - * @return if missing partitions is to be found, and added with repair option - */ - public boolean isAddPartitions() { - return addPartitions; - } - - /** - * @return if stale partitions is to be found, and removed with repair option - */ - public boolean isDropPartitions() { - return dropPartitions; - } - - /** - * @param repairPartitions - * stale / add new partitions found during the check - */ - public void setRepairPartitions(boolean repairPartitions) { - this.repairPartitions = repairPartitions; - } -} diff --git ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java index 6ee74ca257..bc95858244 100644 --- ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java +++ ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.exec.Task; import org.junit.Test; @@ -49,9 +49,9 @@ public void testResolvingDriverAlias() throws Exception { aliasToKnownSize.put("alias2", 2048l); aliasToKnownSize.put("alias3", 4096l); - DDLTask task1 = new DDLTask(); + DDLTask2 task1 = new DDLTask2(); task1.setId("alias2"); - DDLTask task2 = new DDLTask(); + DDLTask2 task2 = new DDLTask2(); task2.setId("alias3"); // joins alias1, alias2, alias3 (alias1 was not eligible for big pos) diff --git ql/src/test/queries/clientpositive/msck_repair_drop.q ql/src/test/queries/clientpositive/msck_repair_drop.q index 407249e55a..3fe80ef786 100644 --- ql/src/test/queries/clientpositive/msck_repair_drop.q +++ ql/src/test/queries/clientpositive/msck_repair_drop.q @@ -37,7 +37,9 @@ dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291/datafi dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101; dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101/datafile; +EXPLAIN MSCK TABLE default.repairtable_n1; MSCK TABLE default.repairtable_n1; +EXPLAIN MSCK REPAIR TABLE default.repairtable_n1; MSCK REPAIR TABLE default.repairtable_n1; -- Now all 12 partitions are in @@ -48,7 +50,9 @@ dfs -rmr ${system:test.warehouse.dir}/repairtable_n1/p1=2; -- test 1: each partition is dropped individually set hive.msck.repair.batch.size=1; +EXPLAIN MSCK TABLE default.repairtable_n1 DROP PARTITIONS; MSCK TABLE default.repairtable_n1 DROP PARTITIONS; +EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS; MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS; show partitions default.repairtable_n1; @@ -148,7 +152,9 @@ dfs -rmr ${system:test.warehouse.dir}/repairtable_n1/p1=3; -- Status: p1=3 dropped from filesystem, but exists in metastore -- p1=4 exists in filesystem but not in metastore -- test add partition: only brings in p1=4 and doesn't remove p1=3 +EXPLAIN MSCK TABLE 
default.repairtable_n1 ADD PARTITIONS; MSCK TABLE default.repairtable_n1 ADD PARTITIONS; +EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 ADD PARTITIONS; MSCK REPAIR TABLE default.repairtable_n1 ADD PARTITIONS; show partitions default.repairtable_n1; -- test add partition keyword: end @@ -174,7 +180,9 @@ dfs -rmr ${system:test.warehouse.dir}/repairtable_n1/p1=4; -- Status: p1=4 dropped from filesystem, but exists in metastore -- p1=5 exists in filesystem but not in metastore (as part of drop test) -- test sync partition: removes p1=4 from metastore and updates metadata for p1=5 +EXPLAIN MSCK TABLE default.repairtable_n1 SYNC PARTITIONS; MSCK TABLE default.repairtable_n1 SYNC PARTITIONS; +EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 SYNC PARTITIONS; MSCK REPAIR TABLE default.repairtable_n1 SYNC PARTITIONS; show partitions default.repairtable_n1; -- test sync partition keyword: end diff --git ql/src/test/queries/clientpositive/show_conf.q ql/src/test/queries/clientpositive/show_conf.q index 064358d49d..523df74696 100644 --- ql/src/test/queries/clientpositive/show_conf.q +++ ql/src/test/queries/clientpositive/show_conf.q @@ -1,3 +1,4 @@ +explain show conf "hive.auto.convert.sortmerge.join.to.mapjoin"; show conf "hive.auto.convert.sortmerge.join.to.mapjoin"; show conf "hive.zookeeper.session.timeout"; diff --git ql/src/test/queries/clientpositive/stats_filemetadata.q ql/src/test/queries/clientpositive/stats_filemetadata.q index 5ddbaac626..b0b9ef2d71 100644 --- ql/src/test/queries/clientpositive/stats_filemetadata.q +++ ql/src/test/queries/clientpositive/stats_filemetadata.q @@ -11,6 +11,7 @@ insert overwrite table many_files partition (ds='2') select * from src; dfs -ls -R ${hiveconf:hive.metastore.warehouse.dir}/many_files/; +explain analyze table many_files cache metadata; analyze table many_files cache metadata; set hive.fetch.task.conversion=none; diff --git ql/src/test/results/clientnegative/msck_repair_1.q.out ql/src/test/results/clientnegative/msck_repair_1.q.out index 174419fe44..2566cc37b0 100644 --- ql/src/test/results/clientnegative/msck_repair_1.q.out +++ ql/src/test/results/clientnegative/msck_repair_1.q.out @@ -19,4 +19,4 @@ POSTHOOK: Output: default@repairtable PREHOOK: query: MSCK TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2 diff --git ql/src/test/results/clientnegative/msck_repair_2.q.out ql/src/test/results/clientnegative/msck_repair_2.q.out index 174419fe44..2566cc37b0 100644 --- ql/src/test/results/clientnegative/msck_repair_2.q.out +++ ql/src/test/results/clientnegative/msck_repair_2.q.out @@ -19,4 +19,4 @@ POSTHOOK: Output: default@repairtable PREHOOK: query: MSCK TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2 diff --git ql/src/test/results/clientnegative/msck_repair_3.q.out ql/src/test/results/clientnegative/msck_repair_3.q.out index 174419fe44..2566cc37b0 100644 --- ql/src/test/results/clientnegative/msck_repair_3.q.out +++ ql/src/test/results/clientnegative/msck_repair_3.q.out @@ -19,4 +19,4 @@ POSTHOOK: Output: default@repairtable PREHOOK: query: MSCK TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, 
return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2 diff --git ql/src/test/results/clientnegative/msck_repair_4.q.out ql/src/test/results/clientnegative/msck_repair_4.q.out index bb9cf47b08..3499bfeea6 100644 --- ql/src/test/results/clientnegative/msck_repair_4.q.out +++ ql/src/test/results/clientnegative/msck_repair_4.q.out @@ -19,4 +19,4 @@ POSTHOOK: Output: default@repairtable PREHOOK: query: MSCK REPAIR TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2 diff --git ql/src/test/results/clientnegative/table_nonprintable_negative.q.out ql/src/test/results/clientnegative/table_nonprintable_negative.q.out index 3891747c8d..d3d92dc99b 100644 --- ql/src/test/results/clientnegative/table_nonprintable_negative.q.out +++ ql/src/test/results/clientnegative/table_nonprintable_negative.q.out @@ -17,4 +17,4 @@ POSTHOOK: Output: default@table_external PREHOOK: query: msck repair table table_external PREHOOK: type: MSCK PREHOOK: Output: default@table_external -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2 diff --git ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out index 1401dbc375..09636a4153 100644 --- ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out @@ -402,8 +402,7 @@ STAGE PLANS: name: default.druid_partitioned_table Stage: Stage-4 - Insert operator: - Commit-Insert-Hook + Commit Insert Hook PREHOOK: query: INSERT INTO TABLE druid_partitioned_table SELECT cast (`ctimestamp2` as timestamp with local time zone) as `__time`, @@ -544,8 +543,8 @@ STAGE PLANS: name: default.druid_partitioned_table Stage: Stage-4 - Insert operator: - Commit-Insert-Hook + Commit Insert Hook + is overwrite: true PREHOOK: query: INSERT OVERWRITE TABLE druid_partitioned_table SELECT cast (`ctimestamp1` as timestamp with local time zone) as `__time`, diff --git ql/src/test/results/clientpositive/druid/druidmini_mv.q.out ql/src/test/results/clientpositive/druid/druidmini_mv.q.out index 08a83da67a..194057195e 100644 --- ql/src/test/results/clientpositive/druid/druidmini_mv.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_mv.q.out @@ -557,8 +557,8 @@ STAGE PLANS: Vertex: Union 2 Stage: Stage-5 - Insert operator: - Commit-Insert-Hook + Commit Insert Hook + is overwrite: true PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2_n0 REBUILD PREHOOK: type: QUERY diff --git ql/src/test/results/clientpositive/msck_repair_drop.q.out ql/src/test/results/clientpositive/msck_repair_drop.q.out index 27b718c670..04179f3304 100644 --- ql/src/test/results/clientpositive/msck_repair_drop.q.out +++ ql/src/test/results/clientpositive/msck_repair_drop.q.out @@ -10,6 +10,21 @@ POSTHOOK: query: CREATE TABLE repairtable_n1(col STRING) PARTITIONED BY (p1 STRI POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@repairtable_n1 +PREHOOK: query: EXPLAIN MSCK TABLE default.repairtable_n1 +PREHOOK: type: MSCK +PREHOOK: Output: default@repairtable_n1 +POSTHOOK: query: EXPLAIN MSCK TABLE 
default.repairtable_n1 +POSTHOOK: type: MSCK +POSTHOOK: Output: default@repairtable_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Metastore Check + table name: default.repairtable_n1 + add partition: true + PREHOOK: query: MSCK TABLE default.repairtable_n1 PREHOOK: type: MSCK PREHOOK: Output: default@repairtable_n1 @@ -17,6 +32,22 @@ POSTHOOK: query: MSCK TABLE default.repairtable_n1 POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable_n1 Partitions not in metastore: repairtable_n1:p1=1/p2=11 repairtable_n1:p1=1/p2=12 repairtable_n1:p1=2/p2=21 repairtable_n1:p1=2/p2=210 repairtable_n1:p1=2/p2=22 repairtable_n1:p1=2/p2=23 repairtable_n1:p1=2/p2=24 repairtable_n1:p1=2/p2=25 repairtable_n1:p1=2/p2=26 repairtable_n1:p1=2/p2=27 repairtable_n1:p1=2/p2=28 repairtable_n1:p1=2/p2=29 +PREHOOK: query: EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 +PREHOOK: type: MSCK +PREHOOK: Output: default@repairtable_n1 +POSTHOOK: query: EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 +POSTHOOK: type: MSCK +POSTHOOK: Output: default@repairtable_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Metastore Check + table name: default.repairtable_n1 + add partition: true + repair partition: true + PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 PREHOOK: type: MSCK PREHOOK: Output: default@repairtable_n1 @@ -44,6 +75,21 @@ p1=2/p2=27 p1=2/p2=28 p1=2/p2=29 #### A masked pattern was here #### +PREHOOK: query: EXPLAIN MSCK TABLE default.repairtable_n1 DROP PARTITIONS +PREHOOK: type: MSCK +PREHOOK: Output: default@repairtable_n1 +POSTHOOK: query: EXPLAIN MSCK TABLE default.repairtable_n1 DROP PARTITIONS +POSTHOOK: type: MSCK +POSTHOOK: Output: default@repairtable_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Metastore Check + table name: default.repairtable_n1 + drop partition: true + PREHOOK: query: MSCK TABLE default.repairtable_n1 DROP PARTITIONS PREHOOK: type: MSCK PREHOOK: Output: default@repairtable_n1 @@ -51,6 +97,22 @@ POSTHOOK: query: MSCK TABLE default.repairtable_n1 DROP PARTITIONS POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable_n1 Partitions missing from filesystem: repairtable_n1:p1=2/p2=21 repairtable_n1:p1=2/p2=210 repairtable_n1:p1=2/p2=22 repairtable_n1:p1=2/p2=23 repairtable_n1:p1=2/p2=24 repairtable_n1:p1=2/p2=25 repairtable_n1:p1=2/p2=26 repairtable_n1:p1=2/p2=27 repairtable_n1:p1=2/p2=28 repairtable_n1:p1=2/p2=29 +PREHOOK: query: EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS +PREHOOK: type: MSCK +PREHOOK: Output: default@repairtable_n1 +POSTHOOK: query: EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS +POSTHOOK: type: MSCK +POSTHOOK: Output: default@repairtable_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Metastore Check + table name: default.repairtable_n1 + drop partition: true + repair partition: true + PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS PREHOOK: type: MSCK PREHOOK: Output: default@repairtable_n1 @@ -234,6 +296,21 @@ p1=1/p2=12 p1=3/p2=31 p1=3/p2=32 #### A masked pattern was here #### +PREHOOK: query: EXPLAIN MSCK TABLE default.repairtable_n1 ADD PARTITIONS +PREHOOK: type: MSCK +PREHOOK: Output: default@repairtable_n1 +POSTHOOK: query: EXPLAIN MSCK TABLE default.repairtable_n1 ADD PARTITIONS +POSTHOOK: type: MSCK +POSTHOOK: Output: default@repairtable_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Metastore 
Check + table name: default.repairtable_n1 + add partition: true + PREHOOK: query: MSCK TABLE default.repairtable_n1 ADD PARTITIONS PREHOOK: type: MSCK PREHOOK: Output: default@repairtable_n1 @@ -242,6 +319,22 @@ POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable_n1 Partitions not in metastore: repairtable_n1:p1=4/p2=41 repairtable_n1:p1=4/p2=42 Partitions missing from filesystem: repairtable_n1:p1=3/p2=31 repairtable_n1:p1=3/p2=32 +PREHOOK: query: EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 ADD PARTITIONS +PREHOOK: type: MSCK +PREHOOK: Output: default@repairtable_n1 +POSTHOOK: query: EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 ADD PARTITIONS +POSTHOOK: type: MSCK +POSTHOOK: Output: default@repairtable_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Metastore Check + table name: default.repairtable_n1 + add partition: true + repair partition: true + PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 ADD PARTITIONS PREHOOK: type: MSCK PREHOOK: Output: default@repairtable_n1 @@ -292,6 +385,22 @@ p1=1/p2=12 p1=4/p2=41 p1=4/p2=42 #### A masked pattern was here #### +PREHOOK: query: EXPLAIN MSCK TABLE default.repairtable_n1 SYNC PARTITIONS +PREHOOK: type: MSCK +PREHOOK: Output: default@repairtable_n1 +POSTHOOK: query: EXPLAIN MSCK TABLE default.repairtable_n1 SYNC PARTITIONS +POSTHOOK: type: MSCK +POSTHOOK: Output: default@repairtable_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Metastore Check + table name: default.repairtable_n1 + add partition: true + drop partition: true + PREHOOK: query: MSCK TABLE default.repairtable_n1 SYNC PARTITIONS PREHOOK: type: MSCK PREHOOK: Output: default@repairtable_n1 @@ -300,6 +409,23 @@ POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable_n1 Partitions not in metastore: repairtable_n1:p1=5/p2=51 repairtable_n1:p1=5/p2=52 Partitions missing from filesystem: repairtable_n1:p1=4/p2=41 repairtable_n1:p1=4/p2=42 +PREHOOK: query: EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 SYNC PARTITIONS +PREHOOK: type: MSCK +PREHOOK: Output: default@repairtable_n1 +POSTHOOK: query: EXPLAIN MSCK REPAIR TABLE default.repairtable_n1 SYNC PARTITIONS +POSTHOOK: type: MSCK +POSTHOOK: Output: default@repairtable_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Metastore Check + table name: default.repairtable_n1 + add partition: true + drop partition: true + repair partition: true + PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 SYNC PARTITIONS PREHOOK: type: MSCK PREHOOK: Output: default@repairtable_n1 diff --git ql/src/test/results/clientpositive/show_conf.q.out ql/src/test/results/clientpositive/show_conf.q.out index 7e65dfd31e..9d4a3174cd 100644 --- ql/src/test/results/clientpositive/show_conf.q.out +++ ql/src/test/results/clientpositive/show_conf.q.out @@ -1,3 +1,22 @@ +PREHOOK: query: explain show conf "hive.auto.convert.sortmerge.join.to.mapjoin" +PREHOOK: type: SHOWCONF +POSTHOOK: query: explain show conf "hive.auto.convert.sortmerge.join.to.mapjoin" +POSTHOOK: type: SHOWCONF +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Configuration + conf name: hive.auto.convert.sortmerge.join.to.mapjoin + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: show conf "hive.auto.convert.sortmerge.join.to.mapjoin" PREHOOK: type: SHOWCONF POSTHOOK: query: show conf "hive.auto.convert.sortmerge.join.to.mapjoin"
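Editor's note on the golden-file changes above: moving MSCK, SHOW CONF and CACHE METADATA handling onto the new org.apache.hadoop.hive.ql.ddl framework means these statements now fail from DDLTask2 rather than DDLTask, and EXPLAIN renders a dedicated stage ("Metastore Check", "Show Configuration") with per-flag lines such as "add partition: true" and "repair partition: true". As a rough illustration of how output of that shape is typically produced, the following is a minimal sketch of a description class whose getters carry @Explain annotations. The class name MetastoreCheckDesc, its constructor, and its exact field set are hypothetical and are not taken from this patch; only the displayed labels are grounded in the msck_repair_drop.q.out expectations above, and the sketch assumes the new package provides a DDLDesc interface mirroring the old org.apache.hadoop.hive.ql.plan.DDLDesc.

// Illustrative sketch only; the real descriptor introduced by this patch may differ.
package org.apache.hadoop.hive.ql.ddl.misc;

import org.apache.hadoop.hive.ql.ddl.DDLDesc;
import org.apache.hadoop.hive.ql.plan.Explain;
import org.apache.hadoop.hive.ql.plan.Explain.Level;

// The class-level annotation supplies the stage heading shown in the plan.
@Explain(displayName = "Metastore Check", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
public class MetastoreCheckDesc implements DDLDesc {
  private final String tableName;
  private final boolean addPartitions;
  private final boolean dropPartitions;
  private final boolean repairPartitions;

  public MetastoreCheckDesc(String tableName, boolean addPartitions, boolean dropPartitions,
      boolean repairPartitions) {
    this.tableName = tableName;
    this.addPartitions = addPartitions;
    this.dropPartitions = dropPartitions;
    this.repairPartitions = repairPartitions;
  }

  @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public String getTableName() {
    return tableName;
  }

  // displayOnlyOnTrue keeps a flag out of the plan when it is false, which is why
  // "EXPLAIN MSCK TABLE ... DROP PARTITIONS" prints only "drop partition: true".
  @Explain(displayName = "add partition", displayOnlyOnTrue = true,
      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public boolean isAddPartitions() {
    return addPartitions;
  }

  @Explain(displayName = "drop partition", displayOnlyOnTrue = true,
      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public boolean isDropPartitions() {
    return dropPartitions;
  }

  @Explain(displayName = "repair partition", displayOnlyOnTrue = true,
      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public boolean isRepairPartitions() {
    return repairPartitions;
  }
}

Under these assumptions, a plan for EXPLAIN MSCK REPAIR TABLE ... SYNC PARTITIONS would print the table name plus the add, drop and repair flags, matching the shape of the expected output added in this change.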