diff --git a/common/src/java/org/apache/hadoop/hive/conf/Constants.java b/common/src/java/org/apache/hadoop/hive/conf/Constants.java index 51408b1c77..ad2f9143f5 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/Constants.java +++ b/common/src/java/org/apache/hadoop/hive/conf/Constants.java @@ -25,6 +25,9 @@ public static final String LLAP_NUM_BUCKETS = "llap.num.buckets"; public static final String LLAP_BUCKET_ID = "llap.bucket.id"; + /* Constants for MV */ + public static final String MATERIALIZED_VIEW_VERSION = "materialized.view.version"; + /* Constants for Druid storage handler */ public static final String DRUID_HIVE_STORAGE_HANDLER_ID = "org.apache.hadoop.hive.druid.DruidStorageHandler"; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index d1d2d1f097..2de98e6e79 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -2239,9 +2239,8 @@ public void testViewsReplication() throws IOException { run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver); verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver); - // TODO: Enable back when HIVE-18387 goes in, as it fixes the issue. - // The problem is that alter for stats is removing the metadata information. - // HIVE-18387 rewrites that logic and will fix the issue. + // TODO: This does not work yet because the creation metadata of a materialized view + // needs to be updated when the tables it uses were replicated to a different database.
//run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view AS SELECT a FROM " + dbName + ".ptned where b=1", driver); //verifySetup("SELECT a from " + dbName + ".mat_view", ptn_data_1, driver); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java index 53d716bceb..9ed231a584 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.hooks.HookContext; import org.apache.hadoop.hive.ql.hooks.HooksLoader; -import org.apache.hadoop.hive.ql.hooks.MaterializedViewRegistryUpdateHook; import org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook; import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook; import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext; @@ -60,7 +59,6 @@ if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED)) { queryHooks.add(new MetricsQueryLifeTimeHook()); } - queryHooks.add(new MaterializedViewRegistryUpdateHook()); List propertyDefinedHoooks; try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index d3aa571913..e231a883d1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -51,7 +51,6 @@ import java.util.concurrent.ExecutionException; import com.google.common.collect.ImmutableSet; -import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -61,7 +60,6 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.Constants; @@ -76,7 +74,6 @@ import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CompactionResponse; @@ -108,7 +105,6 @@ import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; import org.apache.hadoop.hive.metastore.api.WMTrigger; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; @@ -240,6 +236,7 @@ import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; +import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; @@ -5101,7 +5098,7 @@ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws 
Exceptio * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int createView(Hive db, CreateViewDesc crtView) throws HiveException { + private int createView(Hive db, CreateViewDesc crtView) throws HiveException, MetaException { Table oldview = db.getTable(crtView.getViewName(), false); if (oldview != null) { // Check whether we are replicating @@ -5122,14 +5119,43 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } if (crtView.isMaterialized()) { + // Recall that this is a REBUILD // We need to update the status of the creation signature CreationMetadata cm = new CreationMetadata(oldview.getDbName(), oldview.getTableName(), ImmutableSet.copyOf(crtView.getTablesUsed())); cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY)); oldview.getTTable().setCreationMetadata(cm); - db.alterTable(crtView.getViewName(), oldview, null); - // This is a replace/rebuild, so we need an exclusive lock + // We disable the stats for the time being + oldview.getTTable().getParameters().remove(StatsSetupConst.COLUMN_STATS_ACCURATE); + // We need to set the properties and location so we get the new version + oldview.getTTable().getParameters().put(Constants.MATERIALIZED_VIEW_VERSION, + crtView.getTblProps().get(Constants.MATERIALIZED_VIEW_VERSION)); + final Path prevDataLocation = oldview.getDataLocation(); + oldview.getTTable().getSd().setLocation(crtView.getLocation()); + // As table object is modified in this method, we need to update + // the subsequent stats tasks (if any) + updateChildrenStatsTask(oldview); + // We commit changes to the metastore + boolean failed = true; + HiveMetaHook hook = oldview.getStorageHandler() != null ? + oldview.getStorageHandler().getMetaHook() : null; + if (hook != null && hook instanceof DefaultHiveMetaHook) { + DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook; + try { + // We execute the OVERWRITE hook + hiveMetaHook.commitInsertTable(oldview.getTTable(), true); + // We update metastore + db.alterTable(crtView.getViewName(), oldview, null); + failed = false; + } finally { + if (failed) { + hiveMetaHook.rollbackInsertTable(oldview.getTTable(), true); + } + } + } + // We need to delete the previous location for the materialized view + deleteDir(prevDataLocation); addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_EXCLUSIVE)); } else { // replace existing view @@ -5152,7 +5178,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } oldview.checkValidity(null); db.alterTable(crtView.getViewName(), oldview, null); - addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK)); + addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_SHARED)); } } else { // We create new view @@ -5166,6 +5192,9 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { tbl.getTTable().setCreationMetadata(cm); } db.createTable(tbl, crtView.getIfNotExists()); + // As table object is modified in this method, we need to update + // the subsequent stats tasks (if any) + updateChildrenStatsTask(tbl); addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); //set lineage info @@ -5175,6 +5204,19 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { return 0; } + private void updateChildrenStatsTask(Table viewTbl) { + // As table object is modified in this method, we need to update + // the subsequent stats tasks (if any) + if (getChildTasks() != null) { + for (Task t : 
getChildTasks()) { + if (t.getWork() instanceof StatsWork) { + StatsWork sw = (StatsWork) t.getWork(); + sw.setTable(viewTbl); + } + } + } + } + private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException { if (truncateTableDesc.getColumnIndexes() != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryTask.java new file mode 100644 index 0000000000..b4abef9c20 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryTask.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; +import org.apache.hadoop.hive.ql.plan.api.StageType; + +import java.io.Serializable; + +/** + * This task adds the materialized view to the registry. 
+ */ +public class MaterializedViewUpdateRegistryTask extends Task<MaterializedViewUpdateRegistryWork> implements Serializable { + + private static final long serialVersionUID = 1L; + + public MaterializedViewUpdateRegistryTask() { + super(); + } + + @Override + public int execute(DriverContext driverContext) { + if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) { + return 0; + } + try { + if (getWork().isRetrieveAndInclude()) { + Hive db = Hive.get(conf); + Table mvTable = db.getTable(getWork().getViewName()); + HiveMaterializedViewsRegistry.get().createMaterializedView(db.getConf(), mvTable); + } else if (getWork().isDisableRewrite()) { + // Disabling rewriting, removing from cache + String[] names = getWork().getViewName().split("\\."); + HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]); + } + } catch (HiveException e) { + LOG.debug("Exception during materialized view cache update", e); + } + return 0; + } + + @Override + public StageType getType() { + return StageType.DDL; + } + + @Override + public String getName() { + return MaterializedViewUpdateRegistryTask.class.getSimpleName(); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryWork.java new file mode 100644 index 0000000000..35ae002e4a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryWork.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +import java.io.Serializable; + +@Explain(displayName = "Materialized View Registry Update", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class MaterializedViewUpdateRegistryWork implements Serializable { + private static final long serialVersionUID = 1L; + private final String viewName; + private final boolean retrieveAndInclude; + private final boolean disableRewrite; + + public MaterializedViewUpdateRegistryWork(String viewName, boolean retrieveAndInclude, boolean disableRewrite) { + this.viewName = viewName; + this.retrieveAndInclude = retrieveAndInclude; + this.disableRewrite = disableRewrite; + } + + public String getViewName() { + return viewName; + } + + public boolean isRetrieveAndInclude() { + return retrieveAndInclude; + } + + public boolean isDisableRewrite() { + return disableRewrite; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index 85cef86646..602b92ecdb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -83,6 +83,9 @@ public TaskTuple(Class<T> workClass, Class<? extends Task<T>> taskClass) { taskvec.add(new TaskTuple<CopyWork>(CopyWork.class, CopyTask.class)); taskvec.add(new TaskTuple<ReplCopyWork>(ReplCopyWork.class, ReplCopyTask.class)); taskvec.add(new TaskTuple<DDLWork>(DDLWork.class, DDLTask.class)); + taskvec.add(new TaskTuple<MaterializedViewUpdateRegistryWork>( + MaterializedViewUpdateRegistryWork.class, + MaterializedViewUpdateRegistryTask.class)); taskvec.add(new TaskTuple<FunctionWork>(FunctionWork.class, FunctionTask.class)); taskvec diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java deleted file mode 100644 index e886399d53..0000000000 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.hadoop.hive.ql.hooks; - -import java.io.Serializable; -import java.util.List; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.ql.exec.DDLTask; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.TaskRunner; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Updates the materialized view registry after changes. - */ -public class MaterializedViewRegistryUpdateHook implements QueryLifeTimeHook { - - private static final Logger LOG = LoggerFactory.getLogger(MaterializedViewRegistryUpdateHook.class); - - @Override - public void beforeCompile(QueryLifeTimeHookContext ctx) { - } - - @Override - public void afterCompile(QueryLifeTimeHookContext ctx, boolean hasError) { - } - - @Override - public void beforeExecution(QueryLifeTimeHookContext ctx) { - } - - @Override - public void afterExecution(QueryLifeTimeHookContext ctx, boolean hasError) { - if (hasError) { - return; - } - HiveConf hiveConf = ctx.getHiveConf(); - try { - List completedTasks = ctx.getHookContext().getCompleteTaskList(); - for (TaskRunner taskRunner : completedTasks) { - Task task = taskRunner.getTask(); - if (task instanceof DDLTask) { - DDLTask ddlTask = (DDLTask) task; - DDLWork work = ddlTask.getWork(); - String tableName = null; - boolean isRewriteEnabled = false; - if (work.getCreateViewDesc() != null && work.getCreateViewDesc().isMaterialized()) { - tableName = work.getCreateViewDesc().toTable(hiveConf).getFullyQualifiedName(); - isRewriteEnabled = work.getCreateViewDesc().isRewriteEnabled(); - } else if (work.getAlterMaterializedViewDesc() != null) { - tableName = work.getAlterMaterializedViewDesc().getMaterializedViewName(); - isRewriteEnabled = work.getAlterMaterializedViewDesc().isRewriteEnable(); - } else { - continue; - } - - if (isRewriteEnabled) { - Hive db = Hive.get(); - Table mvTable = db.getTable(tableName); - HiveMaterializedViewsRegistry.get().createMaterializedView(db.getConf(), mvTable); - } else if (work.getAlterMaterializedViewDesc() != null) { - // Disabling rewriting, removing from cache - String[] names = tableName.split("\\."); - HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]); - } - } - } - } catch (HiveException e) { - if (HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING)) { - String message = "Error updating materialized view cache; consider disabling: " + ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING.varname; - LOG.error(message, e); - throw new RuntimeException(message, e); - } else { - LOG.debug("Exception during materialized view cache update", e); - } - } - } - -} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 69447d9d34..50abf23468 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -87,6 +87,7 @@ import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles; import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx; import 
org.apache.hadoop.hive.ql.plan.ConditionalWork; +import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -1508,25 +1509,7 @@ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask, LOG.debug("can't pre-create table for CTAS", e); table = null; } - } else if (mvWork.getLoadFileWork().getCreateViewDesc() != null) { - if (mvWork.getLoadFileWork().getCreateViewDesc().isReplace()) { - // ALTER MV ... REBUILD - String tableName = mvWork.getLoadFileWork().getCreateViewDesc().getViewName(); - try { - table = Hive.get().getTable(tableName); - } catch (HiveException e) { - throw new RuntimeException("unexpected; MV should be present already..: " + tableName, e); - } - } else { - // CREATE MATERIALIZED VIEW ... - try { - table = mvWork.getLoadFileWork().getCreateViewDesc().toTable(hconf); - } catch (HiveException e) { - LOG.debug("can't pre-create table for MV", e); - table = null; - } - } - } else { + } else if (mvWork.getLoadFileWork().getCreateViewDesc() == null) { throw new RuntimeException("unexpected; this should be a CTAS or a CREATE/REBUILD MV - however no desc present"); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index b67a03f213..e2cf297791 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StatsSetupConst.StatDB; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveConf.StrictChecks; @@ -12317,7 +12318,7 @@ ASTNode analyzeCreateTable( } } - addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE); + addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, WriteEntity.WriteType.DDL_NO_LOCK); if (isTemporary) { if (partCols.size() > 0) { @@ -12466,13 +12467,13 @@ ASTNode analyzeCreateTable( return null; } - private void addDbAndTabToOutputs(String[] qualifiedTabName, TableType type) throws SemanticException { + private void addDbAndTabToOutputs(String[] qualifiedTabName, TableType type, WriteEntity.WriteType writeType) throws SemanticException { Database database = getDatabase(qualifiedTabName[0]); outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED)); Table t = new Table(qualifiedTabName[0], qualifiedTabName[1]); t.setTableType(type); - outputs.add(new WriteEntity(t, WriteEntity.WriteType.DDL_NO_LOCK)); + outputs.add(new WriteEntity(t, writeType)); } protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException { @@ -12570,13 +12571,69 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt unparseTranslator.enable(); if (isMaterialized) { - createVwDesc = new CreateViewDesc( - dbDotTable, cols, comment, tblProps, partColNames, - ifNotExists, isRebuild, rewriteEnabled, isAlterViewAs, - storageFormat.getInputFormat(), storageFormat.getOutputFormat(), - location, storageFormat.getSerde(), storageFormat.getStorageHandler(), - storageFormat.getSerdeProps()); - addDbAndTabToOutputs(qualTabName, 
TableType.MATERIALIZED_VIEW); + Path dataLocation; + String mvVersion; + if (isRebuild) { + // We need to go lookup the table and get the select statement and then parse it. + Table tab; + try { + tab = getTableObjectByName(dbDotTable, true); + // We need to use the expanded text for the materialized view, as it will contain + // the qualified table aliases, etc. + String viewText = tab.getViewExpandedText(); + if (viewText.trim().isEmpty()) { + throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY); + } + Context ctx = new Context(queryState.getConf()); + selectStmt = ParseUtils.parse(viewText, ctx); + // For CBO + if (plannerCtx != null) { + plannerCtx.setViewToken(selectStmt); + } + } catch (Exception e) { + throw new SemanticException(e); + } + // Create view descriptor + createVwDesc = CreateViewDesc.fromTable(tab); + createVwDesc.setReplace(true); + // Generate the new directory and increase the version + dataLocation = tab.getDataLocation().getParent(); + mvVersion = String.valueOf(Integer.parseInt( + tab.getProperty(Constants.MATERIALIZED_VIEW_VERSION)) + 1); + // Rebuild materialized view, exclusive lock + addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW, WriteEntity.WriteType.DDL_EXCLUSIVE); + outputs.add(BaseSemanticAnalyzer.toWriteEntity(tab.getDataLocation(), conf)); + } else { + // Create view descriptor + createVwDesc = new CreateViewDesc( + dbDotTable, cols, comment, tblProps, partColNames, + ifNotExists, false, rewriteEnabled, + storageFormat.getInputFormat(), storageFormat.getOutputFormat(), + location, storageFormat.getSerde(), storageFormat.getStorageHandler(), + storageFormat.getSerdeProps()); + // For materialized views, properties should exist + if (createVwDesc.getTblProps() == null) { + createVwDesc.setTblProps(new HashMap<>()); + } + // Add version property ('0') and set up location correctly + if (createVwDesc.getLocation() == null) { + try { + dataLocation = new Warehouse(conf).getDefaultTablePath( + db.getDatabase(qualTabName[0]), qualTabName[1]); + } catch (Exception e) { + throw new SemanticException(e); + } + } else { + dataLocation = new Path(createVwDesc.getLocation()); + } + // We create a new materialized view, hence we use version 0 + mvVersion = String.valueOf(0); + // New materialized view, no need for lock + addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW, WriteEntity.WriteType.DDL_NO_LOCK); + } + // Set up the new directory and version in tblProps + createVwDesc.setLocation(new Path(dataLocation, mvVersion).toString()); + createVwDesc.getTblProps().put(Constants.MATERIALIZED_VIEW_VERSION, mvVersion); queryState.setCommandType(HiveOperation.CREATE_MATERIALIZED_VIEW); } else { createVwDesc = new CreateViewDesc( @@ -12585,32 +12642,11 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt storageFormat.getOutputFormat(), storageFormat.getSerde()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), createVwDesc), conf)); - addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW); + addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW, WriteEntity.WriteType.DDL_NO_LOCK); queryState.setCommandType(HiveOperation.CREATEVIEW); } qb.setViewDesc(createVwDesc); - if (isRebuild) { - // We need to go lookup the table and get the select statement and then parse it. - try { - Table tab = getTableObjectByName(dbDotTable, true); - // We need to use the expanded text for the materialized view, as it will contain - // the qualified table aliases, etc. 
- String viewText = tab.getViewExpandedText(); - if (viewText.trim().isEmpty()) { - throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY); - } - Context ctx = new Context(queryState.getConf()); - selectStmt = ParseUtils.parse(viewText, ctx); - // For CBO - if (plannerCtx != null) { - plannerCtx.setViewToken(selectStmt); - } - } catch (Exception e) { - throw new SemanticException(e); - } - } - return selectStmt; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 0c1c4e09d6..3148962798 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -28,6 +28,8 @@ import java.util.Map; import java.util.Set; +import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.exec.MaterializedViewUpdateRegistryWork; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,7 +74,6 @@ import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.session.LineageState; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.hive.serde.serdeConstants; @@ -490,6 +491,7 @@ private void patchUpAfterCTASorMaterializedView(final List> leaves = new LinkedHashSet<>(); getLeafTasks(rootTasks, leaves); assert (leaves.size() > 0); + Task targetTask = createTask; for (Task task : leaves) { if (task instanceof StatsTask) { // StatsTask require table to already exist @@ -500,10 +502,36 @@ private void patchUpAfterCTASorMaterializedView(final List schema, String comment, Map tblProps, List partColNames, - boolean ifNotExists, boolean replace, boolean rewriteEnabled, boolean isAlterViewAs, + boolean ifNotExists, boolean replace, boolean rewriteEnabled, String inputFormat, String outputFormat, String location, String serde, String storageHandler, Map serdeProps) { this.viewName = viewName; @@ -107,7 +106,7 @@ public CreateViewDesc(String viewName, List schema, String comment, this.replace = replace; this.isMaterialized = true; this.rewriteEnabled = rewriteEnabled; - this.isAlterViewAs = isAlterViewAs; + this.isAlterViewAs = false; this.inputFormat = inputFormat; this.outputFormat = outputFormat; this.location = location; @@ -116,6 +115,33 @@ public CreateViewDesc(String viewName, List schema, String comment, this.serdeProps = serdeProps; } + /** + * Generates a descriptor from a metastore table object, copying + * its properties. + */ + public static CreateViewDesc fromTable(Table tab) { + CreateViewDesc cvd = new CreateViewDesc(); + cvd.setViewName(tab.getFullyQualifiedName()); + cvd.setViewOriginalText(tab.getViewOriginalText()); + cvd.setViewExpandedText(tab.getViewExpandedText()); + cvd.setSchema(tab.getAllCols()); + cvd.setTblProps(tab.getParameters()); + cvd.setPartColNames(tab.getPartColNames()); + cvd.setComment(tab.getProperty("comment")); + cvd.setMaterialized(tab.isMaterializedView()); + cvd.setRewriteEnabled(tab.isRewriteEnabled()); + cvd.setInputFormat(tab.getSd().getInputFormat()); + cvd.setOutputFormat(tab.getSd().getOutputFormat()); + cvd.setLocation(tab.getSd().getLocation()); + cvd.setSerde(tab.getSerializationLib()); + cvd.setStorageHandler( + tab.getStorageHandler() == null ? 
null : tab.getStorageHandler().toString()); + cvd.setSerdeProps( + tab.getSd().getSerdeInfo() == null ? + null : tab.getSd().getSerdeInfo().getParameters()); + return cvd; + } + /** * Used to create a view descriptor * @param viewName @@ -288,6 +314,10 @@ public void setOutputFormat(String outputFormat) { this.outputFormat = outputFormat; } + public void setMaterialized(boolean isMaterialized) { + this.isMaterialized = isMaterialized; + } + public boolean isMaterialized() { return isMaterialized; } @@ -295,18 +325,31 @@ public boolean isMaterialized() { public void setLocation(String location) { this.location = location; } + public String getLocation() { return location; } + public void setSerde(String serde) { + this.serde = serde; + } + public String getSerde() { return serde; } + public void setStorageHandler(String storageHandler) { + this.storageHandler = storageHandler; + } + public String getStorageHandler() { return storageHandler; } + public void setSerdeProps(Map serdeProps) { + this.serdeProps = serdeProps; + } + public Map getSerdeProps() { return serdeProps; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index bdfb63244a..427e47cb92 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -94,13 +94,15 @@ public ImportTableDesc(String dbName, Table table) throws Exception { null, // comment passed as table params table.getParameters(), table.getPartColNames(), - false,false,false,false, + false,false,false, table.getSd().getInputFormat(), table.getSd().getOutputFormat(), null, // location: set to null here, can be overwritten by the IMPORT stmt table.getSd().getSerdeInfo().getSerializationLib(), null, // storagehandler passed as table params table.getSd().getSerdeInfo().getParameters()); + // TODO: If the DB name from the creation metadata for any of the tables has changed, + // we should update it. Currently it refers to the source database name. this.createViewDesc.setTablesUsed(table.getCreationMetadata() != null ? table.getCreationMetadata().getTablesUsed() : ImmutableSet.of()); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java index 9b45f43026..0715b34b2c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java @@ -118,6 +118,10 @@ public Table getTable() { return table; } + public void setTable(Table table) { + this.table = table; + } + public void collectStatsFromAggregator(IStatsGatherDesc conf) { // AggKey in StatsWork is used for stats aggregation while StatsAggPrefix // in FileSinkDesc is used for stats publishing. They should be consistent. 
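
[Reviewer note] The SemanticAnalyzer and CreateViewDesc changes above introduce a simple versioning convention for materialized view storage: the materialized.view.version table property starts at '0', the view's data lives in a subdirectory named after that version under the view's root directory, and a REBUILD targets the sibling directory for version N+1 (the old directory is deleted in DDLTask#createView once the metastore commit succeeds). A minimal sketch of that bookkeeping follows; the MvVersionPaths class and its method names are illustrative only, not part of this patch:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.fs.Path;

/** Illustrative helper (not in the patch) mirroring the version bookkeeping
 *  done in SemanticAnalyzer#analyzeCreateView. */
public class MvVersionPaths {

  /** Table property key added to Constants.java by this patch. */
  public static final String MATERIALIZED_VIEW_VERSION = "materialized.view.version";

  /** CREATE MATERIALIZED VIEW: version starts at '0', data goes under root/0. */
  public static Path initialLocation(Path mvRootDir, Map<String, String> tblProps) {
    tblProps.put(MATERIALIZED_VIEW_VERSION, "0");
    return new Path(mvRootDir, "0");
  }

  /** ALTER MATERIALIZED VIEW ... REBUILD: bump the version and target the
   *  sibling directory; the previous directory is removed later by
   *  DDLTask#createView once the metastore commit succeeds. */
  public static Path rebuildLocation(Path currentDataLocation, Map<String, String> tblProps) {
    int next = Integer.parseInt(tblProps.get(MATERIALIZED_VIEW_VERSION)) + 1;
    tblProps.put(MATERIALIZED_VIEW_VERSION, String.valueOf(next));
    return new Path(currentDataLocation.getParent(), String.valueOf(next));
  }

  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    Path root = new Path("hdfs://nn/warehouse/cmv_mat_view");
    Path v0 = initialLocation(root, props);   // .../cmv_mat_view/0
    Path v1 = rebuildLocation(v0, props);     // .../cmv_mat_view/1
    System.out.println(v0 + " -> " + v1);
  }
}
```

This is the layout exercised by the dfs -ls/-test checks added to materialized_view_create_rewrite_4.q below: directory 0 before the rebuild, directory 1 (with 0 deleted) after it.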
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index b48379013d..1d7660e8b2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -252,8 +252,6 @@ private int aggregateStats(Hive db) { if (res == null) { return 0; } - // Stats task should not set creation signature - res.getTTable().unsetCreationMetadata(); db.alterTable(tableFullName, res, environmentContext); if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) { diff --git a/ql/src/test/queries/clientpositive/druidmini_mv.q b/ql/src/test/queries/clientpositive/druidmini_mv.q index e059357602..3acbadf9a7 100644 --- a/ql/src/test/queries/clientpositive/druidmini_mv.q +++ b/ql/src/test/queries/clientpositive/druidmini_mv.q @@ -5,20 +5,28 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.strict.checks.cartesian.product=false; set hive.materializedview.rewriting=true; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); - -insert into cmv_basetable values - (1, 'alfred', 10.30, 2), - (2, 'bob', 3.14, 3), - (2, 'bonnie', 172342.2, 3), - (3, 'calvin', 978.76, 3), - (3, 'charlie', 9.8, 1); +CREATE TABLE cmv_basetable +STORED AS orc +TBLPROPERTIES ('transactional'='true') +AS +SELECT cast(unix_timestamp() AS timestamp) AS t, + cast(a AS int) AS a, + cast(b AS varchar(256)) AS b, + cast(c AS decimal(10,2)) AS c, + cast(d AS int) AS d +FROM TABLE ( + VALUES + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1)) as q (a, b, c, d); CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, c +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable WHERE a = 2; @@ -30,7 +38,7 @@ CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWRITE STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, c +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable WHERE a = 3; @@ -61,7 +69,7 @@ SELECT * FROM ( ON table1.a = table2.a); INSERT INTO cmv_basetable VALUES - (3, 'charlie', 15.8, 1); + (cast(unix_timestamp() AS timestamp), 3, 'charlie', 15.8, 1); -- TODO: CANNOT USE THE VIEW, IT IS OUTDATED EXPLAIN @@ -77,8 +85,8 @@ SELECT * FROM ( (SELECT a, c FROM cmv_basetable WHERE d = 3) table2 ON table1.a = table2.a); --- REBUILD: TODO FOR MVS USING CUSTOM STORAGE HANDLERS --- ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +-- REBUILD +ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD; -- NOW IT CAN BE USED AGAIN EXPLAIN diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q index efc65c4061..c841a3a79a 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q @@ -53,12 +53,20 @@ insert into cmv_basetable_2 values analyze table cmv_basetable_2 compute statistics for columns; +-- CHECK THAT 
VERSION '0' EXISTS AND VERSION '1' DOES NOT +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/cmv_mat_view; +dfs -test -e ${hiveconf:hive.metastore.warehouse.dir}/cmv_mat_view/0; + -- ENABLE FOR REWRITE EXPLAIN ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE; ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE; +-- CHECK THAT VERSION '0' EXISTS AND VERSION '1' DOES NOT +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/cmv_mat_view; +dfs -test -e ${hiveconf:hive.metastore.warehouse.dir}/cmv_mat_view/0; + -- CANNOT USE THE VIEW, IT IS OUTDATED EXPLAIN SELECT cmv_basetable.a @@ -77,6 +85,10 @@ ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +-- NOW VERSION '0' SHOULD HAVE BEEN DELETED WHILE VERSION '1' SHOULD EXIST +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/cmv_mat_view; +dfs -test -e ${hiveconf:hive.metastore.warehouse.dir}/cmv_mat_view/1; + -- NOW IT CAN BE USED AGAIN EXPLAIN SELECT cmv_basetable.a diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out index 5a0b885f77..09d45dd019 100644 --- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out +++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out @@ -1,38 +1,55 @@ -PREHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@cmv_basetable -POSTHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@cmv_basetable -PREHOOK: query: insert into cmv_basetable values - (1, 'alfred', 10.30, 2), - (2, 'bob', 3.14, 3), - (2, 'bonnie', 172342.2, 3), - (3, 'calvin', 978.76, 3), - (3, 'charlie', 9.8, 1) -PREHOOK: type: QUERY +unix_timestamp(void) is deprecated. Use current_timestamp instead. +unix_timestamp(void) is deprecated. Use current_timestamp instead. 
+PREHOOK: query: CREATE TABLE cmv_basetable +STORED AS orc +TBLPROPERTIES ('transactional'='true') +AS +SELECT cast(unix_timestamp() AS timestamp) AS t, + cast(a AS int) AS a, + cast(b AS varchar(256)) AS b, + cast(c AS decimal(10,2)) AS c, + cast(d AS int) AS d +FROM TABLE ( + VALUES + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1)) as q (a, b, c, d) +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default PREHOOK: Output: default@cmv_basetable -POSTHOOK: query: insert into cmv_basetable values - (1, 'alfred', 10.30, 2), - (2, 'bob', 3.14, 3), - (2, 'bonnie', 172342.2, 3), - (3, 'calvin', 978.76, 3), - (3, 'charlie', 9.8, 1) -POSTHOOK: type: QUERY +POSTHOOK: query: CREATE TABLE cmv_basetable +STORED AS orc +TBLPROPERTIES ('transactional'='true') +AS +SELECT cast(unix_timestamp() AS timestamp) AS t, + cast(a AS int) AS a, + cast(b AS varchar(256)) AS b, + cast(c AS decimal(10,2)) AS c, + cast(d AS int) AS d +FROM TABLE ( + VALUES + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1)) as q (a, b, c, d) +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_basetable POSTHOOK: Lineage: cmv_basetable.a SCRIPT [] POSTHOOK: Lineage: cmv_basetable.b SCRIPT [] POSTHOOK: Lineage: cmv_basetable.c SCRIPT [] POSTHOOK: Lineage: cmv_basetable.d SCRIPT [] +POSTHOOK: Lineage: cmv_basetable.t SIMPLE [] PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, c +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable WHERE a = 2 PREHOOK: type: CREATE_MATERIALIZED_VIEW @@ -43,7 +60,7 @@ POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, c +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable WHERE a = 2 POSTHOOK: type: CREATE_MATERIALIZED_VIEW @@ -67,6 +84,7 @@ POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} druid.datasource default.cmv_mat_view druid.segment.granularity HOUR +materialized.view.version 0 numFiles 0 numRows 2 rawDataSize 0 @@ -77,7 +95,7 @@ PREHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWR STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, c +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable WHERE a = 3 PREHOOK: type: CREATE_MATERIALIZED_VIEW @@ -88,7 +106,7 @@ POSTHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REW STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, c +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable 
WHERE a = 3 POSTHOOK: type: CREATE_MATERIALIZED_VIEW @@ -103,7 +121,8 @@ POSTHOOK: query: SELECT a, c FROM cmv_mat_view2 POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_mat_view2 #### A masked pattern was here #### -6 988.56 +3 9.80 +3 978.76 PREHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2 PREHOOK: type: SHOW_TBLPROPERTIES POSTHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2 @@ -111,6 +130,7 @@ POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} druid.datasource default.cmv_mat_view2 druid.segment.granularity HOUR +materialized.view.version 0 numFiles 0 numRows 2 rawDataSize 0 @@ -161,7 +181,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable POSTHOOK: Input: default@cmv_mat_view2 #### A masked pattern was here #### -6 988.56 +3 9.80 +3 978.76 Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * FROM ( @@ -187,17 +208,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 10350 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((3 = a) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 2070 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c (type: decimal(10,2)) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 2070 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 2070 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,2)) TableScan alias: cmv_mat_view2 @@ -217,14 +238,14 @@ STAGE PLANS: 0 1 outputColumnNames: _col1, _col5 - Statistics: Num rows: 2 Data size: 18622 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 2 Data size: 4142 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: 3 (type: int), _col1 (type: decimal(10,2)), 3 (type: int), _col5 (type: decimal(10,2)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 18622 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 2 Data size: 4142 Basic stats: PARTIAL Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 18622 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 2 Data size: 4142 Basic stats: PARTIAL Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -255,14 +276,17 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable POSTHOOK: Input: default@cmv_mat_view2 #### A masked pattern was here #### -3 988.56 3 978.76 +3 9.80 3 978.76 +3 978.76 3 978.76 +unix_timestamp(void) is deprecated. Use current_timestamp instead. +unix_timestamp(void) is deprecated. Use current_timestamp instead. 
PREHOOK: query: INSERT INTO cmv_basetable VALUES - (3, 'charlie', 15.8, 1) + (cast(unix_timestamp() AS timestamp), 3, 'chloe', 15.8, 1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Output: default@cmv_basetable POSTHOOK: query: INSERT INTO cmv_basetable VALUES - (3, 'charlie', 15.8, 1) + (cast(unix_timestamp() AS timestamp), 3, 'chloe', 15.8, 1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: default@cmv_basetable @@ -270,6 +294,7 @@ POSTHOOK: Lineage: cmv_basetable.a SCRIPT [] POSTHOOK: Lineage: cmv_basetable.b SCRIPT [] POSTHOOK: Lineage: cmv_basetable.c SCRIPT [] POSTHOOK: Lineage: cmv_basetable.d SCRIPT [] +POSTHOOK: Lineage: cmv_basetable.t SCRIPT [] Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * FROM ( @@ -295,31 +320,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 19600 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (a = 3) (type: boolean) - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 7840 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c (type: decimal(10,2)) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 7840 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 7840 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,2)) TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 19600 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((3 = a) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3920 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c (type: decimal(10,2)) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3920 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3920 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,2)) Reduce Operator Tree: Join Operator @@ -329,14 +354,14 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 15682 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 15682 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 15682 Basic stats: 
COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -368,7 +393,17 @@ POSTHOOK: Input: default@cmv_basetable 3 15.80 3 978.76 3 9.80 3 978.76 3 978.76 3 978.76 -Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD +PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_basetable +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_mat_view2 +POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD +POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_mat_view2 +Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * FROM ( (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 @@ -393,32 +428,28 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (a = 3) (type: boolean) - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: c (type: decimal(10,2)) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: decimal(10,2)) - TableScan - alias: cmv_basetable - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 19600 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((3 = a) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3920 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c (type: decimal(10,2)) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3920 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3920 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,2)) + TableScan + alias: cmv_mat_view2 + properties: + druid.query.json {"queryType":"select","dataSource":"default.cmv_mat_view2","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["c"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}} + druid.query.type select + Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: c (type: decimal(10,2)) Reduce Operator Tree: Join Operator condition map: @@ -426,15 +457,15 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + outputColumnNames: _col1, _col5 + Statistics: Num rows: 3 Data size: 11763 Basic stats: PARTIAL Column stats: 
NONE Select Operator - expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2)) + expressions: 3 (type: int), _col1 (type: decimal(10,2)), 3 (type: int), _col5 (type: decimal(10,2)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 11763 Basic stats: PARTIAL Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 11763 Basic stats: PARTIAL Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -446,7 +477,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * FROM ( (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 JOIN @@ -454,6 +485,7 @@ PREHOOK: query: SELECT * FROM ( ON table1.a = table2.a) PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable +PREHOOK: Input: default@cmv_mat_view2 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM ( (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 @@ -462,6 +494,7 @@ POSTHOOK: query: SELECT * FROM ( ON table1.a = table2.a) POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Input: default@cmv_mat_view2 #### A masked pattern was here #### 3 15.80 3 978.76 3 9.80 3 978.76 diff --git a/ql/src/test/results/clientpositive/materialized_view_create.q.out b/ql/src/test/results/clientpositive/materialized_view_create.q.out index 89523042a9..daa44dad5e 100644 --- a/ql/src/test/results/clientpositive/materialized_view_create.q.out +++ b/ql/src/test/results/clientpositive/materialized_view_create.q.out @@ -46,6 +46,7 @@ Retention: 0 Table Type: MATERIALIZED_VIEW Table Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + materialized.view.version 0 numFiles 1 numRows 5 rawDataSize 1025 @@ -106,6 +107,7 @@ Retention: 0 Table Type: MATERIALIZED_VIEW Table Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + materialized.view.version 0 numFiles 1 numRows 5 rawDataSize 580 @@ -241,6 +243,7 @@ POSTHOOK: query: show tblproperties cmv_mat_view5 POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} key value +materialized.view.version 0 numFiles 1 numRows 5 rawDataSize 1605 diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out index 95b1d61f94..6cf057aab8 100644 --- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out +++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out @@ -65,6 +65,7 @@ PREHOOK: type: SHOW_TBLPROPERTIES POSTHOOK: query: show tblproperties cmv_mat_view POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} +materialized.view.version 0 numFiles 1 numRows 2 rawDataSize 408 @@ -97,6 +98,7 @@ PREHOOK: type: SHOW_TBLPROPERTIES POSTHOOK: query: show tblproperties cmv_mat_view2 POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} +materialized.view.version 0 numFiles 1 numRows 2 rawDataSize 232 diff --git 
a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
index 0d8d238e8b..5b28669e34 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
@@ -92,6 +92,7 @@ STAGE DEPENDENCIES:
   Stage-0 depends on stages: Stage-2
   Stage-5 depends on stages: Stage-0
   Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
   Stage: Stage-1
@@ -183,6 +184,8 @@ STAGE PLANS:
       Create View Operator:
         Create View
           columns: a int, c decimal(10,2)
+          table properties:
+            materialized.view.version 0
           expanded text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
 FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
 WHERE `cmv_basetable_2`.`c` > 10.0
@@ -198,6 +201,9 @@ STAGE PLANS:
     Stats Work
       Basic Stats Work:
 
+  Stage: Stage-6
+    Materialized View Registry Update
+
 PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE AS
   SELECT cmv_basetable.a, cmv_basetable_2.c
   FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -443,6 +449,7 @@ STAGE DEPENDENCIES:
   Stage-0 depends on stages: Stage-2
   Stage-5 depends on stages: Stage-0
   Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
   Stage: Stage-1
@@ -534,13 +541,33 @@ STAGE PLANS:
       Create View Operator:
         Create View
           columns: a int, c decimal(10,2)
+          table properties:
+            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+            materialized.view.version 1
+            numFiles 1
+            numRows 2
+            rawDataSize 232
+            totalSize 325
+#### A masked pattern was here ####
+          expanded text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
+WHERE `cmv_basetable_2`.`c` > 10.0
+GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
           name: default.cmv_mat_view
+          original text: SELECT cmv_basetable.a, cmv_basetable_2.c
+FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.0
+GROUP BY cmv_basetable.a, cmv_basetable_2.c
           replace: true
+          rewrite enabled: true
 
   Stage: Stage-3
     Stats Work
       Basic Stats Work:
 
+  Stage: Stage-6
+    Materialized View Registry Update
+
 PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
 PREHOOK: type: CREATE_MATERIALIZED_VIEW
 PREHOOK: Input: default@cmv_basetable
diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
index 8ab1517186..94628182c6 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
@@ -92,6 +92,7 @@ STAGE DEPENDENCIES:
   Stage-0 depends on stages: Stage-2
   Stage-5 depends on stages: Stage-0
   Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
   Stage: Stage-1
@@ -183,6 +184,8 @@ STAGE PLANS:
       Create View Operator:
         Create View
           columns: a int, c decimal(10,2)
+          table properties:
+            materialized.view.version 0
           expanded text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
 FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
 WHERE `cmv_basetable_2`.`c` > 10.0
@@ -197,6 +200,9 @@ STAGE PLANS:
     Stats Work
       Basic Stats Work:
 
+  Stage: Stage-6
+    Materialized View Registry Update
+
 PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view AS
   SELECT cmv_basetable.a, cmv_basetable_2.c
   FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -365,6 +371,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@cmv_basetable_2
 POSTHOOK: Output: default@cmv_basetable_2
 #### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
 PREHOOK: query: EXPLAIN
 ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
 PREHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE
@@ -389,6 +397,8 @@ POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
 POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE
 POSTHOOK: Input: default@cmv_mat_view
 POSTHOOK: Output: default@cmv_mat_view
+Found 1 items
+#### A masked pattern was here ####
 PREHOOK: query: EXPLAIN
 SELECT cmv_basetable.a
 FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -526,6 +536,7 @@ STAGE DEPENDENCIES:
   Stage-0 depends on stages: Stage-2
   Stage-5 depends on stages: Stage-0
   Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
   Stage: Stage-1
@@ -617,13 +628,33 @@ STAGE PLANS:
       Create View Operator:
         Create View
           columns: a int, c decimal(10,2)
+          table properties:
+            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+            materialized.view.version 1
+            numFiles 1
+            numRows 2
+            rawDataSize 232
+            totalSize 325
+#### A masked pattern was here ####
+          expanded text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
+WHERE `cmv_basetable_2`.`c` > 10.0
+GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
           name: default.cmv_mat_view
+          original text: SELECT cmv_basetable.a, cmv_basetable_2.c
+FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.0
+GROUP BY cmv_basetable.a, cmv_basetable_2.c
           replace: true
+          rewrite enabled: true
 
   Stage: Stage-3
     Stats Work
       Basic Stats Work:
 
+  Stage: Stage-6
+    Materialized View Registry Update
+
 PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
 PREHOOK: type: CREATE_MATERIALIZED_VIEW
 PREHOOK: Input: default@cmv_basetable
@@ -636,6 +667,8 @@ POSTHOOK: Input: default@cmv_basetable
 POSTHOOK: Input: default@cmv_basetable_2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@cmv_mat_view
+Found 1 items
+#### A masked pattern was here ####
 PREHOOK: query: EXPLAIN
 SELECT cmv_basetable.a
 FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_multi_db.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_multi_db.q.out
index d7ee468b49..6dee5ec48b 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_multi_db.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_multi_db.q.out
@@ -89,6 +89,7 @@ PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: show tblproperties cmv_mat_view
 POSTHOOK: type: SHOW_TBLPROPERTIES
 COLUMN_STATS_ACCURATE	{"BASIC_STATS":"true"}
+materialized.view.version	0
 numFiles	1
 numRows	2
 rawDataSize	408
@@ -121,6 +122,7 @@ PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: show tblproperties cmv_mat_view2
 POSTHOOK: type: SHOW_TBLPROPERTIES
 COLUMN_STATS_ACCURATE	{"BASIC_STATS":"true"}
+materialized.view.version	0
 numFiles	1
 numRows	2
 rawDataSize	232
diff --git a/ql/src/test/results/clientpositive/materialized_view_describe.q.out b/ql/src/test/results/clientpositive/materialized_view_describe.q.out
index efd60d4345..fb96fef4e0 100644
--- a/ql/src/test/results/clientpositive/materialized_view_describe.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_describe.q.out
@@ -69,6 +69,7 @@ Table Parameters:
 	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	comment             	this is the first view
 	key                 	foo
+	materialized.view.version	0
 	numFiles            	1
 	numRows             	5
 	rawDataSize         	580
@@ -95,6 +96,7 @@ POSTHOOK: type: SHOW_TBLPROPERTIES
 COLUMN_STATS_ACCURATE	{"BASIC_STATS":"true"}
 comment	this is the first view
 key	foo
+materialized.view.version	0
 numFiles	1
 numRows	5
 rawDataSize	580
@@ -157,6 +159,7 @@ Table Parameters:
 	comment             	this is the second view
 	key                 	alice
 	key2                	bob
+	materialized.view.version	0
 	numFiles            	1
 	numRows             	5
 	rawDataSize         	5
@@ -235,6 +238,7 @@ Table Type:         	MATERIALIZED_VIEW
 Table Parameters:
 	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	comment             	this is the third view
+	materialized.view.version	0
 	numFiles            	1
 	numRows             	5
 	rawDataSize         	1025
@@ -324,6 +328,7 @@ Table Type:         	MATERIALIZED_VIEW
 Table Parameters:
 	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	comment             	this is the last view
+	materialized.view.version	0
 	numFiles            	1
 	numRows             	5
 	rawDataSize         	5
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 3d1c67f97c..f2886285e0 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -3715,8 +3715,7 @@ public void alterTable(String dbname, String name, Table newTable)
       oldt.setRewriteEnabled(newt.isRewriteEnabled());
       registerCreationSignature = newt.getCreationMetadata() != null;
       if (registerCreationSignature) {
-        oldt.getCreationMetadata().setTables(newt.getCreationMetadata().getTables());
-        oldt.getCreationMetadata().setTxnList(newt.getCreationMetadata().getTxnList());
+        oldt.setCreationMetadata(newt.getCreationMetadata());
       }
 
       // commit the changes
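Reviewer note: the intent of the final ObjectStore.java hunk may be easier to see outside the diff. The removed lines copied two known fields of the incoming creation metadata onto the object already stored for the view; the added line replaces the stored metadata object wholesale. Below is a minimal sketch of the two behaviors using hypothetical stand-in classes (CreationMetadata and MaterializedView here are simplified illustrations, not the real org.apache.hadoop.hive.metastore.model types touched by the patch):

import java.util.Set;

// Hypothetical stand-in for the creation-metadata model object.
class CreationMetadata {
  Set<String> tables; // fully qualified names of the tables the MV reads
  String txnList;     // transaction snapshot the MV contents are based on

  CreationMetadata(Set<String> tables, String txnList) {
    this.tables = tables;
    this.txnList = txnList;
  }
}

// Hypothetical stand-in for the stored materialized-view object.
class MaterializedView {
  private CreationMetadata creationMetadata;

  CreationMetadata getCreationMetadata() { return creationMetadata; }

  void setCreationMetadata(CreationMetadata cm) { this.creationMetadata = cm; }

  // Field-by-field copy (the removed lines): dereferences the stored
  // metadata, so it fails with a NullPointerException when the stored view
  // has no creation metadata yet, and it silently drops any field added to
  // CreationMetadata later, since only the two known fields are copied.
  void alterFieldByField(MaterializedView newer) {
    getCreationMetadata().tables = newer.getCreationMetadata().tables;
    getCreationMetadata().txnList = newer.getCreationMetadata().txnList;
  }

  // Wholesale replacement (the added line): works even when the stored
  // metadata is absent and keeps every field of the incoming object.
  void alterWholesale(MaterializedView newer) {
    if (newer.getCreationMetadata() != null) {
      setCreationMetadata(newer.getCreationMetadata());
    }
  }
}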