diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewDesc.java
deleted file mode 100644
index 7f8b16b369..0000000000
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewDesc.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.ddl.view;
-
-import java.io.Serializable;
-
-import org.apache.hadoop.hive.ql.plan.Explain;
-import org.apache.hadoop.hive.ql.ddl.DDLDesc;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
-/**
- * DDL task description for all the ALTER MATERIALIZED VIEW commands.
- */
-@Explain(displayName = "Alter Materialized View", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-abstract class AlterMaterializedViewDesc implements DDLDesc, Serializable {
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * Alter Materialized View Types.
-   */
-  enum AlterMaterializedViewTypes {
-    UPDATE_REWRITE_FLAG
-  };
-
-  private AlterMaterializedViewTypes op;
-
-  AlterMaterializedViewDesc(AlterMaterializedViewTypes type) {
-    this.op = type;
-  }
-
-  @Explain(displayName = "operation", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getOpString() {
-    return op.toString();
-  }
-}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteDesc.java
index bcf09773d2..0e3df61cfc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteDesc.java
@@ -23,17 +23,14 @@
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 /**
- * DDL task description for the ALTER MATERIALIZED VIEW commands.
+ * DDL task description for the ALTER MATERIALIZED VIEW (ENABLE|DISABLE) REWRITE commands.
  */
-@Explain(displayName = "Alter Materialized View", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class AlterMaterializedViewRewriteDesc extends AlterMaterializedViewDesc implements DDLDescWithWriteId {
-  private static final long serialVersionUID = 1L;
-
+@Explain(displayName = "Alter Materialized View Rewrite", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterMaterializedViewRewriteDesc implements DDLDescWithWriteId {
   private final String fqMaterializedViewName;
   private final boolean rewriteEnable;
 
   public AlterMaterializedViewRewriteDesc(String fqMaterializedViewName, boolean rewriteEnable) {
-    super(AlterMaterializedViewTypes.UPDATE_REWRITE_FLAG);
     this.fqMaterializedViewName = fqMaterializedViewName;
     this.rewriteEnable = rewriteEnable;
   }
@@ -43,10 +40,19 @@ public String getMaterializedViewName() {
     return fqMaterializedViewName;
   }
 
+  @Explain(displayName = "enable", displayOnlyOnTrue = true,
+      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public boolean isRewriteEnable() {
     return rewriteEnable;
   }
 
+  /** Only for explaining. */
+  @Explain(displayName = "disable", displayOnlyOnTrue = true,
+      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public boolean isRewriteDisable() {
+    return !rewriteEnable;
+  }
+
   @Override
   public void setWriteId(long writeId) {
     // We don't actually need the write id, but by implementing DDLDescWithWriteId it ensures that it is allocated
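
The displayOnlyOnTrue flags above are what keep the new plan output readable: the explain renderer prints a boolean property only when its getter returns true, so a plan shows either "enable" or "disable", never both. A rough sketch of how an @Explain-annotated getter can be consumed via reflection — illustrative only; the real rendering lives in ExplainTask and is considerably more involved:

```java
import java.lang.reflect.Method;

import org.apache.hadoop.hive.ql.plan.Explain;

public class ExplainSketch {
  // Walks the @Explain-annotated getters of a desc and prints one "name: value"
  // line per property, skipping false booleans marked displayOnlyOnTrue.
  public static void print(Object desc) throws Exception {
    for (Method getter : desc.getClass().getMethods()) {
      Explain explain = getter.getAnnotation(Explain.class);
      if (explain == null) {
        continue;
      }
      Object value = getter.invoke(desc);
      if (explain.displayOnlyOnTrue() && !Boolean.TRUE.equals(value)) {
        continue; // "disable: false" would only be noise
      }
      System.out.println(explain.displayName() + ": " + value);
    }
  }
}
```
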
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/MaterializedViewUpdateDesc.java
similarity index 59%
rename from ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/view/MaterializedViewUpdateDesc.java
index f7af0737bf..674a256979 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/MaterializedViewUpdateDesc.java
@@ -15,42 +15,53 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.ql.exec;
+package org.apache.hadoop.hive.ql.ddl.view;
 
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
 import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 import java.io.Serializable;
 
-@Explain(displayName = "Materialized View Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class MaterializedViewDesc implements Serializable {
+/**
+ * DDL task description of updating a materialized view.
+ */
+@Explain(displayName = "Materialized View Update", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class MaterializedViewUpdateDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
-  private final String viewName;
+
+  private final String name;
   private final boolean retrieveAndInclude;
   private final boolean disableRewrite;
   private final boolean updateCreationMetadata;
 
-  public MaterializedViewDesc(String viewName, boolean retrieveAndInclude, boolean disableRewrite,
+  public MaterializedViewUpdateDesc(String name, boolean retrieveAndInclude, boolean disableRewrite,
       boolean updateCreationMetadata) {
-    this.viewName = viewName;
+    this.name = name;
     this.retrieveAndInclude = retrieveAndInclude;
     this.disableRewrite = disableRewrite;
     this.updateCreationMetadata = updateCreationMetadata;
   }
 
-  public String getViewName() {
-    return viewName;
+  @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getName() {
+    return name;
   }
 
+  @Explain(displayName = "retrieveAndInclude", displayOnlyOnTrue = true,
+      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public boolean isRetrieveAndInclude() {
     return retrieveAndInclude;
   }
 
+  @Explain(displayName = "disableRewrite", displayOnlyOnTrue = true,
+      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public boolean isDisableRewrite() {
     return disableRewrite;
   }
 
+  @Explain(displayName = "updateCreationMetadata", displayOnlyOnTrue = true,
+      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public boolean isUpdateCreationMetadata() {
     return updateCreationMetadata;
   }
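
The three booleans on this desc are mutually exclusive in practice; which one is set depends on the DDL path that created it, and the q.out changes at the end of this patch show each case. A hedged summary, with a made-up view name:

```java
import org.apache.hadoop.hive.ql.ddl.view.MaterializedViewUpdateDesc;

public class MaterializedViewUpdateDescExamples {
  public static void main(String[] args) {
    // "default.mv1" is illustrative only.
    // After CREATE MATERIALIZED VIEW: load the fresh view into the rewrite registry.
    MaterializedViewUpdateDesc afterCreate =
        new MaterializedViewUpdateDesc("default.mv1", true, false, false);

    // After ALTER MATERIALIZED VIEW ... DISABLE REWRITE: evict it from the registry.
    MaterializedViewUpdateDesc afterDisable =
        new MaterializedViewUpdateDesc("default.mv1", false, true, false);

    // After ALTER MATERIALIZED VIEW ... REBUILD (or an insert that refreshes the
    // view's contents): re-record the creation metadata snapshot.
    MaterializedViewUpdateDesc afterRebuild =
        new MaterializedViewUpdateDesc("default.mv1", false, false, true);
  }
}
```
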
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/MaterializedViewUpdateOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/MaterializedViewUpdateOperation.java
new file mode 100644
index 0000000000..ad6e163a4f
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/MaterializedViewUpdateOperation.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.view;
+
+import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
+
+import com.google.common.collect.ImmutableSet;
+
+/**
+ * Operation process of updating a materialized view.
+ */
+public class MaterializedViewUpdateOperation extends DDLOperation<MaterializedViewUpdateDesc> {
+  public MaterializedViewUpdateOperation(DDLOperationContext context, MaterializedViewUpdateDesc desc) {
+    super(context, desc);
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    if (context.getDriverContext().getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
+      return 0;
+    }
+
+    try {
+      if (desc.isRetrieveAndInclude()) {
+        Table mvTable = context.getDb().getTable(desc.getName());
+        HiveMaterializedViewsRegistry.get().createMaterializedView(context.getDb().getConf(), mvTable);
+      } else if (desc.isDisableRewrite()) {
+        // Disabling rewriting, removing from cache
+        String[] names = desc.getName().split("\\.");
+        HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]);
+      } else if (desc.isUpdateCreationMetadata()) {
+        // We need to update the status of the creation signature
+        Table mvTable = context.getDb().getTable(desc.getName());
+        CreationMetadata cm = new CreationMetadata(MetaStoreUtils.getDefaultCatalog(context.getConf()),
+            mvTable.getDbName(), mvTable.getTableName(),
+            ImmutableSet.copyOf(mvTable.getCreationMetadata().getTablesUsed()));
+        cm.setValidTxnList(context.getConf().get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
+        context.getDb().updateCreationMetadata(mvTable.getDbName(), mvTable.getTableName(), cm);
+      }
+    } catch (HiveException e) {
+      LOG.debug("Exception during materialized view cache update", e);
+      context.getTask().setException(e);
+    }
+
+    return 0;
+  }
+}
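
One detail in execute() worth flagging: the disable-rewrite branch recovers the database and table by splitting the dotted name. The desc is built from Table.getFullyQualifiedName() (see the SemanticAnalyzer hunk below), so the two-part form holds, but a guard would make the contract explicit. splitFqName below is a hypothetical helper, not a Hive method:

```java
// Hypothetical helper, not part of Hive: makes the "db.table" assumption behind
// desc.getName().split("\\.") explicit instead of silently mis-splitting.
static String[] splitFqName(String fqName) {
  String[] parts = fqName.split("\\.");
  if (parts.length != 2) {
    throw new IllegalArgumentException("expected <db>.<table>, got: " + fqName);
  }
  return parts;
}
```
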
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
deleted file mode 100644
index 87828b14ee..0000000000
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec;
-
-import com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
-import org.apache.hadoop.hive.ql.plan.api.StageType;
-
-import java.io.Serializable;
-
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-
-/**
- * This task does some work related to materialized views. In particular, it adds
- * or removes the materialized view from the registry if needed, or registers new
- * creation metadata.
- */
-public class MaterializedViewTask extends Task<MaterializedViewDesc> implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  public MaterializedViewTask() {
-    super();
-  }
-
-  @Override
-  public int execute(DriverContext driverContext) {
-    if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
-      return 0;
-    }
-    try {
-      if (getWork().isRetrieveAndInclude()) {
-        Hive db = Hive.get(conf);
-        Table mvTable = db.getTable(getWork().getViewName());
-        HiveMaterializedViewsRegistry.get().createMaterializedView(db.getConf(), mvTable);
-      } else if (getWork().isDisableRewrite()) {
-        // Disabling rewriting, removing from cache
-        String[] names = getWork().getViewName().split("\\.");
-        HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]);
-      } else if (getWork().isUpdateCreationMetadata()) {
-        // We need to update the status of the creation signature
-        Hive db = Hive.get(conf);
-        Table mvTable = db.getTable(getWork().getViewName());
-        CreationMetadata cm =
-            new CreationMetadata(MetaStoreUtils.getDefaultCatalog(conf), mvTable.getDbName(),
-                mvTable.getTableName(),
-                ImmutableSet.copyOf(mvTable.getCreationMetadata().getTablesUsed()));
-        cm.setValidTxnList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
-        db.updateCreationMetadata(mvTable.getDbName(), mvTable.getTableName(), cm);
-      }
-    } catch (HiveException e) {
-      LOG.debug("Exception during materialized view cache update", e);
-      setException(e);
-    }
-    return 0;
-  }
-
-  @Override
-  public StageType getType() {
-    return StageType.DDL;
-  }
-
-  @Override
-  public String getName() {
-    return MaterializedViewTask.class.getSimpleName();
-  }
-}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
index 7025b4a9e7..f70726409b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
@@ -88,9 +88,6 @@ public TaskTuple(Class<T> workClass, Class<? extends Task<T>> taskClass) {
     taskvec.add(new TaskTuple<CopyWork>(CopyWork.class, CopyTask.class));
     taskvec.add(new TaskTuple<ReplCopyWork>(ReplCopyWork.class, ReplCopyTask.class));
     taskvec.add(new TaskTuple<DDLWork>(DDLWork.class, DDLTask.class));
-    taskvec.add(new TaskTuple<MaterializedViewDesc>(
-        MaterializedViewDesc.class,
-        MaterializedViewTask.class));
     taskvec
         .add(new TaskTuple<ExplainWork>(ExplainWork.class, ExplainTask.class));
     taskvec
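
With MaterializedViewTask gone, TaskFactory no longer needs a dedicated work-class/task-class row: the desc now travels inside DDLWork, and DDLTask selects the matching operation. A stripped-down sketch of that dispatch pattern, using toy types rather than Hive's actual DDLTask internals:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.BiFunction;

public class DdlDispatchSketch {
  interface DdlDesc { }
  interface Context { }
  interface DdlOperation { int execute() throws Exception; }

  // One shared task type looks the operation up by the desc's runtime class,
  // instead of TaskFactory mapping every work class to its own Task subclass.
  private static final Map<Class<? extends DdlDesc>,
      BiFunction<Context, DdlDesc, DdlOperation>> OPERATIONS = new HashMap<>();

  static <T extends DdlDesc> void register(Class<T> descClass,
      BiFunction<Context, DdlDesc, DdlOperation> factory) {
    OPERATIONS.put(descClass, factory);
  }

  static int execute(Context context, DdlDesc desc) throws Exception {
    return OPERATIONS.get(desc.getClass()).apply(context, desc).execute();
  }
}
```
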
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 1c6d4ac897..67b4901b0c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -25,13 +25,13 @@
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc;
 import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc;
+import org.apache.hadoop.hive.ql.ddl.view.MaterializedViewUpdateDesc;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.ListSinkOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
-import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator;
@@ -122,7 +122,7 @@
   private AnalyzeRewriteContext analyzeRewrite;
   private CreateTableDesc createTableDesc;
   private CreateViewDesc createViewDesc;
-  private MaterializedViewDesc materializedViewUpdateDesc;
+  private MaterializedViewUpdateDesc materializedViewUpdateDesc;
   private boolean reduceSinkAddedBySortedDynPartition;
 
   private Map<SelectOperator, Table> viewProjectToViewSchema;
@@ -199,7 +199,8 @@ public ParseContext(
       Map<String, ReadEntity> viewAliasToInput,
       List<ReduceSinkOperator> reduceSinkOperatorsAddedByEnforceBucketingSorting,
       AnalyzeRewriteContext analyzeRewrite, CreateTableDesc createTableDesc,
-      CreateViewDesc createViewDesc, MaterializedViewDesc materializedViewUpdateDesc, QueryProperties queryProperties,
+      CreateViewDesc createViewDesc, MaterializedViewUpdateDesc materializedViewUpdateDesc,
+      QueryProperties queryProperties,
       Map<SelectOperator, Table> viewProjectToTableSchema, Set<FileSinkDesc> acidFileSinks) {
     this.queryState = queryState;
     this.conf = queryState.getConf();
@@ -611,7 +612,7 @@ public CreateViewDesc getCreateViewDesc() {
     return createViewDesc;
   }
 
-  public MaterializedViewDesc getMaterializedViewUpdateDesc() {
+  public MaterializedViewUpdateDesc getMaterializedViewUpdateDesc() {
     return materializedViewUpdateDesc;
   }
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index d395db1b59..59a0155cff 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -107,6 +107,7 @@
 import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableUnsetPropertiesDesc;
 import org.apache.hadoop.hive.ql.ddl.table.misc.PreInsertTableDesc;
 import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc;
+import org.apache.hadoop.hive.ql.ddl.view.MaterializedViewUpdateDesc;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -120,7 +121,6 @@
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.LimitOperator;
-import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorFactory;
 import org.apache.hadoop.hive.ql.exec.RecordReader;
@@ -338,7 +338,7 @@
   Map<String, PrunedPartitionList> prunedPartitions;
   protected List<FieldSchema> resultSchema;
   protected CreateViewDesc createVwDesc;
-  protected MaterializedViewDesc materializedViewUpdateDesc;
+  protected MaterializedViewUpdateDesc materializedViewUpdateDesc;
   protected ArrayList<String> viewsExpanded;
   protected ASTNode viewSelect;
   protected final UnparseTranslator unparseTranslator;
@@ -7356,7 +7356,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
     }
 
     if (destinationTable.isMaterializedView()) {
-      materializedViewUpdateDesc = new MaterializedViewDesc(
+      materializedViewUpdateDesc = new MaterializedViewUpdateDesc(
           destinationTable.getFullyQualifiedName(), false, false, true);
     }
 
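
The genFileSinkPlan hunk above covers the rebuild/insert path: when the file sink targets a materialized view, only updateCreationMetadata is set. What that refresh amounts to, using the same metastore calls as the operation; the catalog, view, and source-table names here are illustrative, borrowed from the partitioned-view test below:

```java
import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.CreationMetadata;

import com.google.common.collect.ImmutableSet;

public class CreationMetadataSketch {
  // Rebuilds the view's "creation signature": which tables it was computed from,
  // plus the transaction snapshot its contents reflect. The rewrite machinery can
  // later compare this against the current transaction state to judge staleness.
  static CreationMetadata snapshot(HiveConf conf) {
    CreationMetadata cm = new CreationMetadata(
        "hive", "default", "partition_mv_1",      // catalog, db, view (illustrative)
        ImmutableSet.of("default.src_txn"));      // source tables (illustrative)
    cm.setValidTxnList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
    return cm;
  }
}
```
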
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 0a7fa5af29..0b55a39a8a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -38,9 +38,9 @@
 import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc;
 import org.apache.hadoop.hive.ql.ddl.view.AlterMaterializedViewRewriteDesc;
 import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc;
+import org.apache.hadoop.hive.ql.ddl.view.MaterializedViewUpdateDesc;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
-import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;
 import org.apache.hadoop.hive.ql.exec.MoveTask;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorUtils;
@@ -361,20 +361,23 @@ public void compile(final ParseContext pCtx,
       CreateTableDesc crtTblDesc = pCtx.getCreateTable();
       crtTblDesc.validate(conf);
       Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc));
-      patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask, CollectionUtils.isEmpty(crtTblDesc.getPartColNames()));
+      patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtTblTask,
+          CollectionUtils.isEmpty(crtTblDesc.getPartColNames()));
     } else if (pCtx.getQueryProperties().isMaterializedView()) {
       // generate a DDL task and make it a dependent task of the leaf
       CreateViewDesc viewDesc = pCtx.getCreateViewDesc();
       Task<? extends Serializable> crtViewTask = TaskFactory.get(new DDLWork(
           inputs, outputs, viewDesc));
-      patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtViewTask, CollectionUtils.isEmpty(viewDesc.getPartColNames()));
+      patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtViewTask,
+          CollectionUtils.isEmpty(viewDesc.getPartColNames()));
     } else if (pCtx.getMaterializedViewUpdateDesc() != null) {
       // If there is a materialized view update desc, we introduce it at the end
       // of the tree.
-      MaterializedViewDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc();
+      MaterializedViewUpdateDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc();
+      DDLWork ddlWork = new DDLWork(inputs, outputs, materializedViewDesc);
       Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
       getLeafTasks(rootTasks, leafTasks);
-      Task<? extends Serializable> materializedViewTask = TaskFactory.get(materializedViewDesc, conf);
+      Task<? extends Serializable> materializedViewTask = TaskFactory.get(ddlWork, conf);
       for (Task<? extends Serializable> task : leafTasks) {
         task.addDependentTask(materializedViewTask);
       }
@@ -491,10 +494,9 @@ private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticExce
     }
   }
 
-  private void patchUpAfterCTASorMaterializedView(final List<Task<? extends Serializable>> rootTasks,
-      final HashSet<WriteEntity> outputs,
-      Task<? extends Serializable> createTask,
-      boolean createTaskAfterMoveTask) {
+  private void patchUpAfterCTASorMaterializedView(List<Task<? extends Serializable>> rootTasks,
+      Set<ReadEntity> inputs, Set<WriteEntity> outputs, Task<? extends Serializable> createTask,
+      boolean createTaskAfterMoveTask) {
     // clear the mapredWork output file from outputs for CTAS
     // DDLWork at the tail of the chain will have the output
     Iterator<WriteEntity> outIter = outputs.iterator();
@@ -510,7 +512,7 @@ private void patchUpAfterCTASorMaterializedView(final List<Task<? extends Seria
-    Set<Task<? extends Serializable>> leaves = new LinkedHashSet<Task<? extends Serializable>>();
+    Set<Task<? extends Serializable>> leaves = new LinkedHashSet<>();
     getLeafTasks(rootTasks, leaves);
     assert (leaves.size() > 0);
     // Target task is supposed to be the last task
@@ -554,16 +556,20 @@ private void patchUpAfterCTASorMaterializedView(final List<Task<? extends Seria
diff --git ql/src/test/results/clientpositive/llap/materialized_view_partitioned.q.out ql/src/test/results/clientpositive/llap/materialized_view_partitioned.q.out
--- ql/src/test/results/clientpositive/llap/materialized_view_partitioned.q.out
+++ ql/src/test/results/clientpositive/llap/materialized_view_partitioned.q.out
@@ -701,7 +703,9 @@ STAGE PLANS:
           Table: default.partition_mv_1
 
   Stage: Stage-4
-    Materialized View Work
+    Materialized View Update
+      name: default.partition_mv_1
+      updateCreationMetadata: true
 
 PREHOOK: query: ALTER MATERIALIZED VIEW partition_mv_1 REBUILD
 PREHOOK: type: QUERY
@@ -1059,7 +1063,9 @@ STAGE PLANS:
           Table: default.partition_mv_3
 
   Stage: Stage-4
-    Materialized View Work
+    Materialized View Update
+      name: default.partition_mv_3
+      updateCreationMetadata: true
 
 PREHOOK: query: ALTER MATERIALIZED VIEW partition_mv_3 REBUILD
 PREHOOK: type: QUERY
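
The TaskCompiler hunks above preserve the old ordering guarantee: the registry/metadata update may only run once everything else has finished, so the DDL task is attached as a dependent of every current leaf. The traversal, reduced to a self-contained sketch with a toy Task type (Hive's getLeafTasks/addDependentTask do the equivalent):

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class LeafWiringSketch {
  static class Task {
    final List<Task> children = new ArrayList<>();
    void addDependentTask(Task t) { children.add(t); }
  }

  // Depth-first walk; every task without children gets the update task as a
  // dependent, so it cannot start before all plan branches have finished.
  static void attachToLeaves(List<Task> roots, Task update, Set<Task> visited) {
    for (Task task : roots) {
      if (!visited.add(task)) {
        continue; // the plan is a DAG: a task may be reachable from several roots
      }
      if (task.children.isEmpty()) {
        task.addDependentTask(update);
      } else {
        attachToLeaves(task.children, update, visited);
      }
    }
  }

  public static void main(String[] args) {
    Task root = new Task();
    root.children.add(new Task());
    attachToLeaves(List.of(root), new Task(), new HashSet<>());
  }
}
```
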
diff --git ql/src/test/results/clientpositive/llap/materialized_view_partitioned_3.q.out ql/src/test/results/clientpositive/llap/materialized_view_partitioned_3.q.out
index aefc67e797..6f1f440e3c 100644
--- ql/src/test/results/clientpositive/llap/materialized_view_partitioned_3.q.out
+++ ql/src/test/results/clientpositive/llap/materialized_view_partitioned_3.q.out
@@ -150,5 +150,7 @@ STAGE PLANS:
           Table: default.partition_mv_sdp
 
   Stage: Stage-5
-    Materialized View Work
+    Materialized View Update
+      name: default.partition_mv_sdp
+      retrieveAndInclude: true
 
diff --git ql/src/test/results/clientpositive/masking_mv.q.out ql/src/test/results/clientpositive/masking_mv.q.out
index 87e873cc37..99146ac854 100644
--- ql/src/test/results/clientpositive/masking_mv.q.out
+++ ql/src/test/results/clientpositive/masking_mv.q.out
@@ -117,7 +117,9 @@ STAGE PLANS:
           Table: default.masking_test_view_n_mv
 
   Stage: Stage-9
-    Materialized View Work
+    Materialized View Update
+      name: default.masking_test_view_n_mv
+      retrieveAndInclude: true
 
   Stage: Stage-3
     Merge File Operator
@@ -789,7 +791,9 @@ STAGE PLANS:
           Table: default.masking_test_view_n_mv_2
 
   Stage: Stage-9
-    Materialized View Work
+    Materialized View Update
+      name: default.masking_test_view_n_mv_2
+      retrieveAndInclude: true
 
   Stage: Stage-3
     Merge File Operator