diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 6cd1eaa7e5..9121ca8d1e 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1597,5 +1597,6 @@ spark.perf.disabled.query.files=query14.q,\
   query64.q

 druid.query.files=druidmini_test1.q,\
-  druidmini_test_insert.q
+  druidmini_test_insert.q,\
+  druidmini_mv.q
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
index f8825a27ca..9f5e2c5079 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
@@ -35,16 +35,21 @@
 import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.adapter.druid.DruidSchema;
 import org.apache.calcite.adapter.druid.DruidTable;
+import org.apache.calcite.interpreter.BindableConvention;
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptMaterialization;
 import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.core.TableScan;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rel.type.RelDataTypeImpl;
 import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -53,9 +58,10 @@
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveVolcanoPlanner;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
@@ -180,6 +186,12 @@ public void addMaterializedView(Table materializedViewTable) {
           " ignored; error creating view replacement");
       return;
     }
+    final List<String> qualifiedTableName;
+    if (tableRel instanceof Project) {
+      qualifiedTableName = tableRel.getInput(0).getTable().getQualifiedName();
+    } else {
+      qualifiedTableName = tableRel.getTable().getQualifiedName();
+    }
     final RelNode queryRel = parseQuery(viewQuery);
     if (queryRel == null) {
       LOG.warn("Materialized view " + materializedViewTable.getCompleteName() +
@@ -187,10 +199,10 @@ public void addMaterializedView(Table materializedViewTable) {
       return;
     }
     RelOptMaterialization materialization = new RelOptMaterialization(tableRel, queryRel,
-        null, tableRel.getTable().getQualifiedName());
+        null, qualifiedTableName);
     cq.put(vk, materialization);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Cached materialized view for rewriting: " +
-          tableRel.getTable().getQualifiedName());
+      LOG.debug("Cached materialized view for rewriting: " + qualifiedTableName);
     }
     return;
   }
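A note on the hunk above: the replacement plan cached for a materialized view is not always a bare table scan. For Druid-backed views the scan ends up wrapped (the CBO code later in this patch calls the analogous case "a Project on top (due to nullability)"), and RelNode.getTable() only returns the RelOptTable handle on the scan itself. A minimal sketch of the lookup, assuming Calcite's RelNode/RelOptTable API (getQualifiedName() returns a List<String>):

    // Hedged sketch, not the committed code: resolve the table handle whether
    // or not a Project wraps the scan, then read its qualified name.
    final RelOptTable optTable = tableRel instanceof Project
        ? tableRel.getInput(0).getTable()   // Project -> scan -> table
        : tableRel.getTable();
    final List<String> qualifiedTableName = optTable.getQualifiedName();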
@@ -223,7 +235,8 @@ public void dropMaterializedView(Table materializedViewTable) {

   private static RelNode createTableScan(Table viewTable) {
     // 0. Recreate cluster
-    final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
+    final HiveConf conf = SessionState.get().getConf();
+    final RelOptPlanner planner = CalcitePlanner.createPlanner(conf);
     final RexBuilder rexBuilder = new RexBuilder(
         new JavaTypeFactoryImpl(
             new HiveTypeSystemImpl()));
@@ -287,7 +300,7 @@ private static RelNode createTableScan(Table viewTable) {
         rowType, viewTable, nonPartitionColumns, partitionColumns,
         new ArrayList(), SessionState.get().getConf(),
         new HashMap(), new HashMap(), new AtomicInteger());
-    RelNode tableRel;
+    RelNode rel;

     // 3. Build operator
     if (obtainTableType(viewTable) == TableType.DRUID) {
@@ -314,19 +327,20 @@
       List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);

-      DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
+      final DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
           dataSource, RelDataTypeImpl.proto(rowType), metrics,
           DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals, null, null);
       final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
          optTable, viewTable.getTableName(), null, false, false);
-      tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
+      rel = DruidQuery.create(cluster, cluster.traitSetOf(BindableConvention.INSTANCE),
           optTable, druidTable, ImmutableList.of(scan));
     } else {
       // Build Hive Table Scan Rel
-      tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
+      rel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
           viewTable.getTableName(), null, false, false);
     }
-    return tableRel;
+
+    return rel;
   }

   private static RelNode parseQuery(String viewQuery) {
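With this change the registry builds its cached view scans through the same planner factory as the query compiler (CalcitePlanner.createPlanner, added later in this patch) and tags Druid scans with BindableConvention, so the cached plans are structurally comparable to the plans CBO produces. A hedged sketch of how such a cached entry is consumed during planning (addMaterialization is Calcite's RelOptPlanner API; the variable names follow this patch):

    // tableRel: scan over the materialized view; queryRel: re-parsed view query.
    RelOptMaterialization materialization =
        new RelOptMaterialization(tableRel, queryRel, null, qualifiedTableName);
    // During CBO the planner may substitute queryRel-shaped subtrees with tableRel.
    planner.addMaterialization(materialization);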
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
index 1bd12b750a..28c2c37812 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
@@ -18,11 +18,14 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.cost;

+import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.plan.ConventionTraitDef;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.volcano.VolcanoPlanner;
 import org.apache.calcite.rel.RelCollationTraitDef;
+import org.apache.calcite.rel.RelNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HivePlannerContext;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveDruidRules;

 /**
  * Refinement of {@link org.apache.calcite.plan.volcano.VolcanoPlanner} for Hive.
@@ -47,4 +50,26 @@ public static RelOptPlanner createPlanner(HivePlannerContext conf) {
     }
     return planner;
   }
+
+  @Override
+  public void registerClass(RelNode node) {
+    if (node instanceof DruidQuery) {
+      // Special handling for Druid rules here, as otherwise the planner
+      // would add the default Druid rules, which use the logical builder.
+      addRule(HiveDruidRules.FILTER);
+      addRule(HiveDruidRules.PROJECT_FILTER_TRANSPOSE);
+      addRule(HiveDruidRules.AGGREGATE_FILTER_TRANSPOSE);
+      addRule(HiveDruidRules.AGGREGATE_PROJECT);
+      addRule(HiveDruidRules.PROJECT);
+      addRule(HiveDruidRules.AGGREGATE);
+      addRule(HiveDruidRules.POST_AGGREGATION_PROJECT);
+      addRule(HiveDruidRules.FILTER_AGGREGATE_TRANSPOSE);
+      addRule(HiveDruidRules.FILTER_PROJECT_TRANSPOSE);
+      addRule(HiveDruidRules.SORT_PROJECT_TRANSPOSE);
+      addRule(HiveDruidRules.SORT);
+      addRule(HiveDruidRules.PROJECT_SORT_TRANSPOSE);
+      return;
+    }
+    super.registerClass(node);
+  }
 }
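Why the override: Calcite's VolcanoPlanner calls registerClass the first time it sees each concrete RelNode class and, by default, delegates to node.register(planner); for DruidQuery that default registration would pull in Calcite's stock DruidRules, which are built with the logical RelBuilder and therefore emit logical operators that the rest of Hive's planner cannot consume. A hedged illustration of the difference, using only identifiers that appear in this patch:

    // Stock instance: creates logical operators when the rule fires.
    RelOptRule stockFilter = DruidRules.FILTER;
    // Hive-builder instance: creates Hive operators (HiveFilter/HiveProject) instead.
    RelOptRule hiveFilter = new DruidFilterRule(HiveRelFactories.HIVE_BUILDER);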
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidRules.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidRules.java
new file mode 100644
index 0000000000..b6e773de80
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidRules.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import org.apache.calcite.adapter.druid.DruidRules.DruidAggregateFilterTransposeRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidAggregateProjectRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidAggregateRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidFilterAggregateTransposeRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidFilterProjectTransposeRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidFilterRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidPostAggregationProjectRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidProjectFilterTransposeRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidProjectRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidProjectSortTransposeRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidSortProjectTransposeRule;
+import org.apache.calcite.adapter.druid.DruidRules.DruidSortRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
+
+/**
+ * Druid rules with Hive builder factory.
+ */
+public class HiveDruidRules {
+
+  public static final DruidFilterRule FILTER =
+      new DruidFilterRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidProjectRule PROJECT =
+      new DruidProjectRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidAggregateRule AGGREGATE =
+      new DruidAggregateRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidAggregateProjectRule AGGREGATE_PROJECT =
+      new DruidAggregateProjectRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidSortRule SORT =
+      new DruidSortRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidSortProjectTransposeRule SORT_PROJECT_TRANSPOSE =
+      new DruidSortProjectTransposeRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidProjectSortTransposeRule PROJECT_SORT_TRANSPOSE =
+      new DruidProjectSortTransposeRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidProjectFilterTransposeRule PROJECT_FILTER_TRANSPOSE =
+      new DruidProjectFilterTransposeRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidFilterProjectTransposeRule FILTER_PROJECT_TRANSPOSE =
+      new DruidFilterProjectTransposeRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidAggregateFilterTransposeRule AGGREGATE_FILTER_TRANSPOSE =
+      new DruidAggregateFilterTransposeRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidFilterAggregateTransposeRule FILTER_AGGREGATE_TRANSPOSE =
+      new DruidFilterAggregateTransposeRule(HiveRelFactories.HIVE_BUILDER);
+
+  public static final DruidPostAggregationProjectRule POST_AGGREGATION_PROJECT =
+      new DruidPostAggregationProjectRule(HiveRelFactories.HIVE_BUILDER);
+}
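These constants are consumed from two places: HiveVolcanoPlanner.registerClass above (cost-based path) and the hepPlan invocation rewritten below (heuristic Druid push-down). A hedged sketch of the heuristic application, mirroring the hepPlan call in the next file (HepProgramBuilder, HepMatchOrder and HepPlanner are standard Calcite planner classes; `plan` stands for an already-built operator tree):

    HepProgramBuilder program = new HepProgramBuilder();
    program.addMatchOrder(HepMatchOrder.BOTTOM_UP);
    program.addRuleInstance(HiveDruidRules.FILTER);
    program.addRuleInstance(HiveDruidRules.PROJECT);
    program.addRuleInstance(HiveDruidRules.AGGREGATE);
    HepPlanner hepPlanner = new HepPlanner(program.build());
    hepPlanner.setRoot(plan);
    RelNode pushedDown = hepPlanner.findBestExp();  // plan with Druid-pushed subtree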
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index efd5f7af15..511d7cd094 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -49,12 +49,12 @@
 import org.antlr.runtime.tree.TreeVisitor;
 import org.antlr.runtime.tree.TreeVisitorAction;
 import org.apache.calcite.adapter.druid.DruidQuery;
-import org.apache.calcite.adapter.druid.DruidRules;
 import org.apache.calcite.adapter.druid.DruidSchema;
 import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.calcite.config.CalciteConnectionConfig;
 import org.apache.calcite.config.CalciteConnectionConfigImpl;
 import org.apache.calcite.config.CalciteConnectionProperty;
+import org.apache.calcite.interpreter.BindableConvention;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptMaterialization;
 import org.apache.calcite.plan.RelOptPlanner;
@@ -176,6 +176,7 @@
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateProjectMergeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregatePullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateReduceRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveDruidRules;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExceptRewriteRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExpandDistinctAggregatesRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterAggregateTransposeRule;
@@ -335,6 +336,32 @@ public RelNode genLogicalPlan(ASTNode ast) throws SemanticException {
     return resPlan;
   }

+  public static RelOptPlanner createPlanner(HiveConf conf) {
+    return createPlanner(conf, new HashSet<RelNode>(), new HashSet<RelNode>());
+  }
+
+  private static RelOptPlanner createPlanner(
+      HiveConf conf, Set<RelNode> corrScalarRexSQWithAgg, Set<RelNode> scalarAggNoGbyNoWin) {
+    final Double maxSplitSize = (double) HiveConf.getLongVar(
+        conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
+    final Double maxMemory = (double) HiveConf.getLongVar(
+        conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
+    HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
+    HiveRulesRegistry registry = new HiveRulesRegistry();
+    Properties calciteConfigProperties = new Properties();
+    calciteConfigProperties.setProperty(
+        CalciteConnectionProperty.TIME_ZONE.camelName(),
+        conf.getLocalTimeZone().getId());
+    calciteConfigProperties.setProperty(
+        CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(),
+        Boolean.FALSE.toString());
+    CalciteConnectionConfig calciteConfig = new CalciteConnectionConfigImpl(calciteConfigProperties);
+    boolean isCorrelatedColumns = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_CORRELATED_MULTI_KEY_JOINS);
+    HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry, calciteConfig,
+        corrScalarRexSQWithAgg, scalarAggNoGbyNoWin, new HiveConfPlannerContext(isCorrelatedColumns));
+    return HiveVolcanoPlanner.createPlanner(confContext);
+  }
+
   @Override
   @SuppressWarnings("rawtypes")
   Operator genOPTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticException {
@@ -1360,24 +1387,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu
       /*
        * recreate cluster, so that it picks up the additional traitDef
        */
-      final Double maxSplitSize = (double) HiveConf.getLongVar(
-          conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
-      final Double maxMemory = (double) HiveConf.getLongVar(
-          conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
-      HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
-      HiveRulesRegistry registry = new HiveRulesRegistry();
-      Properties calciteConfigProperties = new Properties();
-      calciteConfigProperties.setProperty(
-          CalciteConnectionProperty.TIME_ZONE.camelName(),
-          conf.getLocalTimeZone().getId());
-      calciteConfigProperties.setProperty(
-          CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(),
-          Boolean.FALSE.toString());
-      CalciteConnectionConfig calciteConfig = new CalciteConnectionConfigImpl(calciteConfigProperties);
-      boolean isCorrelatedColumns = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_CORRELATED_MULTI_KEY_JOINS);
-      HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry, calciteConfig,
-          corrScalarRexSQWithAgg, scalarAggNoGbyNoWin, new HiveConfPlannerContext(isCorrelatedColumns));
-      RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
+      RelOptPlanner planner = createPlanner(conf, corrScalarRexSQWithAgg, scalarAggNoGbyNoWin);
       final RexBuilder rexBuilder = cluster.getRexBuilder();
       final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
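The extraction above is what lets HiveMaterializedViewsRegistry reuse the exact planner configuration of the CBO path: same trait definitions, same session time zone, and Calcite's built-in materialization handling kept disabled (Hive drives the rewriting itself). A minimal usage sketch, using only calls that appear in this patch:

    // Both the registry and the CBO path now go through one factory method.
    RelOptPlanner planner = CalcitePlanner.createPlanner(conf);
    RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);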
@@ -1509,7 +1519,7 @@ public RelOptMaterialization apply(RelOptMaterialization materialization) {
           if (viewScan instanceof Project) {
             // There is a Project on top (due to nullability)
             final Project pq = (Project) viewScan;
-            newViewScan = HiveProject.create(optCluster, copyNodeScan(viewScan),
+            newViewScan = HiveProject.create(optCluster, copyNodeScan(pq.getInput()),
                 pq.getChildExps(), pq.getRowType(), Collections.<RelCollation> emptyList());
           } else {
             newViewScan = copyNodeScan(viewScan);
@@ -1522,7 +1532,7 @@ private RelNode copyNodeScan(RelNode scan) {
           final RelNode newScan;
           if (scan instanceof DruidQuery) {
             final DruidQuery dq = (DruidQuery) scan;
-            newScan = DruidQuery.create(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION),
+            newScan = DruidQuery.create(optCluster, optCluster.traitSetOf(BindableConvention.INSTANCE),
                 scan.getTable(), dq.getDruidTable(),
                 ImmutableList.of(dq.getTableScan()));
           } else {
@@ -1623,18 +1633,18 @@ private RelNode copyNodeScan(RelNode scan) {
         perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
         calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), null,
             HepMatchOrder.BOTTOM_UP,
-            DruidRules.FILTER,
-            DruidRules.PROJECT_FILTER_TRANSPOSE,
-            DruidRules.AGGREGATE_FILTER_TRANSPOSE,
-            DruidRules.AGGREGATE_PROJECT,
-            DruidRules.PROJECT,
-            DruidRules.AGGREGATE,
-            DruidRules.POST_AGGREGATION_PROJECT,
-            DruidRules.FILTER_AGGREGATE_TRANSPOSE,
-            DruidRules.FILTER_PROJECT_TRANSPOSE,
-            DruidRules.SORT_PROJECT_TRANSPOSE,
-            DruidRules.SORT,
-            DruidRules.PROJECT_SORT_TRANSPOSE
+            HiveDruidRules.FILTER,
+            HiveDruidRules.PROJECT_FILTER_TRANSPOSE,
+            HiveDruidRules.AGGREGATE_FILTER_TRANSPOSE,
+            HiveDruidRules.AGGREGATE_PROJECT,
+            HiveDruidRules.PROJECT,
+            HiveDruidRules.AGGREGATE,
+            HiveDruidRules.POST_AGGREGATION_PROJECT,
+            HiveDruidRules.FILTER_AGGREGATE_TRANSPOSE,
+            HiveDruidRules.FILTER_PROJECT_TRANSPOSE,
+            HiveDruidRules.SORT_PROJECT_TRANSPOSE,
+            HiveDruidRules.SORT,
+            HiveDruidRules.PROJECT_SORT_TRANSPOSE
             );
         perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
             "Calcite: Druid transformation rules");
@@ -2436,7 +2446,7 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc
             getAliasId(tableAlias, qb),
             HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP),
             qb.isInsideView() || qb.getAliasInsideView().contains(tableAlias.toLowerCase()));
-        tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
+        tableRel = DruidQuery.create(cluster, cluster.traitSetOf(BindableConvention.INSTANCE),
             optTable, druidTable, ImmutableList.of(scan));
       } else {
         // Build row type from field
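All the DruidQuery.create call sites in this patch now use BindableConvention.INSTANCE instead of HiveRelNode.CONVENTION. DruidQuery is Calcite's own operator (a BindableRel), so this gives it the convention the Druid adapter expects, and, presumably, it also makes the registry's cached view scans structurally identical to the scans built here, which is what materialized-view matching compares. The shared creation call (cluster, optTable, druidTable and scan as in the hunks above):

    RelNode druidRel = DruidQuery.create(cluster,
        cluster.traitSetOf(BindableConvention.INSTANCE),
        optTable, druidTable, ImmutableList.of(scan));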
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index c41e371abc..b5e170eaa9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -12720,7 +12720,7 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt
       // We need to go lookup the table and get the select statement and then parse it.
       try {
         Table tab = getTableObjectByName(dbDotTable, true);
-        String viewText = tab.getViewOriginalText();
+        String viewText = tab.getViewExpandedText();
         if (viewText.trim().isEmpty()) {
           throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY);
         }
diff --git a/ql/src/test/queries/clientpositive/druidmini_mv.q b/ql/src/test/queries/clientpositive/druidmini_mv.q
new file mode 100644
index 0000000000..284c9c04c7
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druidmini_mv.q
@@ -0,0 +1,96 @@
+set hive.strict.checks.cartesian.product=false;
+set hive.materializedview.rewriting=true;
+set hive.stats.column.autogather=true;
+
+create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int);
+
+insert into cmv_basetable values
+ (1, 'alfred', 10.30, 2),
+ (2, 'bob', 3.14, 3),
+ (2, 'bonnie', 172342.2, 3),
+ (3, 'calvin', 978.76, 3),
+ (3, 'charlie', 9.8, 1);
+
+CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
+AS
+SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, c
+FROM cmv_basetable
+WHERE a = 2;
+
+SELECT a, b, c FROM cmv_mat_view;
+
+SHOW TBLPROPERTIES cmv_mat_view;
+
+CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWRITE
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
+AS
+SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, c
+FROM cmv_basetable
+WHERE a = 3;
+
+SELECT a, c FROM cmv_mat_view2;
+
+SHOW TBLPROPERTIES cmv_mat_view2;
+
+EXPLAIN
+SELECT a, c
+FROM cmv_basetable
+WHERE a = 3;
+
+SELECT a, c
+FROM cmv_basetable
+WHERE a = 3;
+
+EXPLAIN
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a);
+
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a);
+
+INSERT INTO cmv_basetable VALUES
+ (3, 'charlie', 15.8, 1);
+
+-- TODO: CANNOT USE THE VIEW, IT IS OUTDATED
+EXPLAIN
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a);
+
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a);
+
+-- REBUILD: TODO FOR MVS USING CUSTOM STORAGE HANDLERS
+-- ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+
+-- NOW IT CAN BE USED AGAIN
+EXPLAIN
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a);
+
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a);
+
+DROP MATERIALIZED VIEW cmv_mat_view;
+DROP MATERIALIZED VIEW cmv_mat_view2;
+DROP TABLE cmv_basetable;
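A reading note for the golden file that follows: cmv_mat_view2 declares only a timestamp plus a and c, so Druid stores a and c as metrics and may roll both qualifying base rows, (3, 978.76) and (3, 9.8), into a single row at ingestion time. That rollup (an assumption about Druid's behavior, not something the test asserts) would explain why the view prints one summed row rather than two:

    // Worked arithmetic behind the "6  988.56" row in the output below,
    // assuming Druid rolled the two a=3 rows up into one:
    int a = 3 + 3;            // -> 6
    double c = 978.76 + 9.8;  // -> 988.56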
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
new file mode 100644
index 0000000000..d8ecc0fc6d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
@@ -0,0 +1,480 @@
+PREHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@cmv_basetable
+POSTHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@cmv_basetable
+PREHOOK: query: insert into cmv_basetable values
+ (1, 'alfred', 10.30, 2),
+ (2, 'bob', 3.14, 3),
+ (2, 'bonnie', 172342.2, 3),
+ (3, 'calvin', 978.76, 3),
+ (3, 'charlie', 9.8, 1)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@cmv_basetable
+POSTHOOK: query: insert into cmv_basetable values
+ (1, 'alfred', 10.30, 2),
+ (2, 'bob', 3.14, 3),
+ (2, 'bonnie', 172342.2, 3),
+ (3, 'calvin', 978.76, 3),
+ (3, 'charlie', 9.8, 1)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@cmv_basetable
+POSTHOOK: Lineage: cmv_basetable.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: cmv_basetable.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: cmv_basetable.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: cmv_basetable.d EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
+AS
+SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, c
+FROM cmv_basetable
+WHERE a = 2
+PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_basetable
+PREHOOK: Output: database:default
+PREHOOK: Output: default@cmv_mat_view
+POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
+AS
+SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, c
+FROM cmv_basetable
+WHERE a = 2
+POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@cmv_mat_view
+PREHOOK: query: SELECT a, b, c FROM cmv_mat_view
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cmv_mat_view
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a, b, c FROM cmv_mat_view
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cmv_mat_view
+#### A masked pattern was here ####
+2	bob	3.14
+2	bonnie	172342.20
+PREHOOK: query: SHOW TBLPROPERTIES cmv_mat_view
+PREHOOK: type: SHOW_TBLPROPERTIES
+POSTHOOK: query: SHOW TBLPROPERTIES cmv_mat_view
+POSTHOOK: type: SHOW_TBLPROPERTIES
+COLUMN_STATS_ACCURATE	{"BASIC_STATS":"true"}
+druid.datasource	default.cmv_mat_view
+druid.segment.granularity	HOUR
+numFiles	0
+numRows	2
+rawDataSize	0
+storage_handler	org.apache.hadoop.hive.druid.DruidStorageHandler
+totalSize	0
+#### A masked pattern was here ####
+PREHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWRITE
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
+AS
+SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, c
+FROM cmv_basetable
+WHERE a = 3
+PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_basetable
+PREHOOK: Output: database:default
+PREHOOK: Output: default@cmv_mat_view2
+POSTHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWRITE
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
+AS
+SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, c
+FROM cmv_basetable
+WHERE a = 3
+POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@cmv_mat_view2
+PREHOOK: query: SELECT a, c FROM cmv_mat_view2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a, c FROM cmv_mat_view2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+6	988.56
+PREHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
+PREHOOK: type: SHOW_TBLPROPERTIES
+POSTHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
+POSTHOOK: type: SHOW_TBLPROPERTIES
+COLUMN_STATS_ACCURATE	{"BASIC_STATS":"true"}
+druid.datasource	default.cmv_mat_view2
+druid.segment.granularity	HOUR
+numFiles	0
+numRows	2
+rawDataSize	0
+storage_handler	org.apache.hadoop.hive.druid.DruidStorageHandler
+totalSize	0
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN
+SELECT a, c
+FROM cmv_basetable
+WHERE a = 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT a, c
+FROM cmv_basetable
+WHERE a = 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: cmv_mat_view2
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"default.cmv_mat_view2","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["a","c"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: a (type: int), c (type: decimal(10,2))
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: SELECT a, c
+FROM cmv_basetable
+WHERE a = 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cmv_basetable
+PREHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a, c
+FROM cmv_basetable
+WHERE a = 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+6	988.56
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: EXPLAIN
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: cmv_basetable
+            Statistics: Num rows: 5 Data size: 81 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((3 = a) and (d = 3)) (type: boolean)
+              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: c (type: decimal(10,2))
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: decimal(10,2))
+          TableScan
+            alias: cmv_mat_view2
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"default.cmv_mat_view2","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["c"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              value expressions: c (type: decimal(10,2))
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col1, _col5
+          Statistics: Num rows: 2 Data size: 34 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: 3 (type: int), _col1 (type: decimal(10,2)), 3 (type: int), _col5 (type: decimal(10,2))
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 2 Data size: 34 Basic stats: PARTIAL Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 2 Data size: 34 Basic stats: PARTIAL Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cmv_basetable
+PREHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+3	988.56	3	978.76
+PREHOOK: query: INSERT INTO cmv_basetable VALUES
+ (3, 'charlie', 15.8, 1)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@cmv_basetable
+POSTHOOK: query: INSERT INTO cmv_basetable VALUES
+ (3, 'charlie', 15.8, 1)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@cmv_basetable
+POSTHOOK: Lineage: cmv_basetable.a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: cmv_basetable.b EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: cmv_basetable.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: cmv_basetable.d EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: EXPLAIN
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: cmv_basetable
+            Statistics: Num rows: 6 Data size: 98 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((3 = a) and (d = 3)) (type: boolean)
+              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: c (type: decimal(10,2))
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: decimal(10,2))
+          TableScan
+            alias: cmv_mat_view2
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"default.cmv_mat_view2","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["c"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              value expressions: c (type: decimal(10,2))
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col1, _col5
+          Statistics: Num rows: 2 Data size: 34 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: 3 (type: int), _col1 (type: decimal(10,2)), 3 (type: int), _col5 (type: decimal(10,2))
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 2 Data size: 34 Basic stats: PARTIAL Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 2 Data size: 34 Basic stats: PARTIAL Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cmv_basetable
+PREHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+3	988.56	3	978.76
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: EXPLAIN
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: cmv_basetable
+            Statistics: Num rows: 6 Data size: 98 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((3 = a) and (d = 3)) (type: boolean)
+              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: c (type: decimal(10,2))
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: decimal(10,2))
+          TableScan
+            alias: cmv_mat_view2
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"default.cmv_mat_view2","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["c"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              value expressions: c (type: decimal(10,2))
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col1, _col5
+          Statistics: Num rows: 2 Data size: 34 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: 3 (type: int), _col1 (type: decimal(10,2)), 3 (type: int), _col5 (type: decimal(10,2))
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 2 Data size: 34 Basic stats: PARTIAL Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 2 Data size: 34 Basic stats: PARTIAL Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cmv_basetable
+PREHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM (
+  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
+  JOIN
+  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
+  ON table1.a = table2.a)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Input: default@cmv_mat_view2
+#### A masked pattern was here ####
+3	988.56	3	978.76
+PREHOOK: query: DROP MATERIALIZED VIEW cmv_mat_view
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view
+PREHOOK: Output: default@cmv_mat_view
+POSTHOOK: query: DROP MATERIALIZED VIEW cmv_mat_view
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view
+POSTHOOK: Output: default@cmv_mat_view
+PREHOOK: query: DROP MATERIALIZED VIEW cmv_mat_view2
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view2
+PREHOOK: Output: default@cmv_mat_view2
+POSTHOOK: query: DROP MATERIALIZED VIEW cmv_mat_view2
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view2
+POSTHOOK: Output: default@cmv_mat_view2
+PREHOOK: query: DROP TABLE cmv_basetable
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@cmv_basetable
+PREHOOK: Output: default@cmv_basetable
+POSTHOOK: query: DROP TABLE cmv_basetable
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Output: default@cmv_basetable