diff --git a/data/conf/hive-site.xml b/data/conf/hive-site.xml
index b56cbd2469..932d996f33 100644
--- a/data/conf/hive-site.xml
+++ b/data/conf/hive-site.xml
@@ -141,7 +141,7 @@
<name>hive.exec.pre.hooks</name>
- <value>org.apache.hadoop.hive.ql.hooks.PreExecutePrinter, org.apache.hadoop.hive.ql.hooks.EnforceReadOnlyTables, org.apache.hadoop.hive.ql.hooks.MaterializedViewRegistryPropertiesHook</value>
+ <value>org.apache.hadoop.hive.ql.hooks.PreExecutePrinter, org.apache.hadoop.hive.ql.hooks.EnforceReadOnlyTables</value>
<description>Pre Execute Hook for Tests</description>
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 78b26374f2..a3725c5395 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -247,6 +248,12 @@ public boolean dropPartition(String dbName, String tableName, List<String> partV
return objectStore.getPartitions(dbName, tableName, max);
}
+ @Override
+ public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm)
+ throws MetaException {
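+ // Simply delegate to the wrapped ObjectStore.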
+ objectStore.updateCreationMetadata(dbname, tablename, cm);
+ }
+
@Override
public void alterTable(String dbName, String name, Table newTable)
throws InvalidObjectException, MetaException {
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index d763666ab3..41c89b1cd3 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -2228,9 +2228,8 @@ public void testViewsReplication() throws IOException {
run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver);
verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver);
- // TODO: Enable back when HIVE-18387 goes in, as it fixes the issue.
- // The problem is that alter for stats is removing the metadata information.
- // HIVE-18387 rewrites that logic and will fix the issue.
+ // TODO: This does not work because materialized views need the creation metadata
+ // to be updated in case tables used were replicated to a different database.
//run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view AS SELECT a FROM " + dbName + ".ptned where b=1", driver);
//verifySetup("SELECT a from " + dbName + ".mat_view", ptn_data_1, driver);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 883dcdad0d..6087e0209b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -455,6 +455,7 @@
"Alter table with non-partitioned table does not support cascade"),
HIVE_GROUPING_SETS_SIZE_LIMIT(10411,
"Grouping sets size cannot be greater than 64"),
+ REBUILD_NO_MATERIALIZED_VIEW(10412, "Rebuild command only valid for materialized views"),
//========================== 20000 range starts here ========================//
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
index 52e99f9f09..2a32a51588 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
@@ -29,7 +29,6 @@
import org.apache.hadoop.hive.ql.hooks.Hook;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.HookUtils;
-import org.apache.hadoop.hive.ql.hooks.MaterializedViewRegistryUpdateHook;
import org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook;
import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook;
import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
@@ -83,7 +82,6 @@ public void initialize() {
if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED)) {
queryHooks.add(new MetricsQueryLifeTimeHook());
}
- queryHooks.add(new MaterializedViewRegistryUpdateHook());
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 802349fe86..e37585f559 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -835,7 +835,8 @@ private int createOrDropTriggerToPoolMapping(Hive db, CreateOrDropTriggerToPoolM
private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws HiveException {
try{
- HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler().getMetaHook();
+ HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler() != null ?
+ preInsertTableDesc.getTable().getStorageHandler().getMetaHook() : null;
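+ // Tables without a storage handler (i.e. native tables) have no meta hook, so there is nothing to do.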
if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
return 0;
}
@@ -847,9 +848,22 @@ private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws
return 0;
}
- private int insertCommitWork(Hive db, InsertTableDesc insertTableDesc) throws MetaException {
+ private int insertCommitWork(Hive db, InsertTableDesc insertTableDesc)
+ throws HiveException, MetaException {
+ // Execute DDL insert specific tasks
+ if (insertTableDesc.getTable().isMaterializedView()) {
+ // We need to update the status of the creation signature
+ Table table = insertTableDesc.getTable();
+ CreationMetadata cm =
+ new CreationMetadata(table.getDbName(), table.getTableName(),
+ ImmutableSet.copyOf(table.getCreationMetadata().getTablesUsed()));
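+ // Record the current transaction snapshot in the creation metadata; it can later be used
+ // to determine whether the materialized view contents are outdated.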
+ cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
+ db.updateCreationMetadata(table.getDbName(), table.getTableName(), cm);
+ }
+ // Execute hook (if present)
boolean failed = true;
- HiveMetaHook hook = insertTableDesc.getTable().getStorageHandler().getMetaHook();
+ HiveMetaHook hook = insertTableDesc.getTable().getStorageHandler() != null ?
+ insertTableDesc.getTable().getStorageHandler().getMetaHook() : null;
if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
return 0;
}
@@ -4923,39 +4937,30 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
throw new HiveException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(crtView.getViewName()));
}
- if (crtView.isMaterialized()) {
- // We need to update the status of the creation signature
- CreationMetadata cm =
- new CreationMetadata(oldview.getDbName(), oldview.getTableName(),
- ImmutableSet.copyOf(crtView.getTablesUsed()));
- cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
- oldview.getTTable().setCreationMetadata(cm);
- db.alterTable(crtView.getViewName(), oldview, null);
- // This is a replace/rebuild, so we need an exclusive lock
- addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_EXCLUSIVE));
- } else {
- // replace existing view
- // remove the existing partition columns from the field schema
- oldview.setViewOriginalText(crtView.getViewOriginalText());
- oldview.setViewExpandedText(crtView.getViewExpandedText());
- oldview.setFields(crtView.getSchema());
- if (crtView.getComment() != null) {
- oldview.setProperty("comment", crtView.getComment());
- }
- if (crtView.getTblProps() != null) {
- oldview.getTTable().getParameters().putAll(crtView.getTblProps());
- }
- oldview.setPartCols(crtView.getPartCols());
- if (crtView.getInputFormat() != null) {
- oldview.setInputFormatClass(crtView.getInputFormat());
- }
- if (crtView.getOutputFormat() != null) {
- oldview.setOutputFormatClass(crtView.getOutputFormat());
- }
- oldview.checkValidity(null);
- db.alterTable(crtView.getViewName(), oldview, null);
- addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
+ // It should not be a materialized view
+ assert !crtView.isMaterialized();
+
+ // replace existing view
+ // remove the existing partition columns from the field schema
+ oldview.setViewOriginalText(crtView.getViewOriginalText());
+ oldview.setViewExpandedText(crtView.getViewExpandedText());
+ oldview.setFields(crtView.getSchema());
+ if (crtView.getComment() != null) {
+ oldview.setProperty("comment", crtView.getComment());
+ }
+ if (crtView.getTblProps() != null) {
+ oldview.getTTable().getParameters().putAll(crtView.getTblProps());
}
+ oldview.setPartCols(crtView.getPartCols());
+ if (crtView.getInputFormat() != null) {
+ oldview.setInputFormatClass(crtView.getInputFormat());
+ }
+ if (crtView.getOutputFormat() != null) {
+ oldview.setOutputFormatClass(crtView.getOutputFormat());
+ }
+ oldview.checkValidity(null);
+ db.alterTable(crtView.getViewName(), oldview, null);
+ addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
} else {
// We create new view
Table tbl = crtView.toTable(conf);
@@ -4977,8 +4982,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
return 0;
}
- private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException {
-
+ private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException {
if (truncateTableDesc.getColumnIndexes() != null) {
ColumnTruncateWork truncateWork = new ColumnTruncateWork(
truncateTableDesc.getColumnIndexes(), truncateTableDesc.getInputDir(),
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryTask.java
new file mode 100644
index 0000000000..b4abef9c20
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryTask.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec;
+
+import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
+import org.apache.hadoop.hive.ql.plan.api.StageType;
+
+import java.io.Serializable;
+
+/**
+ * This task adds the materialized view to the registry.
+ */
+public class MaterializedViewUpdateRegistryTask extends Task<MaterializedViewUpdateRegistryWork> implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ public MaterializedViewUpdateRegistryTask() {
+ super();
+ }
+
+ @Override
+ public int execute(DriverContext driverContext) {
+ if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
+ return 0;
+ }
+ try {
+ if (getWork().isRetrieveAndInclude()) {
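+ // The materialized view was created or rewriting was enabled for it: load it from the
+ // metastore and add it to the registry so it can be used for query rewriting.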
+ Hive db = Hive.get(conf);
+ Table mvTable = db.getTable(getWork().getViewName());
+ HiveMaterializedViewsRegistry.get().createMaterializedView(db.getConf(), mvTable);
+ } else if (getWork().isDisableRewrite()) {
+ // Disabling rewriting, removing from cache
+ String[] names = getWork().getViewName().split("\\.");
+ HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]);
+ }
+ } catch (HiveException e) {
+ LOG.debug("Exception during materialized view cache update", e);
+ }
+ return 0;
+ }
+
+ @Override
+ public StageType getType() {
+ return StageType.DDL;
+ }
+
+ @Override
+ public String getName() {
+ return MaterializedViewUpdateRegistryTask.class.getSimpleName();
+ }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryWork.java
new file mode 100644
index 0000000000..35ae002e4a
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewUpdateRegistryWork.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec;
+
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+import java.io.Serializable;
+
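+/**
+ * Work descriptor for {@link MaterializedViewUpdateRegistryTask}: identifies the materialized
+ * view and whether it should be added to the registry (retrieveAndInclude) or removed from it
+ * (disableRewrite).
+ */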
+@Explain(displayName = "Materialized View Registry Update", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class MaterializedViewUpdateRegistryWork implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private final String viewName;
+ private final boolean retrieveAndInclude;
+ private final boolean disableRewrite;
+
+ public MaterializedViewUpdateRegistryWork(String viewName, boolean retrieveAndInclude, boolean disableRewrite) {
+ this.viewName = viewName;
+ this.retrieveAndInclude = retrieveAndInclude;
+ this.disableRewrite = disableRewrite;
+ }
+
+ public String getViewName() {
+ return viewName;
+ }
+
+ public boolean isRetrieveAndInclude() {
+ return retrieveAndInclude;
+ }
+
+ public boolean isDisableRewrite() {
+ return disableRewrite;
+ }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
index 83590e2176..2e02787a85 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
@@ -81,6 +81,9 @@ public TaskTuple(Class<T> workClass, Class<? extends Task<T>> taskClass) {
taskvec.add(new TaskTuple<CopyWork>(CopyWork.class, CopyTask.class));
taskvec.add(new TaskTuple<ReplCopyWork>(ReplCopyWork.class, ReplCopyTask.class));
taskvec.add(new TaskTuple<DDLWork>(DDLWork.class, DDLTask.class));
+ taskvec.add(new TaskTuple<MaterializedViewUpdateRegistryWork>(
+ MaterializedViewUpdateRegistryWork.class,
+ MaterializedViewUpdateRegistryTask.class));
taskvec.add(new TaskTuple<FunctionWork>(FunctionWork.class,
FunctionTask.class));
taskvec
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryPropertiesHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryPropertiesHook.java
deleted file mode 100644
index 98d5e88989..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryPropertiesHook.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.hooks;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-/**
- * Implementation of a pre execute hook that reloads the materialized view registry
- * if needed by the test framework
- */
-public class MaterializedViewRegistryPropertiesHook implements ExecuteWithHookContext {
-
- @Override
- public void run(HookContext hookContext) throws Exception {
- SessionState ss = SessionState.get();
- if (ss != null && ss.getConf().get(HiveConf.ConfVars.HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL.varname)
- .equals("DUMMY")) {
- HiveMaterializedViewsRegistry.get().init(Hive.get(ss.getConf()));
- }
- }
-
-}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java
deleted file mode 100644
index e886399d53..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.hooks;
-
-import java.io.Serializable;
-import java.util.List;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.ql.exec.DDLTask;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.TaskRunner;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Updates the materialized view registry after changes.
- */
-public class MaterializedViewRegistryUpdateHook implements QueryLifeTimeHook {
-
- private static final Logger LOG = LoggerFactory.getLogger(MaterializedViewRegistryUpdateHook.class);
-
- @Override
- public void beforeCompile(QueryLifeTimeHookContext ctx) {
- }
-
- @Override
- public void afterCompile(QueryLifeTimeHookContext ctx, boolean hasError) {
- }
-
- @Override
- public void beforeExecution(QueryLifeTimeHookContext ctx) {
- }
-
- @Override
- public void afterExecution(QueryLifeTimeHookContext ctx, boolean hasError) {
- if (hasError) {
- return;
- }
- HiveConf hiveConf = ctx.getHiveConf();
- try {
- List<TaskRunner> completedTasks = ctx.getHookContext().getCompleteTaskList();
- for (TaskRunner taskRunner : completedTasks) {
- Task<? extends Serializable> task = taskRunner.getTask();
- if (task instanceof DDLTask) {
- DDLTask ddlTask = (DDLTask) task;
- DDLWork work = ddlTask.getWork();
- String tableName = null;
- boolean isRewriteEnabled = false;
- if (work.getCreateViewDesc() != null && work.getCreateViewDesc().isMaterialized()) {
- tableName = work.getCreateViewDesc().toTable(hiveConf).getFullyQualifiedName();
- isRewriteEnabled = work.getCreateViewDesc().isRewriteEnabled();
- } else if (work.getAlterMaterializedViewDesc() != null) {
- tableName = work.getAlterMaterializedViewDesc().getMaterializedViewName();
- isRewriteEnabled = work.getAlterMaterializedViewDesc().isRewriteEnable();
- } else {
- continue;
- }
-
- if (isRewriteEnabled) {
- Hive db = Hive.get();
- Table mvTable = db.getTable(tableName);
- HiveMaterializedViewsRegistry.get().createMaterializedView(db.getConf(), mvTable);
- } else if (work.getAlterMaterializedViewDesc() != null) {
- // Disabling rewriting, removing from cache
- String[] names = tableName.split("\\.");
- HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]);
- }
- }
- }
- } catch (HiveException e) {
- if (HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING)) {
- String message = "Error updating materialized view cache; consider disabling: " + ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING.varname;
- LOG.error(message, e);
- throw new RuntimeException(message, e);
- } else {
- LOG.debug("Exception during materialized view cache update", e);
- }
- }
- }
-
-}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index a45cac60cb..7b7e14071e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -90,6 +90,7 @@
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.CompactionResponse;
import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -124,6 +125,7 @@
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMMapping;
import org.apache.hadoop.hive.metastore.api.WMNullablePool;
@@ -658,6 +660,15 @@ public void alterTable(String dbName, String tblName, Table newTbl, boolean casc
}
}
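+ /**
+ * Updates the creation metadata of a materialized view in the metastore, i.e. the set of
+ * tables it was created from and the associated transaction snapshot.
+ */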
+ public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+ throws HiveException {
+ try {
+ getMSC().updateCreationMetadata(dbName, tableName, cm);
+ } catch (TException e) {
+ throw new HiveException("Unable to update creation metadata " + e.getMessage(), e);
+ }
+ }
+
/**
* Updates the existing partition metadata with the new metadata.
*
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 59c0fe4d4c..302314405d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -1509,22 +1509,12 @@ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask,
table = null;
}
} else if (mvWork.getLoadFileWork().getCreateViewDesc() != null) {
- if (mvWork.getLoadFileWork().getCreateViewDesc().isReplace()) {
- // ALTER MV ... REBUILD
- String tableName = mvWork.getLoadFileWork().getCreateViewDesc().getViewName();
- try {
- table = Hive.get().getTable(tableName);
- } catch (HiveException e) {
- throw new RuntimeException("unexpected; MV should be present already..: " + tableName, e);
- }
- } else {
- // CREATE MATERIALIZED VIEW ...
- try {
- table = mvWork.getLoadFileWork().getCreateViewDesc().toTable(hconf);
- } catch (HiveException e) {
- LOG.debug("can't pre-create table for MV", e);
- table = null;
- }
+ // CREATE MATERIALIZED VIEW ...
+ try {
+ table = mvWork.getLoadFileWork().getCreateViewDesc().toTable(hconf);
+ } catch (HiveException e) {
+ LOG.debug("can't pre-create table for MV", e);
+ table = null;
}
} else {
throw new RuntimeException("unexpected; this should be a CTAS or a CREATE/REBUILD MV - however no desc present");
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index d18dba554e..171825eb74 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -1185,9 +1185,7 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit
|| ast.getToken().getType() == HiveParser.TOK_TABLE_PARTITION
|| ast.getToken().getType() == HiveParser.TOK_TABTYPE
|| ast.getToken().getType() == HiveParser.TOK_CREATETABLE
- || ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW
- || (ast.getToken().getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW &&
- ast.getChild(1).getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD));
+ || ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW);
int childIndex = 0;
numDynParts = 0;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java
new file mode 100644
index 0000000000..75eb50c579
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import org.apache.hadoop.hive.common.HiveStatsUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.conf.HiveVariableSource;
+import org.apache.hadoop.hive.conf.VariableSubstitution;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * MaterializedViewRebuildSemanticAnalyzer.
+ * Rewrites ALTER MATERIALIZED VIEW _mv_name_ REBUILD statement into
+ * INSERT OVERWRITE TABLE _mv_name_ _mv_query_ .
+ */
+public class MaterializedViewRebuildSemanticAnalyzer extends CalcitePlanner {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MaterializedViewRebuildSemanticAnalyzer.class);
+ static final private LogHelper console = new LogHelper(LOG);
+
+
+ public MaterializedViewRebuildSemanticAnalyzer(QueryState queryState) throws SemanticException {
+ super(queryState);
+ }
+
+
+ @Override
+ public void analyzeInternal(ASTNode ast) throws SemanticException {
+ if (rewrittenRebuild) {
+ super.analyzeInternal(ast);
+ return;
+ }
+
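+ // First pass: rewrite ALTER MATERIALIZED VIEW _mv_name_ REBUILD into
+ // INSERT OVERWRITE TABLE _mv_name_ _mv_query_ and analyze the rewritten statement.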
+ String[] qualifiedTableName = getQualifiedTableName((ASTNode) ast.getChild(0));
+ String dbDotTable = getDotName(qualifiedTableName);
+ ASTNode rewrittenAST;
+ // We need to go lookup the table and get the select statement and then parse it.
+ try {
+ Table tab = getTableObjectByName(dbDotTable, true);
+ if (!tab.isMaterializedView()) {
+ // Rebuild is only valid for materialized views
+ throw new SemanticException(ErrorMsg.REBUILD_NO_MATERIALIZED_VIEW);
+ }
+ // We need to use the expanded text for the materialized view, as it will contain
+ // the qualified table aliases, etc.
+ String viewText = tab.getViewExpandedText();
+ if (viewText.trim().isEmpty()) {
+ throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY);
+ }
+ Context ctx = new Context(queryState.getConf());
+ rewrittenAST = ParseUtils.parse("insert overwrite table `" +
+ dbDotTable + "` " + viewText, ctx);
+ this.ctx.addRewrittenStatementContext(ctx);
+ } catch (Exception e) {
+ throw new SemanticException(e);
+ }
+ rewrittenRebuild = true;
+ LOG.info("Rebuilding view " + dbDotTable);
+ super.analyzeInternal(rewrittenAST);
+ }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 4c41920cba..dab1a8863e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -55,6 +55,7 @@
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
import org.apache.hadoop.hive.ql.plan.FilterDesc.SampleDesc;
+import org.apache.hadoop.hive.ql.plan.InsertTableDesc;
import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
@@ -84,6 +85,8 @@
private HashMap<String, SplitSample> nameToSplitSample;
private List<LoadTableDesc> loadTableWork;
private List<LoadFileDesc> loadFileWork;
+ private List<PreInsertTableDesc> preInsertTableWork;
+ private List<InsertTableDesc> insertTableWork;
private List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts;
private Context ctx;
private QueryState queryState;
@@ -181,6 +184,7 @@ public ParseContext(
Set<JoinOperator> joinOps,
Set<SMBMapJoinOperator> smbMapJoinOps,
List<LoadTableDesc> loadTableWork, List<LoadFileDesc> loadFileWork,
+ List<PreInsertTableDesc> preInsertTableWork, List<InsertTableDesc> insertTableWork,
List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts,
Context ctx, HashMap<String, String> idToTableNameMap, int destTableId,
UnionProcContext uCtx, List<AbstractMapJoinOperator<? extends MapJoinDesc>> listMapJoinOpsNoReducer,
@@ -204,6 +208,8 @@ public ParseContext(
this.smbMapJoinOps = smbMapJoinOps;
this.loadFileWork = loadFileWork;
this.loadTableWork = loadTableWork;
+ this.preInsertTableWork = preInsertTableWork;
+ this.insertTableWork = insertTableWork;
this.columnStatsAutoGatherContexts = columnStatsAutoGatherContexts;
this.topOps = topOps;
this.ctx = ctx;
@@ -349,6 +355,20 @@ public void setNameToSplitSample(HashMap<String, SplitSample> nameToSplitSample)
return loadFileWork;
}
+ /**
+ * @return the preInsertTableWork
+ */
+ public List<PreInsertTableDesc> getPreInsertTableWork() {
+ return preInsertTableWork;
+ }
+
+ /**
+ * @return the insertTableWork
+ */
+ public List<InsertTableDesc> getInsertTableWork() {
+ return insertTableWork;
+ }
+
/**
* @param loadFileWork
* the loadFileWork to set
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 9d77f49e22..27442ac190 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -285,6 +285,8 @@
protected LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtx;
private List<LoadTableDesc> loadTableWork;
private List<LoadFileDesc> loadFileWork;
+ private List<PreInsertTableDesc> preInsertTableWork;
+ private List<InsertTableDesc> insertTableWork;
private final List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts;
private final Map<JoinOperator, QBJoinTree> joinContext;
private final Map<SMBMapJoinOperator, QBJoinTree> smbMapJoinContext;
@@ -330,6 +332,9 @@
// flag for no scan during analyze ... compute statistics
protected boolean noscan;
+ // whether this statement is the rewritten form of a materialized view rebuild
+ protected boolean rewrittenRebuild = false;
+
protected volatile boolean disableJoinMerge = false;
protected final boolean defaultJoinMerge;
@@ -382,6 +387,8 @@ public SemanticAnalyzer(QueryState queryState) throws SemanticException {
topOps = new LinkedHashMap<String, Operator<? extends OperatorDesc>>();
loadTableWork = new ArrayList<LoadTableDesc>();
loadFileWork = new ArrayList<LoadFileDesc>();
+ preInsertTableWork = new ArrayList<PreInsertTableDesc>();
+ insertTableWork = new ArrayList<InsertTableDesc>();
columnStatsAutoGatherContexts = new ArrayList<ColumnStatsAutoGatherContext>();
opParseCtx = new LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext>();
joinContext = new HashMap<JoinOperator, QBJoinTree>();
@@ -432,6 +439,8 @@ protected void reset(boolean clearCache) {
tabNameToTabObject.clear();
loadTableWork.clear();
loadFileWork.clear();
+ preInsertTableWork.clear();
+ insertTableWork.clear();
columnStatsAutoGatherContexts.clear();
topOps.clear();
destTableId = 1;
@@ -473,6 +482,8 @@ public void initParseCtx(ParseContext pctx) {
topOps = pctx.getTopOps();
loadTableWork = pctx.getLoadTableWork();
loadFileWork = pctx.getLoadFileWork();
+ preInsertTableWork = pctx.getPreInsertTableWork();
+ insertTableWork = pctx.getInsertTableWork();
ctx = pctx.getContext();
destTableId = pctx.getDestTableId();
idToTableNameMap = pctx.getIdToTableNameMap();
@@ -490,7 +501,8 @@ public ParseContext getParseContext() {
return new ParseContext(queryState, opToPartPruner, opToPartList, topOps,
new HashSet<JoinOperator>(joinContext.keySet()),
new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()),
- loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx,
+ loadTableWork, loadFileWork, preInsertTableWork, insertTableWork, columnStatsAutoGatherContexts,
+ ctx, idToTableNameMap, destTableId, uCtx,
listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject,
opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks,
opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
@@ -1984,7 +1996,8 @@ private void getMetaData(QB qb, ReadEntity parentInput)
switch (ast.getToken().getType()) {
case HiveParser.TOK_TAB: {
TableSpec ts = new TableSpec(db, conf, ast);
- if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
+ if (ts.tableHandle.isView() ||
+ (!rewrittenRebuild && ts.tableHandle.isMaterializedView())) {
throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
}
@@ -6893,12 +6906,15 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
// This is a non-native table.
// We need to set stats as inaccurate.
setStatsForNonNativeTable(dest_tab);
- // true if it is insert overwrite.
- boolean overwrite = !qb.getParseInfo().isInsertIntoTable(
- String.format("%s.%s", dest_tab.getDbName(), dest_tab.getTableName()));
- createInsertDesc(dest_tab, overwrite);
}
+ // We create the descriptors for the pre-insert and insert work.
+ // overwrite is true if this is an INSERT OVERWRITE rather than an INSERT INTO.
+ boolean overwrite = !qb.getParseInfo().isInsertIntoTable(
+ String.format("%s.%s", dest_tab.getDbName(), dest_tab.getTableName()));
+ preInsertTableWork.add(new PreInsertTableDesc(dest_tab, overwrite));
+ insertTableWork.add(new InsertTableDesc(dest_tab, overwrite));
+
WriteEntity output = generateTableWriteEntity(
dest, dest_tab, partSpec, ltd, dpCtx, isNonNativeTable);
ctx.getLoadTableOutputMap().put(ltd, output);
@@ -7465,18 +7481,6 @@ private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab,
return dpCtx;
}
-
- private void createInsertDesc(Table table, boolean overwrite) {
- Task<? extends Serializable>[] tasks = new Task[this.rootTasks.size()];
- tasks = this.rootTasks.toArray(tasks);
- PreInsertTableDesc preInsertTableDesc = new PreInsertTableDesc(table, overwrite);
- InsertTableDesc insertTableDesc = new InsertTableDesc(table, overwrite);
- this.rootTasks
- .add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), preInsertTableDesc), conf));
- TaskFactory
- .getAndMakeChild(new DDLWork(getInputs(), getOutputs(), insertTableDesc), conf, tasks);
- }
-
private void genAutoColumnStatsGatheringPipeline(QB qb, TableDesc table_desc,
Map<String, String> partSpec, Operator curr, boolean isInsertInto) throws SemanticException {
String tableName = table_desc.getTableName();
@@ -11207,7 +11211,7 @@ void resetToken() {
}
}
- private Table getTableObjectByName(String tableName, boolean throwException) throws HiveException {
+ protected Table getTableObjectByName(String tableName, boolean throwException) throws HiveException {
if (!tabNameToTabObject.containsKey(tableName)) {
Table table = db.getTable(tableName, throwException);
if (table != null) {
@@ -11475,8 +11479,6 @@ boolean genResolvedParseTree(ASTNode ast, PlannerContext plannerCtx) throws Sema
// 3. analyze create view command
if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW ||
ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW ||
- (ast.getToken().getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW &&
- ast.getChild(1).getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD) ||
(ast.getToken().getType() == HiveParser.TOK_ALTERVIEW &&
ast.getChild(1).getType() == HiveParser.TOK_QUERY)) {
child = analyzeCreateView(ast, qb, plannerCtx);
@@ -11698,7 +11700,8 @@ void analyzeInternal(ASTNode ast, PlannerContextFactory pcf) throws SemanticExce
ParseContext pCtx = new ParseContext(queryState, opToPartPruner, opToPartList, topOps,
new HashSet<JoinOperator>(joinContext.keySet()),
new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()),
- loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx,
+ loadTableWork, loadFileWork, preInsertTableWork, insertTableWork,
+ columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx,
listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner,
globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner,
viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
@@ -12771,10 +12774,6 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt
case HiveParser.TOK_ORREPLACE:
orReplace = true;
break;
- case HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD:
- isMaterialized = true;
- isRebuild = true;
- break;
case HiveParser.TOK_QUERY:
// For CBO
if (plannerCtx != null) {
@@ -12850,27 +12849,6 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt
}
qb.setViewDesc(createVwDesc);
- if (isRebuild) {
- // We need to go lookup the table and get the select statement and then parse it.
- try {
- Table tab = getTableObjectByName(dbDotTable, true);
- // We need to use the expanded text for the materialized view, as it will contain
- // the qualified table aliases, etc.
- String viewText = tab.getViewExpandedText();
- if (viewText.trim().isEmpty()) {
- throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY);
- }
- Context ctx = new Context(queryState.getConf());
- selectStmt = ParseUtils.parse(viewText, ctx);
- // For CBO
- if (plannerCtx != null) {
- plannerCtx.setViewToken(selectStmt);
- }
- } catch (Exception e) {
- throw new SemanticException(e);
- }
- }
-
return selectStmt;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index 34963ff0c9..78f83ef039 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -277,8 +277,7 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t
case HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD:
opType = commandType.get(child.getType());
queryState.setCommandType(opType);
- return HiveConf.getBoolVar(queryState.getConf(), HiveConf.ConfVars.HIVE_CBO_ENABLED) ?
- new CalcitePlanner(queryState) : new SemanticAnalyzer(queryState);
+ return new MaterializedViewRebuildSemanticAnalyzer(queryState);
}
// Operation not recognized, set to null and let upper level handle this case
queryState.setCommandType(null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 3122db8267..4ae3d12222 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -28,7 +28,11 @@
import java.util.Map;
import java.util.Set;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hive.ql.exec.DDLTask;
+import org.apache.hadoop.hive.ql.exec.MaterializedViewUpdateRegistryWork;
import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.plan.InsertTableDesc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Interner;
@@ -334,6 +338,45 @@ public void compile(final ParseContext pCtx,
Task<? extends Serializable> crtViewTask = TaskFactory.get(new DDLWork(
inputs, outputs, viewDesc), conf);
patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtViewTask);
+ } else if (!pCtx.getInsertTableWork().isEmpty()) {
+ // If this is an insert into a table, we create the pre-insert and insert DDL tasks
+ // for each statement.
+ List<PreInsertTableDesc> preInsertDescs = pCtx.getPreInsertTableWork();
+ List<Task<? extends Serializable>> newRoots = new ArrayList<>();
+ for (PreInsertTableDesc preInsertDesc : preInsertDescs) {
+ Task<? extends Serializable> preInsertTask = TaskFactory.get(new DDLWork(
+ inputs, outputs, preInsertDesc), conf);
+ for (Task<? extends Serializable> t : rootTasks) {
+ preInsertTask.addDependentTask(t);
+ }
+ newRoots.add(preInsertTask);
+ }
+ // The pre-insert tasks are the first thing to execute, hence we add them as roots
+ rootTasks.clear();
+ rootTasks.addAll(newRoots);
+ // Now we add the insert tasks as leaves
+ Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
+ getLeafTasks(rootTasks, leafTasks);
+ List<InsertTableDesc> insertTableDescs = pCtx.getInsertTableWork();
+ for (InsertTableDesc insertTableDesc : insertTableDescs) {
+ Task<? extends Serializable> insertTask = TaskFactory.get(new DDLWork(
+ inputs, outputs, insertTableDesc), conf);
+ for (Task<? extends Serializable> task : leafTasks) {
+ if (task instanceof StatsTask) {
+ // StatsTask require table to already exist
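+ // Re-wire the DAG so that the insert task runs after the StatsTask's parents
+ // and before the StatsTask itself.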
+ for (Task<? extends Serializable> parentOfStatsTask : task.getParentTasks()) {
+ parentOfStatsTask.addDependentTask(insertTask);
+ }
+ for (Task<? extends Serializable> parentOfCrtTblTask : insertTask.getParentTasks()) {
+ parentOfCrtTblTask.removeDependentTask(task);
+ }
+ insertTask.addDependentTask(task);
+ } else {
+ task.addDependentTask(insertTask);
+ }
+ }
+ }
}
if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) {
@@ -464,6 +507,7 @@ private void patchUpAfterCTASorMaterializedView(final List<Task<? extends Serializable>> rootTasks,
Set<Task<? extends Serializable>> leaves = new LinkedHashSet<>();
getLeafTasks(rootTasks, leaves);
assert (leaves.size() > 0);
+ Task<? extends Serializable> targetTask = createTask;
for (Task<? extends Serializable> task : leaves) {
if (task instanceof StatsTask) {
// StatsTask require table to already exist
@@ -474,10 +518,35 @@ private void patchUpAfterCTASorMaterializedView(final List 10.0
GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+DESCRIBE FORMATTED cmv_mat_view;
+
-- CANNOT USE THE VIEW, IT IS DISABLED FOR REWRITE
EXPLAIN
SELECT cmv_basetable.a
@@ -59,6 +61,8 @@ ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE;
ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE;
+DESCRIBE FORMATTED cmv_mat_view;
+
-- CANNOT USE THE VIEW, IT IS OUTDATED
EXPLAIN
SELECT cmv_basetable.a
@@ -77,6 +81,8 @@ ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+DESCRIBE FORMATTED cmv_mat_view;
+
-- NOW IT CAN BE USED AGAIN
EXPLAIN
SELECT cmv_basetable.a
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
index 294b84affd..60ff81ad53 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
@@ -1,38 +1,55 @@
-PREHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@cmv_basetable
-POSTHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cmv_basetable
-PREHOOK: query: insert into cmv_basetable values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1)
-PREHOOK: type: QUERY
+PREHOOK: query: CREATE TABLE cmv_basetable
+STORED AS orc
+TBLPROPERTIES ('transactional'='true')
+AS
+SELECT cast(current_timestamp() AS timestamp) AS t,
+ cast(a AS int) AS a,
+ cast(b AS varchar(256)) AS b,
+ cast(c AS double) AS c,
+ cast(d AS int) AS d
+FROM TABLE (
+ VALUES
+ (1, 'alfred', 10.30, 2),
+ (2, 'bob', 3.14, 3),
+ (2, 'bonnie', 172342.2, 3),
+ (3, 'calvin', 978.76, 3),
+ (3, 'charlie', 9.8, 1),
+ (3, 'charlie', 15.8, 1)) as q (a, b, c, d)
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_basetable
-POSTHOOK: query: insert into cmv_basetable values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1)
-POSTHOOK: type: QUERY
+POSTHOOK: query: CREATE TABLE cmv_basetable
+STORED AS orc
+TBLPROPERTIES ('transactional'='true')
+AS
+SELECT cast(current_timestamp() AS timestamp) AS t,
+ cast(a AS int) AS a,
+ cast(b AS varchar(256)) AS b,
+ cast(c AS double) AS c,
+ cast(d AS int) AS d
+FROM TABLE (
+ VALUES
+ (1, 'alfred', 10.30, 2),
+ (2, 'bob', 3.14, 3),
+ (2, 'bonnie', 172342.2, 3),
+ (3, 'calvin', 978.76, 3),
+ (3, 'charlie', 9.8, 1),
+ (3, 'charlie', 15.8, 1)) as q (a, b, c, d)
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_basetable
POSTHOOK: Lineage: cmv_basetable.a SCRIPT []
POSTHOOK: Lineage: cmv_basetable.b SCRIPT []
POSTHOOK: Lineage: cmv_basetable.c SCRIPT []
POSTHOOK: Lineage: cmv_basetable.d SCRIPT []
+POSTHOOK: Lineage: cmv_basetable.t SIMPLE []
PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 2
PREHOOK: type: CREATE_MATERIALIZED_VIEW
@@ -43,7 +60,7 @@ POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 2
POSTHOOK: type: CREATE_MATERIALIZED_VIEW
@@ -77,7 +94,7 @@ PREHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWR
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 3
PREHOOK: type: CREATE_MATERIALIZED_VIEW
@@ -88,7 +105,7 @@ POSTHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REW
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 3
POSTHOOK: type: CREATE_MATERIALIZED_VIEW
@@ -103,7 +120,8 @@ POSTHOOK: query: SELECT a, c FROM cmv_mat_view2
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_mat_view2
#### A masked pattern was here ####
-6 988.5599975585938
+3 978.760009765625
+6 25.600000381469727
PREHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
PREHOOK: type: SHOW_TBLPROPERTIES
POSTHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
@@ -112,7 +130,7 @@ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
druid.datasource default.cmv_mat_view2
druid.segment.granularity HOUR
numFiles 0
-numRows 2
+numRows 3
rawDataSize 0
storage_handler org.apache.hadoop.hive.druid.DruidStorageHandler
totalSize 0
@@ -137,17 +155,17 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 10760 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (a = 3) (type: boolean)
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5380 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 3 (type: int), c (type: decimal(10,2))
+ expressions: 3 (type: int), c (type: double)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5380 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5380 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -171,7 +189,8 @@ WHERE a = 3
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
#### A masked pattern was here ####
-3 9.80
+3 15.8
+3 9.8
3 978.76
Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: EXPLAIN
@@ -198,32 +217,32 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 10760 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (a = 3) (type: boolean)
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5380 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5380 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 3 Data size: 5380 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 10760 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: ((3 = a) and (d = 3)) (type: boolean)
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1793 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1793 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 1 Data size: 1793 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
Reduce Operator Tree:
Join Operator
condition map:
@@ -232,14 +251,14 @@ STAGE PLANS:
0
1
outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 18621 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10762 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2))
+ expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 18621 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10762 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 18621 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10762 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -268,15 +287,16 @@ POSTHOOK: query: SELECT * FROM (
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
#### A masked pattern was here ####
-3 9.80 3 978.76
+3 15.8 3 978.76
+3 9.8 3 978.76
3 978.76 3 978.76
PREHOOK: query: INSERT INTO cmv_basetable VALUES
- (3, 'charlie', 15.8, 1)
+ (cast(current_timestamp() AS timestamp), 3, 'charlie', 15.8, 1)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@cmv_basetable
POSTHOOK: query: INSERT INTO cmv_basetable VALUES
- (3, 'charlie', 15.8, 1)
+ (cast(current_timestamp() AS timestamp), 3, 'charlie', 15.8, 1)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@cmv_basetable
@@ -284,6 +304,7 @@ POSTHOOK: Lineage: cmv_basetable.a SCRIPT []
POSTHOOK: Lineage: cmv_basetable.b SCRIPT []
POSTHOOK: Lineage: cmv_basetable.c SCRIPT []
POSTHOOK: Lineage: cmv_basetable.d SCRIPT []
+POSTHOOK: Lineage: cmv_basetable.t SCRIPT []
Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: EXPLAIN
SELECT * FROM (
@@ -309,32 +330,32 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 20220 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (a = 3) (type: boolean)
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 3 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 20220 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: ((3 = a) and (d = 3)) (type: boolean)
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3370 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3370 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 1 Data size: 3370 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
Reduce Operator Tree:
Join Operator
condition map:
@@ -343,14 +364,14 @@ STAGE PLANS:
0
1
outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 20223 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2))
+ expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 20223 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 20223 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -379,10 +400,99 @@ POSTHOOK: query: SELECT * FROM (
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
#### A masked pattern was here ####
-3 15.80 3 978.76
-3 9.80 3 978.76
+3 15.8 3 978.76
+3 15.8 3 978.76
+3 9.8 3 978.76
3 978.76 3 978.76
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: EXPLAIN
+ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0, Stage-1
+ Stage-1 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-2
+ Pre Insert operator:
+ Pre-Insert task
+
+ Stage: Stage-0
+ Alter Table Operator:
+ Alter Table
+ type: drop props
+ old name: default.cmv_mat_view2
+ properties:
+ COLUMN_STATS_ACCURATE
+
+ Stage: Stage-3
+ Insert operator:
+ Insert
+
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: cmv_basetable
+ Statistics: Num rows: 6 Data size: 20220 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a = 3) (type: boolean)
+ Statistics: Num rows: 3 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: CAST( t AS timestamp with local time zone) (type: timestamp with local time zone), 3 (type: int), b (type: varchar(256)), c (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 3 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, __time_granularity
+ Statistics: Num rows: 3 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: __time_granularity (type: timestamp)
+ sort order: +
+ Map-reduce partition columns: __time_granularity (type: timestamp)
+ Statistics: Num rows: 3 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double)
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: timestamp with local time zone), VALUE._col1 (type: int), VALUE._col2 (type: varchar(256)), VALUE._col3 (type: double), KEY.__time_granularity (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, __time_granularity
+ Statistics: Num rows: 3 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Dp Sort State: PARTITION_SORTED
+ Statistics: Num rows: 3 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
+ output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
+ serde: org.apache.hadoop.hive.druid.serde.DruidSerDe
+ name: default.cmv_mat_view2
+
+PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cmv_basetable
+PREHOOK: Output: default@cmv_mat_view2
+POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Output: default@cmv_mat_view2
+PREHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
+PREHOOK: type: SHOW_TBLPROPERTIES
+POSTHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
+POSTHOOK: type: SHOW_TBLPROPERTIES
+druid.datasource default.cmv_mat_view2
+druid.segment.granularity HOUR
+#### A masked pattern was here ####
+numFiles 0
+numRows 3
+rawDataSize 0
+storage_handler org.apache.hadoop.hive.druid.DruidStorageHandler
+totalSize 0
+#### A masked pattern was here ####
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: EXPLAIN
SELECT * FROM (
(SELECT a, c FROM cmv_basetable WHERE a = 3) table1
@@ -407,32 +517,28 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (a = 3) (type: boolean)
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: c (type: decimal(10,2))
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
- TableScan
- alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 20220 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: ((3 = a) and (d = 3)) (type: boolean)
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3370 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3370 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 1 Data size: 3370 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
+ TableScan
+ alias: cmv_mat_view2
+ properties:
+ druid.query.json {"queryType":"select","dataSource":"default.cmv_mat_view2","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["c"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.type select
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ value expressions: c (type: double)
Reduce Operator Tree:
Join Operator
condition map:
@@ -440,15 +546,15 @@ STAGE PLANS:
keys:
0
1
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ outputColumnNames: _col1, _col5
+ Statistics: Num rows: 3 Data size: 10113 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2))
+ expressions: 3 (type: int), _col1 (type: double), 3 (type: int), _col5 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10113 Basic stats: PARTIAL Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10113 Basic stats: PARTIAL Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -460,7 +566,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: SELECT * FROM (
(SELECT a, c FROM cmv_basetable WHERE a = 3) table1
JOIN
@@ -468,6 +574,7 @@ PREHOOK: query: SELECT * FROM (
ON table1.a = table2.a)
PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
+PREHOOK: Input: default@cmv_mat_view2
#### A masked pattern was here ####
POSTHOOK: query: SELECT * FROM (
(SELECT a, c FROM cmv_basetable WHERE a = 3) table1
@@ -476,10 +583,11 @@ POSTHOOK: query: SELECT * FROM (
ON table1.a = table2.a)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Input: default@cmv_mat_view2
#### A masked pattern was here ####
-3 15.80 3 978.76
-3 9.80 3 978.76
-3 978.76 3 978.76
+3 15.800000190734863 3 978.76
+3 25.600000381469727 3 978.76
+3 978.760009765625 3 978.76
PREHOOK: query: DROP MATERIALIZED VIEW cmv_mat_view
PREHOOK: type: DROP_MATERIALIZED_VIEW
PREHOOK: Input: default@cmv_mat_view
diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
index 0d8d238e8b..49e6ac8552 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
@@ -92,6 +92,7 @@ STAGE DEPENDENCIES:
Stage-0 depends on stages: Stage-2
Stage-5 depends on stages: Stage-0
Stage-3 depends on stages: Stage-5
+ Stage-6 depends on stages: Stage-3
STAGE PLANS:
Stage: Stage-1
@@ -198,6 +199,9 @@ STAGE PLANS:
Stats Work
Basic Stats Work:
+ Stage: Stage-6
+ Materialized View Registry Update
+
PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE AS
SELECT cmv_basetable.a, cmv_basetable_2.c
FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -433,18 +437,24 @@ POSTHOOK: Input: default@cmv_basetable_2
3
PREHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
+ Stage-6 is a root stage
+ Stage-1 depends on stages: Stage-6
Stage-2 depends on stages: Stage-1
Stage-0 depends on stages: Stage-2
- Stage-5 depends on stages: Stage-0
- Stage-3 depends on stages: Stage-5
+ Stage-7 depends on stages: Stage-0, Stage-4
+ Stage-3 depends on stages: Stage-7
+ Stage-4 depends on stages: Stage-2
STAGE PLANS:
+ Stage: Stage-6
+ Pre Insert operator:
+ Pre-Insert task
+
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -523,36 +533,78 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.cmv_mat_view
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: decimal(10,2))
+ outputColumnNames: a, c
+ Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-0
Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-5
- Create View Operator:
- Create View
- columns: a int, c decimal(10,2)
- name: default.cmv_mat_view
+ tables:
replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.cmv_mat_view
+
+ Stage: Stage-7
+ Insert operator:
+ Insert
Stage: Stage-3
Stats Work
Basic Stats Work:
+ Column Stats Desc:
+ Columns: a, c
+ Column Types: int, decimal(10,2)
+ Table: default.cmv_mat_view
+
+ Stage: Stage-4
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: struct), _col1 (type: struct)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -750,17 +802,17 @@ POSTHOOK: Input: default@cmv_basetable_2
#### A masked pattern was here ####
1
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
index 8ab1517186..2595ce0455 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
@@ -92,6 +92,7 @@ STAGE DEPENDENCIES:
Stage-0 depends on stages: Stage-2
Stage-5 depends on stages: Stage-0
Stage-3 depends on stages: Stage-5
+ Stage-6 depends on stages: Stage-3
STAGE PLANS:
Stage: Stage-1
@@ -197,6 +198,9 @@ STAGE PLANS:
Stats Work
Basic Stats Work:
+ Stage: Stage-6
+ Materialized View Registry Update
+
PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view AS
SELECT cmv_basetable.a, cmv_basetable_2.c
FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -217,6 +221,49 @@ POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+PREHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cmv_mat_view
+POSTHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cmv_mat_view
+# col_name data_type comment
+a int
+c decimal(10,2)
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MATERIALIZED_VIEW
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 2
+ rawDataSize 232
+ totalSize 325
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+
+# View Information
+View Original Text: SELECT cmv_basetable.a, cmv_basetable_2.c
+ FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+ WHERE cmv_basetable_2.c > 10.0
+ GROUP BY cmv_basetable.a, cmv_basetable_2.c
+View Expanded Text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+ FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
+ WHERE `cmv_basetable_2`.`c` > 10.0
+ GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+View Rewrite Enabled: No
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -389,6 +436,49 @@ POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE
POSTHOOK: Input: default@cmv_mat_view
POSTHOOK: Output: default@cmv_mat_view
+PREHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cmv_mat_view
+POSTHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cmv_mat_view
+# col_name data_type comment
+a int
+c decimal(10,2)
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MATERIALIZED_VIEW
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 2
+ rawDataSize 232
+ totalSize 325
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+
+# View Information
+View Original Text: SELECT cmv_basetable.a, cmv_basetable_2.c
+ FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+ WHERE cmv_basetable_2.c > 10.0
+ GROUP BY cmv_basetable.a, cmv_basetable_2.c
+View Expanded Text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+ FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
+ WHERE `cmv_basetable_2`.`c` > 10.0
+ GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+View Rewrite Enabled: Yes
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -516,18 +606,24 @@ POSTHOOK: Input: default@cmv_basetable_2
3
PREHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
+ Stage-6 is a root stage
+ Stage-1 depends on stages: Stage-6
Stage-2 depends on stages: Stage-1
Stage-0 depends on stages: Stage-2
- Stage-5 depends on stages: Stage-0
- Stage-3 depends on stages: Stage-5
+ Stage-7 depends on stages: Stage-0, Stage-4
+ Stage-3 depends on stages: Stage-7
+ Stage-4 depends on stages: Stage-2
STAGE PLANS:
+ Stage: Stage-6
+ Pre Insert operator:
+ Pre-Insert task
+
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -606,36 +702,121 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.cmv_mat_view
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: decimal(10,2))
+ outputColumnNames: a, c
+ Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-0
Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-5
- Create View Operator:
- Create View
- columns: a int, c decimal(10,2)
- name: default.cmv_mat_view
+ tables:
replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.cmv_mat_view
+
+ Stage: Stage-7
+ Insert operator:
+ Insert
Stage: Stage-3
Stats Work
Basic Stats Work:
+ Column Stats Desc:
+ Columns: a, c
+ Column Types: int, decimal(10,2)
+ Table: default.cmv_mat_view
+
+ Stage: Stage-4
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: struct), _col1 (type: struct)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
+PREHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cmv_mat_view
+POSTHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cmv_mat_view
+# col_name data_type comment
+a int
+c decimal(10,2)
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MATERIALIZED_VIEW
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"c\":\"true\"}}
+ numFiles 1
+ numRows 3
+ rawDataSize 348
+ totalSize 332
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+
+# View Information
+View Original Text: SELECT cmv_basetable.a, cmv_basetable_2.c
+ FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+ WHERE cmv_basetable_2.c > 10.0
+ GROUP BY cmv_basetable.a, cmv_basetable_2.c
+View Expanded Text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+ FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
+ WHERE `cmv_basetable_2`.`c` > 10.0
+ GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+View Rewrite Enabled: Yes
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index af0fd6b0e0..6bd6aa2289 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -9293,6 +9293,265 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read
}
+ThriftHiveMetastore_update_creation_metadata_args::~ThriftHiveMetastore_update_creation_metadata_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->dbname);
+ this->__isset.dbname = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->tbl_name);
+ this->__isset.tbl_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->creation_metadata.read(iprot);
+ this->__isset.creation_metadata = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_args");
+
+ xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->dbname);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->tbl_name);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->creation_metadata.write(oprot);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_update_creation_metadata_pargs::~ThriftHiveMetastore_update_creation_metadata_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_pargs");
+
+ xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->dbname)));
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString((*(this->tbl_name)));
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->creation_metadata)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_update_creation_metadata_result::~ThriftHiveMetastore_update_creation_metadata_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o3.read(iprot);
+ this->__isset.o3 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_result");
+
+ if (this->__isset.o1) {
+ xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->o1.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o2) {
+ xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->o2.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o3) {
+ xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->o3.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_update_creation_metadata_presult::~ThriftHiveMetastore_update_creation_metadata_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o3.read(iprot);
+ this->__isset.o3 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+
ThriftHiveMetastore_get_table_names_by_filter_args::~ThriftHiveMetastore_get_table_names_by_filter_args() throw() {
}
@@ -47026,6 +47285,70 @@ void ThriftHiveMetastoreClient::recv_get_materialization_invalidation_info(std::
throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result");
}
+void ThriftHiveMetastoreClient::update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata)
+{
+ send_update_creation_metadata(dbname, tbl_name, creation_metadata);
+ recv_update_creation_metadata();
+}
+
+void ThriftHiveMetastoreClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ ThriftHiveMetastore_update_creation_metadata_pargs args;
+ args.dbname = &dbname;
+ args.tbl_name = &tbl_name;
+ args.creation_metadata = &creation_metadata;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void ThriftHiveMetastoreClient::recv_update_creation_metadata()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("update_creation_metadata") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ ThriftHiveMetastore_update_creation_metadata_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.o1) {
+ throw result.o1;
+ }
+ if (result.__isset.o2) {
+ throw result.o2;
+ }
+ if (result.__isset.o3) {
+ throw result.o3;
+ }
+ return;
+}
+
void ThriftHiveMetastoreClient::get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables)
{
send_get_table_names_by_filter(dbname, filter, max_tables);
@@ -58420,6 +58743,68 @@ void ThriftHiveMetastoreProcessor::process_get_materialization_invalidation_info
}
}
+void ThriftHiveMetastoreProcessor::process_update_creation_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("ThriftHiveMetastore.update_creation_metadata", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.update_creation_metadata");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.update_creation_metadata");
+ }
+
+ ThriftHiveMetastore_update_creation_metadata_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.update_creation_metadata", bytes);
+ }
+
+ ThriftHiveMetastore_update_creation_metadata_result result;
+ try {
+ iface_->update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata);
+ } catch (MetaException &o1) {
+ result.o1 = o1;
+ result.__isset.o1 = true;
+ } catch (InvalidOperationException &o2) {
+ result.o2 = o2;
+ result.__isset.o2 = true;
+ } catch (UnknownDBException &o3) {
+ result.o3 = o3;
+ result.__isset.o3 = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.update_creation_metadata");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.update_creation_metadata");
+ }
+
+ oprot->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.update_creation_metadata", bytes);
+ }
+}
+
void ThriftHiveMetastoreProcessor::process_get_table_names_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
{
void* ctx = NULL;
@@ -70329,6 +70714,98 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_materialization_invalidation_
} // end while(true)
}
+void ThriftHiveMetastoreConcurrentClient::update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata)
+{
+ int32_t seqid = send_update_creation_metadata(dbname, tbl_name, creation_metadata);
+ recv_update_creation_metadata(seqid);
+}
+
+int32_t ThriftHiveMetastoreConcurrentClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata)
+{
+ int32_t cseqid = this->sync_.generateSeqId();
+ ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+ oprot_->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ ThriftHiveMetastore_update_creation_metadata_pargs args;
+ args.dbname = &dbname;
+ args.tbl_name = &tbl_name;
+ args.creation_metadata = &creation_metadata;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+
+ sentry.commit();
+ return cseqid;
+}
+
+void ThriftHiveMetastoreConcurrentClient::recv_update_creation_metadata(const int32_t seqid)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ // the read mutex gets dropped and reacquired as part of waitForWork()
+ // The destructor of this sentry wakes up other clients
+ ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+ while(true) {
+ if(!this->sync_.getPending(fname, mtype, rseqid)) {
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ }
+ if(seqid == rseqid) {
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ sentry.commit();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("update_creation_metadata") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ // in a bad state, don't commit
+ using ::apache::thrift::protocol::TProtocolException;
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ }
+ ThriftHiveMetastore_update_creation_metadata_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.o1) {
+ sentry.commit();
+ throw result.o1;
+ }
+ if (result.__isset.o2) {
+ sentry.commit();
+ throw result.o2;
+ }
+ if (result.__isset.o3) {
+ sentry.commit();
+ throw result.o3;
+ }
+ sentry.commit();
+ return;
+ }
+ // seqid != rseqid
+ this->sync_.updatePending(fname, mtype, rseqid);
+
+ // this will temporarily unlock the readMutex, and let other clients get work done
+ this->sync_.waitForWork(seqid);
+ } // end while(true)
+}
+
void ThriftHiveMetastoreConcurrentClient::get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables)
{
int32_t seqid = send_get_table_names_by_filter(dbname, filter, max_tables);
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index bfa17eb3e6..2466498885 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -59,6 +59,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService
virtual void get_table_req(GetTableResult& _return, const GetTableRequest& req) = 0;
virtual void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) = 0;
virtual void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) = 0;
+ virtual void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) = 0;
virtual void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) = 0;
virtual void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) = 0;
virtual void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) = 0;
@@ -345,6 +346,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
void get_materialization_invalidation_info(std::map<std::string, Materialization> & /* _return */, const std::string& /* dbname */, const std::vector<std::string> & /* tbl_names */) {
return;
}
+ void update_creation_metadata(const std::string& /* dbname */, const std::string& /* tbl_name */, const CreationMetadata& /* creation_metadata */) {
+ return;
+ }
void get_table_names_by_filter(std::vector<std::string> & /* _return */, const std::string& /* dbname */, const std::string& /* filter */, const int16_t /* max_tables */) {
return;
}
@@ -5339,6 +5343,140 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_presult {
};
+typedef struct _ThriftHiveMetastore_update_creation_metadata_args__isset {
+ _ThriftHiveMetastore_update_creation_metadata_args__isset() : dbname(false), tbl_name(false), creation_metadata(false) {}
+ bool dbname :1;
+ bool tbl_name :1;
+ bool creation_metadata :1;
+} _ThriftHiveMetastore_update_creation_metadata_args__isset;
+
+class ThriftHiveMetastore_update_creation_metadata_args {
+ public:
+
+ ThriftHiveMetastore_update_creation_metadata_args(const ThriftHiveMetastore_update_creation_metadata_args&);
+ ThriftHiveMetastore_update_creation_metadata_args& operator=(const ThriftHiveMetastore_update_creation_metadata_args&);
+ ThriftHiveMetastore_update_creation_metadata_args() : dbname(), tbl_name() {
+ }
+
+ virtual ~ThriftHiveMetastore_update_creation_metadata_args() throw();
+ std::string dbname;
+ std::string tbl_name;
+ CreationMetadata creation_metadata;
+
+ _ThriftHiveMetastore_update_creation_metadata_args__isset __isset;
+
+ void __set_dbname(const std::string& val);
+
+ void __set_tbl_name(const std::string& val);
+
+ void __set_creation_metadata(const CreationMetadata& val);
+
+ bool operator == (const ThriftHiveMetastore_update_creation_metadata_args & rhs) const
+ {
+ if (!(dbname == rhs.dbname))
+ return false;
+ if (!(tbl_name == rhs.tbl_name))
+ return false;
+ if (!(creation_metadata == rhs.creation_metadata))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_update_creation_metadata_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_update_creation_metadata_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_update_creation_metadata_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_update_creation_metadata_pargs() throw();
+ const std::string* dbname;
+ const std::string* tbl_name;
+ const CreationMetadata* creation_metadata;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_update_creation_metadata_result__isset {
+ _ThriftHiveMetastore_update_creation_metadata_result__isset() : o1(false), o2(false), o3(false) {}
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_update_creation_metadata_result__isset;
+
+class ThriftHiveMetastore_update_creation_metadata_result {
+ public:
+
+ ThriftHiveMetastore_update_creation_metadata_result(const ThriftHiveMetastore_update_creation_metadata_result&);
+ ThriftHiveMetastore_update_creation_metadata_result& operator=(const ThriftHiveMetastore_update_creation_metadata_result&);
+ ThriftHiveMetastore_update_creation_metadata_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_update_creation_metadata_result() throw();
+ MetaException o1;
+ InvalidOperationException o2;
+ UnknownDBException o3;
+
+ _ThriftHiveMetastore_update_creation_metadata_result__isset __isset;
+
+ void __set_o1(const MetaException& val);
+
+ void __set_o2(const InvalidOperationException& val);
+
+ void __set_o3(const UnknownDBException& val);
+
+ bool operator == (const ThriftHiveMetastore_update_creation_metadata_result & rhs) const
+ {
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_update_creation_metadata_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_update_creation_metadata_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_update_creation_metadata_presult__isset {
+ _ThriftHiveMetastore_update_creation_metadata_presult__isset() : o1(false), o2(false), o3(false) {}
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_update_creation_metadata_presult__isset;
+
+class ThriftHiveMetastore_update_creation_metadata_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_update_creation_metadata_presult() throw();
+ MetaException o1;
+ InvalidOperationException o2;
+ UnknownDBException o3;
+
+ _ThriftHiveMetastore_update_creation_metadata_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
typedef struct _ThriftHiveMetastore_get_table_names_by_filter_args__isset {
_ThriftHiveMetastore_get_table_names_by_filter_args__isset() : dbname(false), filter(false), max_tables(true) {}
bool dbname :1;
@@ -23282,6 +23420,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
void send_get_materialization_invalidation_info(const std::string& dbname, const std::vector<std::string> & tbl_names);
void recv_get_materialization_invalidation_info(std::map<std::string, Materialization> & _return);
+ void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
+ void send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
+ void recv_update_creation_metadata();
void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables);
void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables);
void recv_get_table_names_by_filter(std::vector<std::string> & _return);
@@ -23761,6 +23902,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
void process_get_table_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_get_table_objects_by_name_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_get_materialization_invalidation_info(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_update_creation_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_get_table_names_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_alter_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_alter_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@@ -23946,6 +24088,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
processMap_["get_table_req"] = &ThriftHiveMetastoreProcessor::process_get_table_req;
processMap_["get_table_objects_by_name_req"] = &ThriftHiveMetastoreProcessor::process_get_table_objects_by_name_req;
processMap_["get_materialization_invalidation_info"] = &ThriftHiveMetastoreProcessor::process_get_materialization_invalidation_info;
+ processMap_["update_creation_metadata"] = &ThriftHiveMetastoreProcessor::process_update_creation_metadata;
processMap_["get_table_names_by_filter"] = &ThriftHiveMetastoreProcessor::process_get_table_names_by_filter;
processMap_["alter_table"] = &ThriftHiveMetastoreProcessor::process_alter_table;
processMap_["alter_table_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_alter_table_with_environment_context;
@@ -24476,6 +24619,15 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
return;
}
+ void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) {
+ size_t sz = ifaces_.size();
+ size_t i = 0;
+ for (; i < (sz - 1); ++i) {
+ ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata);
+ }
+ ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata);
+ }
+
void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) {
size_t sz = ifaces_.size();
size_t i = 0;
@@ -25994,6 +26146,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
int32_t send_get_materialization_invalidation_info(const std::string& dbname, const std::vector<std::string> & tbl_names);
void recv_get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const int32_t seqid);
+ void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
+ int32_t send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
+ void recv_update_creation_metadata(const int32_t seqid);
void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables);
int32_t send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables);
void recv_get_table_names_by_filter(std::vector<std::string> & _return, const int32_t seqid);
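
For orientation only (not part of the patch): a minimal sketch of how a caller might exercise the new update_creation_metadata RPC through the generated C++ client declared above. The transport/protocol wiring, the host/port, and the contents of CreationMetadata are illustrative assumptions; depending on the Thrift version, the shared_ptr type may be boost:: or std::.

// update_creation_metadata_example.cpp -- hypothetical caller sketch
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"   // generated header shown in this patch

using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace Apache::Hadoop::Hive;

int main() {
  // Assumed: a metastore listening on localhost:9083.
  boost::shared_ptr<TTransport> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);

  transport->open();
  // The tables the materialized view reads from would be populated here;
  // the exact setters depend on the generated CreationMetadata struct.
  CreationMetadata cm;
  client.update_creation_metadata("default", "cmv_mat_view2", cm);
  transport->close();
  return 0;
}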
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index cf9a1713aa..f5dc9f08ae 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -207,6 +207,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
printf("get_materialization_invalidation_info\n");
}
+ void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) {
+ // Your implementation goes here
+ printf("update_creation_metadata\n");
+ }
+
void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) {
// Your implementation goes here
printf("get_table_names_by_filter\n");
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index aadf8f17c4..8f04b9da9e 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -21187,14 +21187,15 @@ Materialization::~Materialization() throw() {
}
-void Materialization::__set_materializationTable(const Table& val) {
- this->materializationTable = val;
-}
-
void Materialization::__set_tablesUsed(const std::set<std::string> & val) {
this->tablesUsed = val;
}
+void Materialization::__set_validTxnList(const std::string& val) {
+ this->validTxnList = val;
+__isset.validTxnList = true;
+}
+
void Materialization::__set_invalidationTime(const int64_t val) {
this->invalidationTime = val;
}
@@ -21211,7 +21212,6 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
using ::apache::thrift::protocol::TProtocolException;
- bool isset_materializationTable = false;
bool isset_tablesUsed = false;
bool isset_invalidationTime = false;
@@ -21224,14 +21224,6 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
switch (fid)
{
case 1:
- if (ftype == ::apache::thrift::protocol::T_STRUCT) {
- xfer += this->materializationTable.read(iprot);
- isset_materializationTable = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- case 2:
if (ftype == ::apache::thrift::protocol::T_SET) {
{
this->tablesUsed.clear();
@@ -21252,6 +21244,14 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validTxnList);
+ this->__isset.validTxnList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
case 3:
if (ftype == ::apache::thrift::protocol::T_I64) {
xfer += iprot->readI64(this->invalidationTime);
@@ -21269,8 +21269,6 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->readStructEnd();
- if (!isset_materializationTable)
- throw TProtocolException(TProtocolException::INVALID_DATA);
if (!isset_tablesUsed)
throw TProtocolException(TProtocolException::INVALID_DATA);
if (!isset_invalidationTime)
@@ -21283,11 +21281,7 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
xfer += oprot->writeStructBegin("Materialization");
- xfer += oprot->writeFieldBegin("materializationTable", ::apache::thrift::protocol::T_STRUCT, 1);
- xfer += this->materializationTable.write(oprot);
- xfer += oprot->writeFieldEnd();
-
- xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 2);
+ xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 1);
{
xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tablesUsed.size()));
std::set<std::string> ::const_iterator _iter880;
@@ -21299,6 +21293,11 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co
}
xfer += oprot->writeFieldEnd();
+ if (this->__isset.validTxnList) {
+ xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->validTxnList);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldBegin("invalidationTime", ::apache::thrift::protocol::T_I64, 3);
xfer += oprot->writeI64(this->invalidationTime);
xfer += oprot->writeFieldEnd();
@@ -21310,27 +21309,30 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co
void swap(Materialization &a, Materialization &b) {
using ::std::swap;
- swap(a.materializationTable, b.materializationTable);
swap(a.tablesUsed, b.tablesUsed);
+ swap(a.validTxnList, b.validTxnList);
swap(a.invalidationTime, b.invalidationTime);
+ swap(a.__isset, b.__isset);
}
Materialization::Materialization(const Materialization& other881) {
- materializationTable = other881.materializationTable;
tablesUsed = other881.tablesUsed;
+ validTxnList = other881.validTxnList;
invalidationTime = other881.invalidationTime;
+ __isset = other881.__isset;
}
Materialization& Materialization::operator=(const Materialization& other882) {
- materializationTable = other882.materializationTable;
tablesUsed = other882.tablesUsed;
+ validTxnList = other882.validTxnList;
invalidationTime = other882.invalidationTime;
+ __isset = other882.__isset;
return *this;
}
void Materialization::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
out << "Materialization(";
- out << "materializationTable=" << to_string(materializationTable);
- out << ", " << "tablesUsed=" << to_string(tablesUsed);
+ out << "tablesUsed=" << to_string(tablesUsed);
+ out << ", " << "validTxnList="; (__isset.validTxnList ? (out << to_string(validTxnList)) : (out << "<null>"));
out << ", " << "invalidationTime=" << to_string(invalidationTime);
out << ")";
}
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 4c09bc8fe6..c25089357b 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -8707,32 +8707,40 @@ inline std::ostream& operator<<(std::ostream& out, const TableMeta& obj)
return out;
}
+typedef struct _Materialization__isset {
+ _Materialization__isset() : validTxnList(false) {}
+ bool validTxnList :1;
+} _Materialization__isset;
class Materialization {
public:
Materialization(const Materialization&);
Materialization& operator=(const Materialization&);
- Materialization() : invalidationTime(0) {
+ Materialization() : validTxnList(), invalidationTime(0) {
}
virtual ~Materialization() throw();
- Table materializationTable;
std::set<std::string> tablesUsed;
+ std::string validTxnList;
int64_t invalidationTime;
- void __set_materializationTable(const Table& val);
+ _Materialization__isset __isset;
void __set_tablesUsed(const std::set<std::string> & val);
+ void __set_validTxnList(const std::string& val);
+
void __set_invalidationTime(const int64_t val);
bool operator == (const Materialization & rhs) const
{
- if (!(materializationTable == rhs.materializationTable))
- return false;
if (!(tablesUsed == rhs.tablesUsed))
return false;
+ if (__isset.validTxnList != rhs.__isset.validTxnList)
+ return false;
+ else if (__isset.validTxnList && !(validTxnList == rhs.validTxnList))
+ return false;
if (!(invalidationTime == rhs.invalidationTime))
return false;
return true;
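With materializationTable dropped, the regenerated Materialization bean (Java version in the next file) is built from just the tables used plus the invalidation time, and validTxnList travels only when it has been set. A minimal sketch against that regenerated class; the class name here and the txn list value are placeholders for illustration.

// Minimal sketch against the regenerated Materialization bean (see Java diff below);
// the validTxnList value is a placeholder, not a real serialized txn list.
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hive.metastore.api.Materialization;

public class MaterializationSketch {
  static Materialization describe(Set<String> tablesUsed, long invalidationTime) {
    // materializationTable is gone: only the tables used and the invalidation
    // time are required by the constructor.
    Materialization m = new Materialization(tablesUsed, invalidationTime);

    // validTxnList is the new optional field (id 2); attach it only when known.
    m.setValidTxnList("placeholder-valid-txn-list");
    if (m.isSetValidTxnList()) {
      System.out.println("valid txns: " + m.getValidTxnList());
    }
    return m;
  }

  public static void main(String[] args) {
    Set<String> tables = new HashSet<>();
    tables.add("default.ptned");
    describe(tables, 0L);
  }
}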
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
index b399d66422..ccef0244be 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
@@ -38,8 +38,8 @@
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Materialization implements org.apache.thrift.TBase<Materialization, Materialization._Fields>, java.io.Serializable, Cloneable, Comparable<Materialization> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Materialization");
- private static final org.apache.thrift.protocol.TField MATERIALIZATION_TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("materializationTable", org.apache.thrift.protocol.TType.STRUCT, (short)1);
- private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)2);
+ private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)1);
+ private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final org.apache.thrift.protocol.TField INVALIDATION_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("invalidationTime", org.apache.thrift.protocol.TType.I64, (short)3);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
@@ -48,14 +48,14 @@
schemes.put(TupleScheme.class, new MaterializationTupleSchemeFactory());
}
- private Table materializationTable; // required
private Set<String> tablesUsed; // required
+ private String validTxnList; // optional
private long invalidationTime; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- MATERIALIZATION_TABLE((short)1, "materializationTable"),
- TABLES_USED((short)2, "tablesUsed"),
+ TABLES_USED((short)1, "tablesUsed"),
+ VALID_TXN_LIST((short)2, "validTxnList"),
INVALIDATION_TIME((short)3, "invalidationTime");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -71,10 +71,10 @@
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
- case 1: // MATERIALIZATION_TABLE
- return MATERIALIZATION_TABLE;
- case 2: // TABLES_USED
+ case 1: // TABLES_USED
return TABLES_USED;
+ case 2: // VALID_TXN_LIST
+ return VALID_TXN_LIST;
case 3: // INVALIDATION_TIME
return INVALIDATION_TIME;
default:
@@ -119,14 +119,15 @@ public String getFieldName() {
// isset id assignments
private static final int __INVALIDATIONTIME_ISSET_ID = 0;
private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.VALID_TXN_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.MATERIALIZATION_TABLE, new org.apache.thrift.meta_data.FieldMetaData("materializationTable", org.apache.thrift.TFieldRequirementType.REQUIRED,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class)));
tmpMap.put(_Fields.TABLES_USED, new org.apache.thrift.meta_data.FieldMetaData("tablesUsed", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+ tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.INVALIDATION_TIME, new org.apache.thrift.meta_data.FieldMetaData("invalidationTime", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -137,12 +138,10 @@ public Materialization() {
}
public Materialization(
- Table materializationTable,
Set<String> tablesUsed,
long invalidationTime)
{
this();
- this.materializationTable = materializationTable;
this.tablesUsed = tablesUsed;
this.invalidationTime = invalidationTime;
setInvalidationTimeIsSet(true);
@@ -153,13 +152,13 @@ public Materialization(
*/
public Materialization(Materialization other) {
__isset_bitfield = other.__isset_bitfield;
- if (other.isSetMaterializationTable()) {
- this.materializationTable = new Table(other.materializationTable);
- }
if (other.isSetTablesUsed()) {
Set<String> __this__tablesUsed = new HashSet<String>(other.tablesUsed);
this.tablesUsed = __this__tablesUsed;
}
+ if (other.isSetValidTxnList()) {
+ this.validTxnList = other.validTxnList;
+ }
this.invalidationTime = other.invalidationTime;
}
@@ -169,35 +168,12 @@ public Materialization deepCopy() {
@Override
public void clear() {
- this.materializationTable = null;
this.tablesUsed = null;
+ this.validTxnList = null;
setInvalidationTimeIsSet(false);
this.invalidationTime = 0;
}
- public Table getMaterializationTable() {
- return this.materializationTable;
- }
-
- public void setMaterializationTable(Table materializationTable) {
- this.materializationTable = materializationTable;
- }
-
- public void unsetMaterializationTable() {
- this.materializationTable = null;
- }
-
- /** Returns true if field materializationTable is set (has been assigned a value) and false otherwise */
- public boolean isSetMaterializationTable() {
- return this.materializationTable != null;
- }
-
- public void setMaterializationTableIsSet(boolean value) {
- if (!value) {
- this.materializationTable = null;
- }
- }
-
public int getTablesUsedSize() {
return (this.tablesUsed == null) ? 0 : this.tablesUsed.size();
}
@@ -236,6 +212,29 @@ public void setTablesUsedIsSet(boolean value) {
}
}
+ public String getValidTxnList() {
+ return this.validTxnList;
+ }
+
+ public void setValidTxnList(String validTxnList) {
+ this.validTxnList = validTxnList;
+ }
+
+ public void unsetValidTxnList() {
+ this.validTxnList = null;
+ }
+
+ /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidTxnList() {
+ return this.validTxnList != null;
+ }
+
+ public void setValidTxnListIsSet(boolean value) {
+ if (!value) {
+ this.validTxnList = null;
+ }
+ }
+
public long getInvalidationTime() {
return this.invalidationTime;
}
@@ -260,19 +259,19 @@ public void setInvalidationTimeIsSet(boolean value) {
public void setFieldValue(_Fields field, Object value) {
switch (field) {
- case MATERIALIZATION_TABLE:
+ case TABLES_USED:
if (value == null) {
- unsetMaterializationTable();
+ unsetTablesUsed();
} else {
- setMaterializationTable((Table)value);
+ setTablesUsed((Set<String>)value);
}
break;
- case TABLES_USED:
+ case VALID_TXN_LIST:
if (value == null) {
- unsetTablesUsed();
+ unsetValidTxnList();
} else {
- setTablesUsed((Set<String>)value);
+ setValidTxnList((String)value);
}
break;
@@ -289,12 +288,12 @@ public void setFieldValue(_Fields field, Object value) {
public Object getFieldValue(_Fields field) {
switch (field) {
- case MATERIALIZATION_TABLE:
- return getMaterializationTable();
-
case TABLES_USED:
return getTablesUsed();
+ case VALID_TXN_LIST:
+ return getValidTxnList();
+
case INVALIDATION_TIME:
return getInvalidationTime();
@@ -309,10 +308,10 @@ public boolean isSet(_Fields field) {
}
switch (field) {
- case MATERIALIZATION_TABLE:
- return isSetMaterializationTable();
case TABLES_USED:
return isSetTablesUsed();
+ case VALID_TXN_LIST:
+ return isSetValidTxnList();
case INVALIDATION_TIME:
return isSetInvalidationTime();
}
@@ -332,15 +331,6 @@ public boolean equals(Materialization that) {
if (that == null)
return false;
- boolean this_present_materializationTable = true && this.isSetMaterializationTable();
- boolean that_present_materializationTable = true && that.isSetMaterializationTable();
- if (this_present_materializationTable || that_present_materializationTable) {
- if (!(this_present_materializationTable && that_present_materializationTable))
- return false;
- if (!this.materializationTable.equals(that.materializationTable))
- return false;
- }
-
boolean this_present_tablesUsed = true && this.isSetTablesUsed();
boolean that_present_tablesUsed = true && that.isSetTablesUsed();
if (this_present_tablesUsed || that_present_tablesUsed) {
@@ -350,6 +340,15 @@ public boolean equals(Materialization that) {
return false;
}
+ boolean this_present_validTxnList = true && this.isSetValidTxnList();
+ boolean that_present_validTxnList = true && that.isSetValidTxnList();
+ if (this_present_validTxnList || that_present_validTxnList) {
+ if (!(this_present_validTxnList && that_present_validTxnList))
+ return false;
+ if (!this.validTxnList.equals(that.validTxnList))
+ return false;
+ }
+
boolean this_present_invalidationTime = true;
boolean that_present_invalidationTime = true;
if (this_present_invalidationTime || that_present_invalidationTime) {
@@ -366,16 +365,16 @@ public boolean equals(Materialization that) {
public int hashCode() {
List