diff --git a/data/conf/hive-site.xml b/data/conf/hive-site.xml index b56cbd2469..932d996f33 100644 --- a/data/conf/hive-site.xml +++ b/data/conf/hive-site.xml @@ -141,7 +141,7 @@ hive.exec.pre.hooks - org.apache.hadoop.hive.ql.hooks.PreExecutePrinter, org.apache.hadoop.hive.ql.hooks.EnforceReadOnlyTables, org.apache.hadoop.hive.ql.hooks.MaterializedViewRegistryPropertiesHook + org.apache.hadoop.hive.ql.hooks.PreExecutePrinter, org.apache.hadoop.hive.ql.hooks.EnforceReadOnlyTables Pre Execute Hook for Tests diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 78b26374f2..a3725c5395 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -247,6 +248,12 @@ public boolean dropPartition(String dbName, String tableName, List partV return objectStore.getPartitions(dbName, tableName, max); } + @Override + public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + throws MetaException { + objectStore.updateCreationMetadata(dbname, tablename, cm); + } + @Override public void alterTable(String dbName, String name, Table newTable) throws InvalidObjectException, MetaException { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index d763666ab3..41c89b1cd3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -2228,9 +2228,8 @@ public void testViewsReplication() throws IOException { run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver); verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver); - // TODO: Enable back when HIVE-18387 goes in, as it fixes the issue. - // The problem is that alter for stats is removing the metadata information. - // HIVE-18387 rewrites that logic and will fix the issue. + // TODO: This does not work because materialized views need the creation metadata + // to be updated in case tables used were replicated to a different database. 
//run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view AS SELECT a FROM " + dbName + ".ptned where b=1", driver); //verifySetup("SELECT a from " + dbName + ".mat_view", ptn_data_1, driver); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 883dcdad0d..6087e0209b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -455,6 +455,7 @@ "Alter table with non-partitioned table does not support cascade"), HIVE_GROUPING_SETS_SIZE_LIMIT(10411, "Grouping sets size cannot be greater than 64"), + REBUILD_NO_MATERIALIZED_VIEW(10412, "Rebuild command only valid for materialized views"), //========================== 20000 range starts here ========================// diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java index 52e99f9f09..2a32a51588 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hive.ql.hooks.Hook; import org.apache.hadoop.hive.ql.hooks.HookContext; import org.apache.hadoop.hive.ql.hooks.HookUtils; -import org.apache.hadoop.hive.ql.hooks.MaterializedViewRegistryUpdateHook; import org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook; import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook; import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext; @@ -83,7 +82,6 @@ public void initialize() { if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED)) { queryHooks.add(new MetricsQueryLifeTimeHook()); } - queryHooks.add(new MaterializedViewRegistryUpdateHook()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 802349fe86..3716c15ace 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -4923,39 +4923,30 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { throw new HiveException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(crtView.getViewName())); } - if (crtView.isMaterialized()) { - // We need to update the status of the creation signature - CreationMetadata cm = - new CreationMetadata(oldview.getDbName(), oldview.getTableName(), - ImmutableSet.copyOf(crtView.getTablesUsed())); - cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY)); - oldview.getTTable().setCreationMetadata(cm); - db.alterTable(crtView.getViewName(), oldview, null); - // This is a replace/rebuild, so we need an exclusive lock - addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } else { - // replace existing view - // remove the existing partition columns from the field schema - oldview.setViewOriginalText(crtView.getViewOriginalText()); - oldview.setViewExpandedText(crtView.getViewExpandedText()); - oldview.setFields(crtView.getSchema()); - if (crtView.getComment() != null) { - oldview.setProperty("comment", crtView.getComment()); - } - if (crtView.getTblProps() != null) { - oldview.getTTable().getParameters().putAll(crtView.getTblProps()); - } - oldview.setPartCols(crtView.getPartCols()); - if (crtView.getInputFormat() != null) { - oldview.setInputFormatClass(crtView.getInputFormat()); - } - if (crtView.getOutputFormat() != null) { - oldview.setOutputFormatClass(crtView.getOutputFormat()); - } - oldview.checkValidity(null); - 
db.alterTable(crtView.getViewName(), oldview, null); - addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK)); - } + // It should not be a materialized view + assert !crtView.isMaterialized(); + + // replace existing view + // remove the existing partition columns from the field schema + oldview.setViewOriginalText(crtView.getViewOriginalText()); + oldview.setViewExpandedText(crtView.getViewExpandedText()); + oldview.setFields(crtView.getSchema()); + if (crtView.getComment() != null) { + oldview.setProperty("comment", crtView.getComment()); + } + if (crtView.getTblProps() != null) { + oldview.getTTable().getParameters().putAll(crtView.getTblProps()); + } + oldview.setPartCols(crtView.getPartCols()); + if (crtView.getInputFormat() != null) { + oldview.setInputFormatClass(crtView.getInputFormat()); + } + if (crtView.getOutputFormat() != null) { + oldview.setOutputFormatClass(crtView.getOutputFormat()); + } + oldview.checkValidity(null); + db.alterTable(crtView.getViewName(), oldview, null); + addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK)); } else { // We create new view Table tbl = crtView.toTable(conf); @@ -4977,8 +4968,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { return 0; } - private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException { - + private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException { if (truncateTableDesc.getColumnIndexes() != null) { ColumnTruncateWork truncateWork = new ColumnTruncateWork( truncateTableDesc.getColumnIndexes(), truncateTableDesc.getInputDir(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java new file mode 100644 index 0000000000..1e28ca843f --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +import java.io.Serializable; + +@Explain(displayName = "Materialized View Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class MaterializedViewDesc implements Serializable { + private static final long serialVersionUID = 1L; + private final String viewName; + private final boolean retrieveAndInclude; + private final boolean disableRewrite; + private final boolean updateCreationMetadata; + + public MaterializedViewDesc(String viewName, boolean retrieveAndInclude, boolean disableRewrite, + boolean updateCreationMetadata) { + this.viewName = viewName; + this.retrieveAndInclude = retrieveAndInclude; + this.disableRewrite = disableRewrite; + this.updateCreationMetadata = updateCreationMetadata; + } + + public String getViewName() { + return viewName; + } + + public boolean isRetrieveAndInclude() { + return retrieveAndInclude; + } + + public boolean isDisableRewrite() { + return disableRewrite; + } + + public boolean isUpdateCreationMetadata() { + return updateCreationMetadata; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java new file mode 100644 index 0000000000..2b345d6ec7 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec; + +import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; +import org.apache.hadoop.hive.ql.plan.api.StageType; + +import java.io.Serializable; + +/** + * This task does some work related to materialized views. In particular, it adds + * or removes the materialized view from the registry if needed, or registers new + * creation metadata. 
+ */ +public class MaterializedViewTask extends Task implements Serializable { + + private static final long serialVersionUID = 1L; + + public MaterializedViewTask() { + super(); + } + + @Override + public int execute(DriverContext driverContext) { + if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) { + return 0; + } + try { + if (getWork().isRetrieveAndInclude()) { + Hive db = Hive.get(conf); + Table mvTable = db.getTable(getWork().getViewName()); + HiveMaterializedViewsRegistry.get().createMaterializedView(db.getConf(), mvTable); + } else if (getWork().isDisableRewrite()) { + // Disabling rewriting, removing from cache + String[] names = getWork().getViewName().split("\\."); + HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]); + } else if (getWork().isUpdateCreationMetadata()) { + // We need to update the status of the creation signature + Hive db = Hive.get(conf); + Table mvTable = db.getTable(getWork().getViewName()); + CreationMetadata cm = + new CreationMetadata(mvTable.getDbName(), mvTable.getTableName(), + ImmutableSet.copyOf(mvTable.getCreationMetadata().getTablesUsed())); + cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY)); + db.updateCreationMetadata(mvTable.getDbName(), mvTable.getTableName(), cm); + } + } catch (HiveException e) { + LOG.debug("Exception during materialized view cache update", e); + } + return 0; + } + + @Override + public StageType getType() { + return StageType.DDL; + } + + @Override + public String getName() { + return MaterializedViewTask.class.getSimpleName(); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index 83590e2176..d049c37ff6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -81,6 +81,9 @@ public TaskTuple(Class workClass, Class> taskClass) { taskvec.add(new TaskTuple(CopyWork.class, CopyTask.class)); taskvec.add(new TaskTuple(ReplCopyWork.class, ReplCopyTask.class)); taskvec.add(new TaskTuple(DDLWork.class, DDLTask.class)); + taskvec.add(new TaskTuple( + MaterializedViewDesc.class, + MaterializedViewTask.class)); taskvec.add(new TaskTuple(FunctionWork.class, FunctionTask.class)); taskvec diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryPropertiesHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryPropertiesHook.java deleted file mode 100644 index 98d5e88989..0000000000 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryPropertiesHook.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.hooks; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; -import org.apache.hadoop.hive.ql.session.SessionState; - -/** - * Implementation of a pre execute hook that reloads the materialized view registry - * if needed by the test framework - */ -public class MaterializedViewRegistryPropertiesHook implements ExecuteWithHookContext { - - @Override - public void run(HookContext hookContext) throws Exception { - SessionState ss = SessionState.get(); - if (ss != null && ss.getConf().get(HiveConf.ConfVars.HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL.varname) - .equals("DUMMY")) { - HiveMaterializedViewsRegistry.get().init(Hive.get(ss.getConf())); - } - } - -} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java deleted file mode 100644 index e886399d53..0000000000 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.hooks; - -import java.io.Serializable; -import java.util.List; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.ql.exec.DDLTask; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.TaskRunner; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Updates the materialized view registry after changes. 
- */ -public class MaterializedViewRegistryUpdateHook implements QueryLifeTimeHook { - - private static final Logger LOG = LoggerFactory.getLogger(MaterializedViewRegistryUpdateHook.class); - - @Override - public void beforeCompile(QueryLifeTimeHookContext ctx) { - } - - @Override - public void afterCompile(QueryLifeTimeHookContext ctx, boolean hasError) { - } - - @Override - public void beforeExecution(QueryLifeTimeHookContext ctx) { - } - - @Override - public void afterExecution(QueryLifeTimeHookContext ctx, boolean hasError) { - if (hasError) { - return; - } - HiveConf hiveConf = ctx.getHiveConf(); - try { - List completedTasks = ctx.getHookContext().getCompleteTaskList(); - for (TaskRunner taskRunner : completedTasks) { - Task task = taskRunner.getTask(); - if (task instanceof DDLTask) { - DDLTask ddlTask = (DDLTask) task; - DDLWork work = ddlTask.getWork(); - String tableName = null; - boolean isRewriteEnabled = false; - if (work.getCreateViewDesc() != null && work.getCreateViewDesc().isMaterialized()) { - tableName = work.getCreateViewDesc().toTable(hiveConf).getFullyQualifiedName(); - isRewriteEnabled = work.getCreateViewDesc().isRewriteEnabled(); - } else if (work.getAlterMaterializedViewDesc() != null) { - tableName = work.getAlterMaterializedViewDesc().getMaterializedViewName(); - isRewriteEnabled = work.getAlterMaterializedViewDesc().isRewriteEnable(); - } else { - continue; - } - - if (isRewriteEnabled) { - Hive db = Hive.get(); - Table mvTable = db.getTable(tableName); - HiveMaterializedViewsRegistry.get().createMaterializedView(db.getConf(), mvTable); - } else if (work.getAlterMaterializedViewDesc() != null) { - // Disabling rewriting, removing from cache - String[] names = tableName.split("\\."); - HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]); - } - } - } - } catch (HiveException e) { - if (HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING)) { - String message = "Error updating materialized view cache; consider disabling: " + ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING.varname; - LOG.error(message, e); - throw new RuntimeException(message, e); - } else { - LOG.debug("Exception during materialized view cache update", e); - } - } - } - -} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index a45cac60cb..7b7e14071e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -90,6 +90,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -124,6 +125,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.metastore.api.WMNullablePool; @@ -658,6 +660,15 @@ public void alterTable(String dbName, String tblName, Table newTbl, boolean 
casc } } + public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) + throws HiveException { + try { + getMSC().updateCreationMetadata(dbName, tableName, cm); + } catch (TException e) { + throw new HiveException("Unable to update creation metadata " + e.getMessage(), e); + } + } + /** * Updates the existing partition metadata with the new metadata. * diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 59c0fe4d4c..302314405d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1509,22 +1509,12 @@ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask, table = null; } } else if (mvWork.getLoadFileWork().getCreateViewDesc() != null) { - if (mvWork.getLoadFileWork().getCreateViewDesc().isReplace()) { - // ALTER MV ... REBUILD - String tableName = mvWork.getLoadFileWork().getCreateViewDesc().getViewName(); - try { - table = Hive.get().getTable(tableName); - } catch (HiveException e) { - throw new RuntimeException("unexpected; MV should be present already..: " + tableName, e); - } - } else { - // CREATE MATERIALIZED VIEW ... - try { - table = mvWork.getLoadFileWork().getCreateViewDesc().toTable(hconf); - } catch (HiveException e) { - LOG.debug("can't pre-create table for MV", e); - table = null; - } + // CREATE MATERIALIZED VIEW ... + try { + table = mvWork.getLoadFileWork().getCreateViewDesc().toTable(hconf); + } catch (HiveException e) { + LOG.debug("can't pre-create table for MV", e); + table = null; } } else { throw new RuntimeException("unexpected; this should be a CTAS or a CREATE/REBUILD MV - however no desc present"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index d18dba554e..171825eb74 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -1185,9 +1185,7 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit || ast.getToken().getType() == HiveParser.TOK_TABLE_PARTITION || ast.getToken().getType() == HiveParser.TOK_TABTYPE || ast.getToken().getType() == HiveParser.TOK_CREATETABLE - || ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW - || (ast.getToken().getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW && - ast.getChild(1).getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD)); + || ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW); int childIndex = 0; numDynParts = 0; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java new file mode 100644 index 0000000000..75eb50c579 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse; + +import org.apache.hadoop.hive.common.HiveStatsUtils; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.conf.HiveVariableSource; +import org.apache.hadoop.hive.conf.VariableSubstitution; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +/** + * MaterializedViewRebuildSemanticAnalyzer. + * Rewrites ALTER MATERIALIZED VIEW _mv_name_ REBUILD statement into + * INSERT OVERWRITE TABLE _mv_name_ _mv_query_ . + */ +public class MaterializedViewRebuildSemanticAnalyzer extends CalcitePlanner { + + private static final Logger LOG = + LoggerFactory.getLogger(MaterializedViewRebuildSemanticAnalyzer.class); + static final private LogHelper console = new LogHelper(LOG); + + + public MaterializedViewRebuildSemanticAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + + @Override + public void analyzeInternal(ASTNode ast) throws SemanticException { + if (rewrittenRebuild) { + super.analyzeInternal(ast); + return; + } + + String[] qualifiedTableName = getQualifiedTableName((ASTNode) ast.getChild(0)); + String dbDotTable = getDotName(qualifiedTableName); + ASTNode rewrittenAST; + // We need to go lookup the table and get the select statement and then parse it. + try { + Table tab = getTableObjectByName(dbDotTable, true); + if (!tab.isMaterializedView()) { + // Cannot rebuild not materialized view + throw new SemanticException(ErrorMsg.REBUILD_NO_MATERIALIZED_VIEW); + } + // We need to use the expanded text for the materialized view, as it will contain + // the qualified table aliases, etc. 
+ String viewText = tab.getViewExpandedText(); + if (viewText.trim().isEmpty()) { + throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY); + } + Context ctx = new Context(queryState.getConf()); + rewrittenAST = ParseUtils.parse("insert overwrite table `" + + dbDotTable + "` " + viewText, ctx); + this.ctx.addRewrittenStatementContext(ctx); + } catch (Exception e) { + throw new SemanticException(e); + } + rewrittenRebuild = true; + LOG.info("Rebuilding view " + dbDotTable); + super.analyzeInternal(rewrittenAST); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 4c41920cba..d890b319e9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.exec.JoinOperator; import org.apache.hadoop.hive.ql.exec.ListSinkOperator; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator; @@ -118,6 +119,7 @@ private AnalyzeRewriteContext analyzeRewrite; private CreateTableDesc createTableDesc; private CreateViewDesc createViewDesc; + private MaterializedViewDesc materializedViewUpdateDesc; private boolean reduceSinkAddedBySortedDynPartition; private Map viewProjectToViewSchema; @@ -194,7 +196,7 @@ public ParseContext( Map viewAliasToInput, List reduceSinkOperatorsAddedByEnforceBucketingSorting, AnalyzeRewriteContext analyzeRewrite, CreateTableDesc createTableDesc, - CreateViewDesc createViewDesc, QueryProperties queryProperties, + CreateViewDesc createViewDesc, MaterializedViewDesc materializedViewUpdateDesc, QueryProperties queryProperties, Map viewProjectToTableSchema, Set acidFileSinks) { this.queryState = queryState; this.conf = queryState.getConf(); @@ -225,6 +227,7 @@ public ParseContext( this.analyzeRewrite = analyzeRewrite; this.createTableDesc = createTableDesc; this.createViewDesc = createViewDesc; + this.materializedViewUpdateDesc = materializedViewUpdateDesc; this.queryProperties = queryProperties; this.viewProjectToViewSchema = viewProjectToTableSchema; this.needViewColumnAuthorization = viewProjectToTableSchema != null @@ -605,6 +608,10 @@ public CreateViewDesc getCreateViewDesc() { return createViewDesc; } + public MaterializedViewDesc getMaterializedViewUpdateDesc() { + return materializedViewUpdateDesc; + } + public void setReduceSinkAddedBySortedDynPartition( final boolean reduceSinkAddedBySortedDynPartition) { this.reduceSinkAddedBySortedDynPartition = reduceSinkAddedBySortedDynPartition; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 9d77f49e22..19fc6a9229 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -43,6 +43,7 @@ import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; +import com.google.common.collect.ImmutableSet; import org.antlr.runtime.ClassicToken; import org.antlr.runtime.CommonToken; import org.antlr.runtime.TokenRewriteStream; @@ -63,12 +64,14 @@ import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; import 
org.apache.hadoop.hive.common.StatsSetupConst.StatDB; +import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveConf.StrictChecks; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -98,6 +101,7 @@ import org.apache.hadoop.hive.ql.exec.GroupByOperator; import org.apache.hadoop.hive.ql.exec.JoinOperator; import org.apache.hadoop.hive.ql.exec.LimitOperator; +import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.OperatorFactory; import org.apache.hadoop.hive.ql.exec.RecordReader; @@ -308,6 +312,7 @@ Map prunedPartitions; protected List resultSchema; protected CreateViewDesc createVwDesc; + protected MaterializedViewDesc materializedViewUpdateDesc; protected ArrayList viewsExpanded; protected ASTNode viewSelect; protected final UnparseTranslator unparseTranslator; @@ -330,6 +335,9 @@ // flag for no scan during analyze ... compute statistics protected boolean noscan; + // whether this is a mv rebuild rewritten expression + protected boolean rewrittenRebuild = false; + protected volatile boolean disableJoinMerge = false; protected final boolean defaultJoinMerge; @@ -453,6 +461,7 @@ protected void reset(boolean clearCache) { nameToSplitSample.clear(); resultSchema = null; createVwDesc = null; + materializedViewUpdateDesc = null; viewsExpanded = null; viewSelect = null; ctesExpanded = null; @@ -490,11 +499,13 @@ public ParseContext getParseContext() { return new ParseContext(queryState, opToPartPruner, opToPartList, topOps, new HashSet(joinContext.keySet()), new HashSet(smbMapJoinContext.keySet()), - loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx, + loadTableWork, loadFileWork, columnStatsAutoGatherContexts, + ctx, idToTableNameMap, destTableId, uCtx, listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, - analyzeRewrite, tableDesc, createVwDesc, queryProperties, viewProjectToTableSchema, acidFileSinks); + analyzeRewrite, tableDesc, createVwDesc, materializedViewUpdateDesc, + queryProperties, viewProjectToTableSchema, acidFileSinks); } public CompilationOpContext getOpContext() { @@ -1984,7 +1995,8 @@ private void getMetaData(QB qb, ReadEntity parentInput) switch (ast.getToken().getType()) { case HiveParser.TOK_TAB: { TableSpec ts = new TableSpec(db, conf, ast); - if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) { + if (ts.tableHandle.isView() || + (!rewrittenRebuild && ts.tableHandle.isMaterializedView())) { throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg()); } @@ -6899,6 +6911,11 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) createInsertDesc(dest_tab, overwrite); } + if (dest_tab.isMaterializedView()) { + materializedViewUpdateDesc = new MaterializedViewDesc( + dest_tab.getFullyQualifiedName(), false, false, true); + } + WriteEntity output 
= generateTableWriteEntity( dest, dest_tab, partSpec, ltd, dpCtx, isNonNativeTable); ctx.getLoadTableOutputMap().put(ltd, output); @@ -7465,7 +7482,6 @@ private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, return dpCtx; } - private void createInsertDesc(Table table, boolean overwrite) { Task[] tasks = new Task[this.rootTasks.size()]; tasks = this.rootTasks.toArray(tasks); @@ -11207,7 +11223,7 @@ void resetToken() { } } - private Table getTableObjectByName(String tableName, boolean throwException) throws HiveException { + protected Table getTableObjectByName(String tableName, boolean throwException) throws HiveException { if (!tabNameToTabObject.containsKey(tableName)) { Table table = db.getTable(tableName, throwException); if (table != null) { @@ -11475,8 +11491,6 @@ boolean genResolvedParseTree(ASTNode ast, PlannerContext plannerCtx) throws Sema // 3. analyze create view command if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW || ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW || - (ast.getToken().getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW && - ast.getChild(1).getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD) || (ast.getToken().getType() == HiveParser.TOK_ALTERVIEW && ast.getChild(1).getType() == HiveParser.TOK_QUERY)) { child = analyzeCreateView(ast, qb, plannerCtx); @@ -11702,7 +11716,8 @@ void analyzeInternal(ASTNode ast, PlannerContextFactory pcf) throws SemanticExce listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, - analyzeRewrite, tableDesc, createVwDesc, queryProperties, viewProjectToTableSchema, acidFileSinks); + analyzeRewrite, tableDesc, createVwDesc, materializedViewUpdateDesc, + queryProperties, viewProjectToTableSchema, acidFileSinks); // Set the semijoin hints in parse context pCtx.setSemiJoinHints(parseSemiJoinHint(getQB().getParseInfo().getHintList())); @@ -12771,10 +12786,6 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt case HiveParser.TOK_ORREPLACE: orReplace = true; break; - case HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD: - isMaterialized = true; - isRebuild = true; - break; case HiveParser.TOK_QUERY: // For CBO if (plannerCtx != null) { @@ -12850,27 +12861,6 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt } qb.setViewDesc(createVwDesc); - if (isRebuild) { - // We need to go lookup the table and get the select statement and then parse it. - try { - Table tab = getTableObjectByName(dbDotTable, true); - // We need to use the expanded text for the materialized view, as it will contain - // the qualified table aliases, etc. 
- String viewText = tab.getViewExpandedText(); - if (viewText.trim().isEmpty()) { - throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY); - } - Context ctx = new Context(queryState.getConf()); - selectStmt = ParseUtils.parse(viewText, ctx); - // For CBO - if (plannerCtx != null) { - plannerCtx.setViewToken(selectStmt); - } - } catch (Exception e) { - throw new SemanticException(e); - } - } - return selectStmt; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 34963ff0c9..78f83ef039 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -277,8 +277,7 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t case HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD: opType = commandType.get(child.getType()); queryState.setCommandType(opType); - return HiveConf.getBoolVar(queryState.getConf(), HiveConf.ConfVars.HIVE_CBO_ENABLED) ? - new CalcitePlanner(queryState) : new SemanticAnalyzer(queryState); + return new MaterializedViewRebuildSemanticAnalyzer(queryState); } // Operation not recognized, set to null and let upper level handle this case queryState.setCommandType(null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 3122db8267..5e94bb7eb7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -28,6 +28,8 @@ import java.util.Map; import java.util.Set; +import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -322,9 +324,7 @@ public void compile(final ParseContext pCtx, if (pCtx.getQueryProperties().isCTAS() && !pCtx.getCreateTable().isMaterialization()) { // generate a DDL task and make it a dependent task of the leaf CreateTableDesc crtTblDesc = pCtx.getCreateTable(); - crtTblDesc.validate(conf); - Task crtTblTask = TaskFactory.get(new DDLWork( inputs, outputs, crtTblDesc), conf); patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask); @@ -334,6 +334,16 @@ public void compile(final ParseContext pCtx, Task crtViewTask = TaskFactory.get(new DDLWork( inputs, outputs, viewDesc), conf); patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtViewTask); + } else if (pCtx.getMaterializedViewUpdateDesc() != null) { + // If there is a materialized view update desc, we create introduce it at the end + // of the tree. 
+ MaterializedViewDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc(); + Set> leafTasks = new LinkedHashSet>(); + getLeafTasks(rootTasks, leafTasks); + Task materializedViewTask = TaskFactory.get(materializedViewDesc, conf); + for (Task task : leafTasks) { + task.addDependentTask(materializedViewTask); + } } if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) { @@ -464,6 +474,7 @@ private void patchUpAfterCTASorMaterializedView(final List> leaves = new LinkedHashSet<>(); getLeafTasks(rootTasks, leaves); assert (leaves.size() > 0); + Task targetTask = createTask; for (Task task : leaves) { if (task instanceof StatsTask) { // StatsTask require table to already exist @@ -474,10 +485,36 @@ private void patchUpAfterCTASorMaterializedView(final List 10.0 GROUP BY cmv_basetable.a, cmv_basetable_2.c; +DESCRIBE FORMATTED cmv_mat_view; + -- CANNOT USE THE VIEW, IT IS DISABLED FOR REWRITE EXPLAIN SELECT cmv_basetable.a @@ -59,6 +61,8 @@ ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE; ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE; +DESCRIBE FORMATTED cmv_mat_view; + -- CANNOT USE THE VIEW, IT IS OUTDATED EXPLAIN SELECT cmv_basetable.a @@ -77,6 +81,8 @@ ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +DESCRIBE FORMATTED cmv_mat_view; + -- NOW IT CAN BE USED AGAIN EXPLAIN SELECT cmv_basetable.a diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out index 294b84affd..34a727301f 100644 --- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out +++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out @@ -1,38 +1,55 @@ -PREHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@cmv_basetable -POSTHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@cmv_basetable -PREHOOK: query: insert into cmv_basetable values - (1, 'alfred', 10.30, 2), - (2, 'bob', 3.14, 3), - (2, 'bonnie', 172342.2, 3), - (3, 'calvin', 978.76, 3), - (3, 'charlie', 9.8, 1) -PREHOOK: type: QUERY +PREHOOK: query: CREATE TABLE cmv_basetable +STORED AS orc +TBLPROPERTIES ('transactional'='true') +AS +SELECT cast(current_timestamp() AS timestamp) AS t, + cast(a AS int) AS a, + cast(b AS varchar(256)) AS b, + cast(c AS double) AS c, + cast(d AS int) AS d +FROM TABLE ( + VALUES + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1), + (3, 'charlie', 15.8, 1)) as q (a, b, c, d) +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default PREHOOK: Output: default@cmv_basetable -POSTHOOK: query: insert into cmv_basetable values - (1, 'alfred', 10.30, 2), - (2, 'bob', 3.14, 3), - (2, 'bonnie', 172342.2, 3), - (3, 'calvin', 978.76, 3), - (3, 'charlie', 9.8, 1) -POSTHOOK: type: QUERY +POSTHOOK: query: CREATE TABLE cmv_basetable +STORED AS orc +TBLPROPERTIES ('transactional'='true') +AS +SELECT cast(current_timestamp() AS timestamp) AS t, + cast(a AS int) AS a, + cast(b AS varchar(256)) AS b, + cast(c AS double) AS c, + cast(d AS int) AS d +FROM TABLE ( + VALUES + (1, 'alfred', 10.30, 2), + (2, 'bob', 
3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1), + (3, 'charlie', 15.8, 1)) as q (a, b, c, d) +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_basetable POSTHOOK: Lineage: cmv_basetable.a SCRIPT [] POSTHOOK: Lineage: cmv_basetable.b SCRIPT [] POSTHOOK: Lineage: cmv_basetable.c SCRIPT [] POSTHOOK: Lineage: cmv_basetable.d SCRIPT [] +POSTHOOK: Lineage: cmv_basetable.t SIMPLE [] PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, cast(c as double) +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable WHERE a = 2 PREHOOK: type: CREATE_MATERIALIZED_VIEW @@ -43,7 +60,7 @@ POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, cast(c as double) +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable WHERE a = 2 POSTHOOK: type: CREATE_MATERIALIZED_VIEW @@ -77,7 +94,7 @@ PREHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWR STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, cast(c as double) +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable WHERE a = 3 PREHOOK: type: CREATE_MATERIALIZED_VIEW @@ -88,7 +105,7 @@ POSTHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REW STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS -SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, cast(c as double) +SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c FROM cmv_basetable WHERE a = 3 POSTHOOK: type: CREATE_MATERIALIZED_VIEW @@ -103,7 +120,8 @@ POSTHOOK: query: SELECT a, c FROM cmv_mat_view2 POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_mat_view2 #### A masked pattern was here #### -6 988.5599975585938 +3 978.760009765625 +6 25.600000381469727 PREHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2 PREHOOK: type: SHOW_TBLPROPERTIES POSTHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2 @@ -112,7 +130,7 @@ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} druid.datasource default.cmv_mat_view2 druid.segment.granularity HOUR numFiles 0 -numRows 2 +numRows 3 rawDataSize 0 storage_handler org.apache.hadoop.hive.druid.DruidStorageHandler totalSize 0 @@ -137,17 +155,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 10770 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (a = 3) (type: boolean) - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 3 (type: int), c (type: decimal(10,2)) + expressions: 3 (type: int), c (type: double) 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -171,7 +189,8 @@ WHERE a = 3 POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable #### A masked pattern was here #### -3 9.80 +3 15.8 +3 9.8 3 978.76 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN @@ -198,32 +217,32 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 10770 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (a = 3) (type: boolean) - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: c (type: decimal(10,2)) + expressions: c (type: double) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: decimal(10,2)) + Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: double) TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 10770 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((3 = a) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1795 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: c (type: decimal(10,2)) + expressions: c (type: double) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1795 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: decimal(10,2)) + Statistics: Num rows: 1 Data size: 1795 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: double) Reduce Operator Tree: Join Operator condition map: @@ -232,14 +251,14 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 18621 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 10773 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2)) + expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 18621 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 10773 Basic stats: 
COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 18621 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 10773 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -268,15 +287,16 @@ POSTHOOK: query: SELECT * FROM ( POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable #### A masked pattern was here #### -3 9.80 3 978.76 +3 15.8 3 978.76 +3 9.8 3 978.76 3 978.76 3 978.76 PREHOOK: query: INSERT INTO cmv_basetable VALUES - (3, 'charlie', 15.8, 1) + (cast(current_timestamp() AS timestamp), 3, 'charlie', 15.8, 1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Output: default@cmv_basetable POSTHOOK: query: INSERT INTO cmv_basetable VALUES - (3, 'charlie', 15.8, 1) + (cast(current_timestamp() AS timestamp), 3, 'charlie', 15.8, 1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: default@cmv_basetable @@ -284,6 +304,7 @@ POSTHOOK: Lineage: cmv_basetable.a SCRIPT [] POSTHOOK: Lineage: cmv_basetable.b SCRIPT [] POSTHOOK: Lineage: cmv_basetable.c SCRIPT [] POSTHOOK: Lineage: cmv_basetable.d SCRIPT [] +POSTHOOK: Lineage: cmv_basetable.t SCRIPT [] Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * FROM ( @@ -309,32 +330,32 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 20230 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (a = 3) (type: boolean) - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 10115 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: c (type: decimal(10,2)) + expressions: c (type: double) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 10115 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: decimal(10,2)) + Statistics: Num rows: 3 Data size: 10115 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: double) TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 20230 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((3 = a) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3371 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: c (type: decimal(10,2)) + expressions: c (type: double) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3371 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: decimal(10,2)) + Statistics: Num rows: 1 Data size: 3371 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: double) Reduce Operator Tree: 
Join Operator condition map: @@ -343,14 +364,14 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 20231 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2)) + expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 20231 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 20231 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -379,10 +400,103 @@ POSTHOOK: query: SELECT * FROM ( POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable #### A masked pattern was here #### -3 15.80 3 978.76 -3 9.80 3 978.76 +3 15.8 3 978.76 +3 15.8 3 978.76 +3 9.8 3 978.76 3 978.76 3 978.76 -Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: EXPLAIN +ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-2 + Stage-4 depends on stages: Stage-2, Stage-1, Stage-3 + Stage-1 is a root stage + Stage-3 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Table Operator: + Alter Table + type: drop props + old name: default.cmv_mat_view2 + properties: + COLUMN_STATS_ACCURATE + + Stage: Stage-2 + Insert operator: + Insert + + Stage: Stage-4 + Materialized View Work + + Stage: Stage-1 + Pre Insert operator: + Pre-Insert task + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: cmv_basetable + Statistics: Num rows: 6 Data size: 20230 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a = 3) (type: boolean) + Statistics: Num rows: 3 Data size: 10115 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: CAST( t AS timestamp with local time zone) (type: timestamp with local time zone), 3 (type: int), b (type: varchar(256)), c (type: double) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 3 Data size: 10115 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp) + outputColumnNames: _col0, _col1, _col2, _col3, __time_granularity + Statistics: Num rows: 3 Data size: 10115 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: __time_granularity (type: timestamp) + sort order: + + Map-reduce partition columns: __time_granularity (type: timestamp) + Statistics: Num rows: 3 Data size: 10115 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double) + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: timestamp with local time zone), 
VALUE._col1 (type: int), VALUE._col2 (type: varchar(256)), VALUE._col3 (type: double), KEY.__time_granularity (type: timestamp) + outputColumnNames: _col0, _col1, _col2, _col3, __time_granularity + Statistics: Num rows: 3 Data size: 10115 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Dp Sort State: PARTITION_SORTED + Statistics: Num rows: 3 Data size: 10115 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat + output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat + serde: org.apache.hadoop.hive.druid.serde.DruidSerDe + name: default.cmv_mat_view2 + +PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable +PREHOOK: Output: default@cmv_mat_view2 +POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Output: default@cmv_mat_view2 +PREHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2 +PREHOOK: type: SHOW_TBLPROPERTIES +POSTHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2 +POSTHOOK: type: SHOW_TBLPROPERTIES +druid.datasource default.cmv_mat_view2 +druid.segment.granularity HOUR +#### A masked pattern was here #### +numFiles 0 +numRows 3 +rawDataSize 0 +storage_handler org.apache.hadoop.hive.druid.DruidStorageHandler +totalSize 0 +#### A masked pattern was here #### +Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * FROM ( (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 @@ -407,32 +521,28 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (a = 3) (type: boolean) - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: c (type: decimal(10,2)) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: decimal(10,2)) - TableScan - alias: cmv_basetable - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 20230 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((3 = a) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3371 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: c (type: decimal(10,2)) + expressions: c (type: double) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 3371 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: decimal(10,2)) + Statistics: Num rows: 1 Data size: 3371 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: double) + TableScan + alias: cmv_mat_view2 + properties: + druid.query.json 
{"queryType":"select","dataSource":"default.cmv_mat_view2","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["c"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}} + druid.query.type select + Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: c (type: double) Reduce Operator Tree: Join Operator condition map: @@ -440,15 +550,15 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + outputColumnNames: _col1, _col5 + Statistics: Num rows: 3 Data size: 10116 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2)) + expressions: 3 (type: int), _col1 (type: double), 3 (type: int), _col5 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 10116 Basic stats: PARTIAL Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 10116 Basic stats: PARTIAL Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -460,7 +570,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * FROM ( (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 JOIN @@ -468,6 +578,7 @@ PREHOOK: query: SELECT * FROM ( ON table1.a = table2.a) PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable +PREHOOK: Input: default@cmv_mat_view2 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM ( (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 @@ -476,10 +587,11 @@ POSTHOOK: query: SELECT * FROM ( ON table1.a = table2.a) POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Input: default@cmv_mat_view2 #### A masked pattern was here #### -3 15.80 3 978.76 -3 9.80 3 978.76 -3 978.76 3 978.76 +3 15.800000190734863 3 978.76 +3 25.600000381469727 3 978.76 +3 978.760009765625 3 978.76 PREHOOK: query: DROP MATERIALIZED VIEW cmv_mat_view PREHOOK: type: DROP_MATERIALIZED_VIEW PREHOOK: Input: default@cmv_mat_view diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out index 0d8d238e8b..29e408c60c 100644 --- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out +++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out @@ -92,6 +92,7 @@ STAGE DEPENDENCIES: Stage-0 depends on stages: Stage-2 Stage-5 depends on stages: Stage-0 Stage-3 depends on stages: Stage-5 + Stage-6 depends on stages: Stage-3 STAGE PLANS: Stage: Stage-1 @@ -198,6 +199,9 @@ STAGE PLANS: Stats Work Basic Stats Work: + Stage: Stage-6 + Materialized View Work + PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE AS SELECT 
cmv_basetable.a, cmv_basetable_2.c FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) @@ -433,16 +437,17 @@ POSTHOOK: Input: default@cmv_basetable_2 3 PREHOOK: query: EXPLAIN ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage Stage-2 depends on stages: Stage-1 Stage-0 depends on stages: Stage-2 - Stage-5 depends on stages: Stage-0 - Stage-3 depends on stages: Stage-5 + Stage-3 depends on stages: Stage-0, Stage-4 + Stage-6 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-2 STAGE PLANS: Stage: Stage-1 @@ -523,36 +528,77 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.cmv_mat_view + Select Operator + expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + outputColumnNames: a, c + Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe Stage: Stage-0 Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-5 - Create View Operator: - Create View - columns: a int, c decimal(10,2) - name: default.cmv_mat_view + tables: replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view Stage: Stage-3 Stats Work Basic Stats Work: + Column Stats Desc: + Columns: a, c + Column Types: int, decimal(10,2) + Table: default.cmv_mat_view + + Stage: Stage-6 + Materialized View Work + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable PREHOOK: Input: default@cmv_basetable_2 -PREHOOK: Output: database:default PREHOOK: Output: default@cmv_mat_view POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable 
POSTHOOK: Input: default@cmv_basetable_2 -POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_mat_view +POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ] PREHOOK: query: EXPLAIN SELECT cmv_basetable.a FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) @@ -750,17 +796,17 @@ POSTHOOK: Input: default@cmv_basetable_2 #### A masked pattern was here #### 1 PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable PREHOOK: Input: default@cmv_basetable_2 -PREHOOK: Output: database:default PREHOOK: Output: default@cmv_mat_view POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable POSTHOOK: Input: default@cmv_basetable_2 -POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_mat_view +POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ] PREHOOK: query: EXPLAIN SELECT cmv_basetable.a FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out index 8ab1517186..48c0ecb23f 100644 --- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out +++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out @@ -92,6 +92,7 @@ STAGE DEPENDENCIES: Stage-0 depends on stages: Stage-2 Stage-5 depends on stages: Stage-0 Stage-3 depends on stages: Stage-5 + Stage-6 depends on stages: Stage-3 STAGE PLANS: Stage: Stage-1 @@ -197,6 +198,9 @@ STAGE PLANS: Stats Work Basic Stats Work: + Stage: Stage-6 + Materialized View Work + PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view AS SELECT cmv_basetable.a, cmv_basetable_2.c FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) @@ -217,6 +221,49 @@ POSTHOOK: Input: default@cmv_basetable POSTHOOK: Input: default@cmv_basetable_2 POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_mat_view +PREHOOK: query: DESCRIBE FORMATTED cmv_mat_view +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@cmv_mat_view +POSTHOOK: query: DESCRIBE FORMATTED cmv_mat_view +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@cmv_mat_view +# col_name data_type comment +a int +c decimal(10,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MATERIALIZED_VIEW +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 2 + rawDataSize 232 + totalSize 325 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] + +# View Information +View Original Text: SELECT cmv_basetable.a, cmv_basetable_2.c 
+ FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) + WHERE cmv_basetable_2.c > 10.0 + GROUP BY cmv_basetable.a, cmv_basetable_2.c +View Expanded Text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c` + FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`) + WHERE `cmv_basetable_2`.`c` > 10.0 + GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c` +View Rewrite Enabled: No PREHOOK: query: EXPLAIN SELECT cmv_basetable.a FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) @@ -389,6 +436,49 @@ POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE POSTHOOK: Input: default@cmv_mat_view POSTHOOK: Output: default@cmv_mat_view +PREHOOK: query: DESCRIBE FORMATTED cmv_mat_view +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@cmv_mat_view +POSTHOOK: query: DESCRIBE FORMATTED cmv_mat_view +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@cmv_mat_view +# col_name data_type comment +a int +c decimal(10,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MATERIALIZED_VIEW +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 2 + rawDataSize 232 + totalSize 325 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] + +# View Information +View Original Text: SELECT cmv_basetable.a, cmv_basetable_2.c + FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) + WHERE cmv_basetable_2.c > 10.0 + GROUP BY cmv_basetable.a, cmv_basetable_2.c +View Expanded Text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c` + FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`) + WHERE `cmv_basetable_2`.`c` > 10.0 + GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c` +View Rewrite Enabled: Yes PREHOOK: query: EXPLAIN SELECT cmv_basetable.a FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) @@ -516,16 +606,17 @@ POSTHOOK: Input: default@cmv_basetable_2 3 PREHOOK: query: EXPLAIN ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage Stage-2 depends on stages: Stage-1 Stage-0 depends on stages: Stage-2 - Stage-5 depends on stages: Stage-0 - Stage-3 depends on stages: Stage-5 + Stage-3 depends on stages: Stage-0, Stage-4 + Stage-6 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-2 STAGE PLANS: Stage: Stage-1 @@ -606,36 +697,120 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.cmv_mat_view + Select Operator + expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + outputColumnNames: a, c + Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + 
Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe Stage: Stage-0 Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-5 - Create View Operator: - Create View - columns: a int, c decimal(10,2) - name: default.cmv_mat_view + tables: replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view Stage: Stage-3 Stats Work Basic Stats Work: + Column Stats Desc: + Columns: a, c + Column Types: int, decimal(10,2) + Table: default.cmv_mat_view + + Stage: Stage-6 + Materialized View Work + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable PREHOOK: Input: default@cmv_basetable_2 -PREHOOK: Output: database:default PREHOOK: Output: default@cmv_mat_view POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD -POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable POSTHOOK: Input: default@cmv_basetable_2 -POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_mat_view +POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ] +PREHOOK: query: DESCRIBE FORMATTED cmv_mat_view +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@cmv_mat_view +POSTHOOK: query: DESCRIBE FORMATTED cmv_mat_view +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@cmv_mat_view +# col_name data_type comment +a int +c decimal(10,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MATERIALIZED_VIEW +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"c\":\"true\"}} + numFiles 1 + numRows 3 + rawDataSize 348 + totalSize 332 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 
+Bucket Columns: [] +Sort Columns: [] + +# View Information +View Original Text: SELECT cmv_basetable.a, cmv_basetable_2.c + FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) + WHERE cmv_basetable_2.c > 10.0 + GROUP BY cmv_basetable.a, cmv_basetable_2.c +View Expanded Text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c` + FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`) + WHERE `cmv_basetable_2`.`c` > 10.0 + GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c` +View Rewrite Enabled: Yes PREHOOK: query: EXPLAIN SELECT cmv_basetable.a FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index af0fd6b0e0..6bd6aa2289 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -9293,6 +9293,265 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read } +ThriftHiveMetastore_update_creation_metadata_args::~ThriftHiveMetastore_update_creation_metadata_args() throw() { +} + + +uint32_t ThriftHiveMetastore_update_creation_metadata_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbname); + this->__isset.dbname = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->creation_metadata.read(iprot); + this->__isset.creation_metadata = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_update_creation_metadata_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_args"); + + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbname); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->creation_metadata.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + 
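The generated C++ above introduces a new update_creation_metadata(dbname, tbl_name, creation_metadata) call on the ThriftHiveMetastore service, which can raise MetaException, InvalidOperationException or UnknownDBException. As a rough illustration only — assuming the Java bindings regenerated from the same Thrift change expose a matching method on ThriftHiveMetastore.Client, and assuming CreationMetadata carries the database, table, tables-used set and valid-transaction-list fields (the setter names below are assumptions, not taken from this patch) — a caller could drive the new RPC along these lines:

import java.util.Set;

import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.TException;

public class UpdateCreationMetadataSketch {
  // Sketch only: pushes a refreshed creation signature for a materialized view to the
  // metastore through the dedicated RPC, rather than piggybacking it on a full table alter.
  static void refreshCreationMetadata(ThriftHiveMetastore.Client client, String db, String mvName,
      Set<String> tablesUsed, String validTxnList) throws TException {
    CreationMetadata cm = new CreationMetadata();
    cm.setDbName(db);               // assumed setter name
    cm.setTblName(mvName);          // assumed setter name
    cm.setTablesUsed(tablesUsed);   // fully qualified names of the tables the view reads
    cm.setValidTxnList(validTxnList);
    // New service method added by this patch; may throw MetaException,
    // InvalidOperationException or UnknownDBException, mirroring the C++ result struct above.
    client.update_creation_metadata(db, mvName, cm);
  }
}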
+ThriftHiveMetastore_update_creation_metadata_pargs::~ThriftHiveMetastore_update_creation_metadata_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_update_creation_metadata_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_pargs"); + + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->dbname))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += (*(this->creation_metadata)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_update_creation_metadata_result::~ThriftHiveMetastore_update_creation_metadata_result() throw() { +} + + +uint32_t ThriftHiveMetastore_update_creation_metadata_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_update_creation_metadata_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_update_creation_metadata_presult::~ThriftHiveMetastore_update_creation_metadata_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_update_creation_metadata_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + 
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + ThriftHiveMetastore_get_table_names_by_filter_args::~ThriftHiveMetastore_get_table_names_by_filter_args() throw() { } @@ -47026,6 +47285,70 @@ void ThriftHiveMetastoreClient::recv_get_materialization_invalidation_info(std:: throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result"); } +void ThriftHiveMetastoreClient::update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) +{ + send_update_creation_metadata(dbname, tbl_name, creation_metadata); + recv_update_creation_metadata(); +} + +void ThriftHiveMetastoreClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_update_creation_metadata_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.creation_metadata = &creation_metadata; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_update_creation_metadata() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("update_creation_metadata") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_update_creation_metadata_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + return; +} + void ThriftHiveMetastoreClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, 
const std::string& filter, const int16_t max_tables) { send_get_table_names_by_filter(dbname, filter, max_tables); @@ -58420,6 +58743,68 @@ void ThriftHiveMetastoreProcessor::process_get_materialization_invalidation_info } } +void ThriftHiveMetastoreProcessor::process_update_creation_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.update_creation_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.update_creation_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.update_creation_metadata"); + } + + ThriftHiveMetastore_update_creation_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.update_creation_metadata", bytes); + } + + ThriftHiveMetastore_update_creation_metadata_result result; + try { + iface_->update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata); + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (UnknownDBException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.update_creation_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.update_creation_metadata"); + } + + oprot->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.update_creation_metadata", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_get_table_names_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -70329,6 +70714,98 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_materialization_invalidation_ } // end while(true) } +void ThriftHiveMetastoreConcurrentClient::update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) +{ + int32_t seqid = send_update_creation_metadata(dbname, tbl_name, creation_metadata); + recv_update_creation_metadata(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("update_creation_metadata", 
::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_update_creation_metadata_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.creation_metadata = &creation_metadata; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_update_creation_metadata(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("update_creation_metadata") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_update_creation_metadata_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + void ThriftHiveMetastoreConcurrentClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) { int32_t seqid = send_get_table_names_by_filter(dbname, filter, max_tables); diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index bfa17eb3e6..2466498885 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -59,6 +59,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void get_table_req(GetTableResult& _return, const GetTableRequest& req) = 0; virtual void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) = 0; virtual void get_materialization_invalidation_info(std::map & _return, const std::string& dbname, const std::vector & tbl_names) = 0; + virtual void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) = 0; virtual void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) 
= 0; virtual void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) = 0; virtual void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) = 0; @@ -345,6 +346,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void get_materialization_invalidation_info(std::map & /* _return */, const std::string& /* dbname */, const std::vector & /* tbl_names */) { return; } + void update_creation_metadata(const std::string& /* dbname */, const std::string& /* tbl_name */, const CreationMetadata& /* creation_metadata */) { + return; + } void get_table_names_by_filter(std::vector & /* _return */, const std::string& /* dbname */, const std::string& /* filter */, const int16_t /* max_tables */) { return; } @@ -5339,6 +5343,140 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_presult { }; +typedef struct _ThriftHiveMetastore_update_creation_metadata_args__isset { + _ThriftHiveMetastore_update_creation_metadata_args__isset() : dbname(false), tbl_name(false), creation_metadata(false) {} + bool dbname :1; + bool tbl_name :1; + bool creation_metadata :1; +} _ThriftHiveMetastore_update_creation_metadata_args__isset; + +class ThriftHiveMetastore_update_creation_metadata_args { + public: + + ThriftHiveMetastore_update_creation_metadata_args(const ThriftHiveMetastore_update_creation_metadata_args&); + ThriftHiveMetastore_update_creation_metadata_args& operator=(const ThriftHiveMetastore_update_creation_metadata_args&); + ThriftHiveMetastore_update_creation_metadata_args() : dbname(), tbl_name() { + } + + virtual ~ThriftHiveMetastore_update_creation_metadata_args() throw(); + std::string dbname; + std::string tbl_name; + CreationMetadata creation_metadata; + + _ThriftHiveMetastore_update_creation_metadata_args__isset __isset; + + void __set_dbname(const std::string& val); + + void __set_tbl_name(const std::string& val); + + void __set_creation_metadata(const CreationMetadata& val); + + bool operator == (const ThriftHiveMetastore_update_creation_metadata_args & rhs) const + { + if (!(dbname == rhs.dbname)) + return false; + if (!(tbl_name == rhs.tbl_name)) + return false; + if (!(creation_metadata == rhs.creation_metadata)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_update_creation_metadata_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_update_creation_metadata_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_update_creation_metadata_pargs { + public: + + + virtual ~ThriftHiveMetastore_update_creation_metadata_pargs() throw(); + const std::string* dbname; + const std::string* tbl_name; + const CreationMetadata* creation_metadata; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_update_creation_metadata_result__isset { + _ThriftHiveMetastore_update_creation_metadata_result__isset() : o1(false), o2(false), o3(false) {} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_update_creation_metadata_result__isset; + +class ThriftHiveMetastore_update_creation_metadata_result { + public: + + ThriftHiveMetastore_update_creation_metadata_result(const ThriftHiveMetastore_update_creation_metadata_result&); + 
ThriftHiveMetastore_update_creation_metadata_result& operator=(const ThriftHiveMetastore_update_creation_metadata_result&); + ThriftHiveMetastore_update_creation_metadata_result() { + } + + virtual ~ThriftHiveMetastore_update_creation_metadata_result() throw(); + MetaException o1; + InvalidOperationException o2; + UnknownDBException o3; + + _ThriftHiveMetastore_update_creation_metadata_result__isset __isset; + + void __set_o1(const MetaException& val); + + void __set_o2(const InvalidOperationException& val); + + void __set_o3(const UnknownDBException& val); + + bool operator == (const ThriftHiveMetastore_update_creation_metadata_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_update_creation_metadata_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_update_creation_metadata_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_update_creation_metadata_presult__isset { + _ThriftHiveMetastore_update_creation_metadata_presult__isset() : o1(false), o2(false), o3(false) {} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_update_creation_metadata_presult__isset; + +class ThriftHiveMetastore_update_creation_metadata_presult { + public: + + + virtual ~ThriftHiveMetastore_update_creation_metadata_presult() throw(); + MetaException o1; + InvalidOperationException o2; + UnknownDBException o3; + + _ThriftHiveMetastore_update_creation_metadata_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_get_table_names_by_filter_args__isset { _ThriftHiveMetastore_get_table_names_by_filter_args__isset() : dbname(false), filter(false), max_tables(true) {} bool dbname :1; @@ -23282,6 +23420,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void get_materialization_invalidation_info(std::map & _return, const std::string& dbname, const std::vector & tbl_names); void send_get_materialization_invalidation_info(const std::string& dbname, const std::vector & tbl_names); void recv_get_materialization_invalidation_info(std::map & _return); + void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + void send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + void recv_update_creation_metadata(); void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); void recv_get_table_names_by_filter(std::vector & _return); @@ -23761,6 +23902,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_get_table_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_table_objects_by_name_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_materialization_invalidation_info(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_update_creation_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_table_names_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_alter_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_alter_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -23946,6 +24088,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["get_table_req"] = &ThriftHiveMetastoreProcessor::process_get_table_req; processMap_["get_table_objects_by_name_req"] = &ThriftHiveMetastoreProcessor::process_get_table_objects_by_name_req; processMap_["get_materialization_invalidation_info"] = &ThriftHiveMetastoreProcessor::process_get_materialization_invalidation_info; + processMap_["update_creation_metadata"] = &ThriftHiveMetastoreProcessor::process_update_creation_metadata; processMap_["get_table_names_by_filter"] = &ThriftHiveMetastoreProcessor::process_get_table_names_by_filter; processMap_["alter_table"] = &ThriftHiveMetastoreProcessor::process_alter_table; processMap_["alter_table_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_alter_table_with_environment_context; @@ -24476,6 +24619,15 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } + void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata); + } + ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata); + } + void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) { size_t sz = ifaces_.size(); size_t i = 0; @@ -25994,6 +26146,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void get_materialization_invalidation_info(std::map & _return, const std::string& dbname, const std::vector & tbl_names); int32_t send_get_materialization_invalidation_info(const std::string& dbname, const std::vector & tbl_names); void recv_get_materialization_invalidation_info(std::map & _return, const int32_t seqid); + void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + int32_t send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + void recv_update_creation_metadata(const int32_t seqid); void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); int32_t send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); void recv_get_table_names_by_filter(std::vector & _return, const int32_t seqid); diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp 
b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index cf9a1713aa..f5dc9f08ae 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -207,6 +207,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("get_materialization_invalidation_info\n"); } + void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { + // Your implementation goes here + printf("update_creation_metadata\n"); + } + void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) { // Your implementation goes here printf("get_table_names_by_filter\n"); diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index aadf8f17c4..8f04b9da9e 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -21187,14 +21187,15 @@ Materialization::~Materialization() throw() { } -void Materialization::__set_materializationTable(const Table& val) { - this->materializationTable = val; -} - void Materialization::__set_tablesUsed(const std::set & val) { this->tablesUsed = val; } +void Materialization::__set_validTxnList(const std::string& val) { + this->validTxnList = val; +__isset.validTxnList = true; +} + void Materialization::__set_invalidationTime(const int64_t val) { this->invalidationTime = val; } @@ -21211,7 +21212,6 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) { using ::apache::thrift::protocol::TProtocolException; - bool isset_materializationTable = false; bool isset_tablesUsed = false; bool isset_invalidationTime = false; @@ -21224,14 +21224,6 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) { switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->materializationTable.read(iprot); - isset_materializationTable = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); @@ -21252,6 +21244,14 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validTxnList); + this->__isset.validTxnList = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 3: if (ftype == ::apache::thrift::protocol::T_I64) { xfer += iprot->readI64(this->invalidationTime); @@ -21269,8 +21269,6 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->readStructEnd(); - if (!isset_materializationTable) - throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_tablesUsed) throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_invalidationTime) @@ -21283,11 +21281,7 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("Materialization"); - xfer += oprot->writeFieldBegin("materializationTable", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += 
this->materializationTable.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 2); + xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tablesUsed.size())); std::set ::const_iterator _iter880; @@ -21299,6 +21293,11 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co } xfer += oprot->writeFieldEnd(); + if (this->__isset.validTxnList) { + xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->validTxnList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldBegin("invalidationTime", ::apache::thrift::protocol::T_I64, 3); xfer += oprot->writeI64(this->invalidationTime); xfer += oprot->writeFieldEnd(); @@ -21310,27 +21309,30 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co void swap(Materialization &a, Materialization &b) { using ::std::swap; - swap(a.materializationTable, b.materializationTable); swap(a.tablesUsed, b.tablesUsed); + swap(a.validTxnList, b.validTxnList); swap(a.invalidationTime, b.invalidationTime); + swap(a.__isset, b.__isset); } Materialization::Materialization(const Materialization& other881) { - materializationTable = other881.materializationTable; tablesUsed = other881.tablesUsed; + validTxnList = other881.validTxnList; invalidationTime = other881.invalidationTime; + __isset = other881.__isset; } Materialization& Materialization::operator=(const Materialization& other882) { - materializationTable = other882.materializationTable; tablesUsed = other882.tablesUsed; + validTxnList = other882.validTxnList; invalidationTime = other882.invalidationTime; + __isset = other882.__isset; return *this; } void Materialization::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "Materialization("; - out << "materializationTable=" << to_string(materializationTable); - out << ", " << "tablesUsed=" << to_string(tablesUsed); + out << "tablesUsed=" << to_string(tablesUsed); + out << ", " << "validTxnList="; (__isset.validTxnList ? 
(out << to_string(validTxnList)) : (out << "")); out << ", " << "invalidationTime=" << to_string(invalidationTime); out << ")"; } diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 4c09bc8fe6..c25089357b 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -8707,32 +8707,40 @@ inline std::ostream& operator<<(std::ostream& out, const TableMeta& obj) return out; } +typedef struct _Materialization__isset { + _Materialization__isset() : validTxnList(false) {} + bool validTxnList :1; +} _Materialization__isset; class Materialization { public: Materialization(const Materialization&); Materialization& operator=(const Materialization&); - Materialization() : invalidationTime(0) { + Materialization() : validTxnList(), invalidationTime(0) { } virtual ~Materialization() throw(); - Table materializationTable; std::set tablesUsed; + std::string validTxnList; int64_t invalidationTime; - void __set_materializationTable(const Table& val); + _Materialization__isset __isset; void __set_tablesUsed(const std::set & val); + void __set_validTxnList(const std::string& val); + void __set_invalidationTime(const int64_t val); bool operator == (const Materialization & rhs) const { - if (!(materializationTable == rhs.materializationTable)) - return false; if (!(tablesUsed == rhs.tablesUsed)) return false; + if (__isset.validTxnList != rhs.__isset.validTxnList) + return false; + else if (__isset.validTxnList && !(validTxnList == rhs.validTxnList)) + return false; if (!(invalidationTime == rhs.invalidationTime)) return false; return true; diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java index b399d66422..ccef0244be 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java @@ -38,8 +38,8 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Materialization implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Materialization"); - private static final org.apache.thrift.protocol.TField MATERIALIZATION_TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("materializationTable", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)2); + private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)1); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField INVALIDATION_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("invalidationTime", org.apache.thrift.protocol.TType.I64, 
(short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); @@ -48,14 +48,14 @@ schemes.put(TupleScheme.class, new MaterializationTupleSchemeFactory()); } - private Table materializationTable; // required private Set tablesUsed; // required + private String validTxnList; // optional private long invalidationTime; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - MATERIALIZATION_TABLE((short)1, "materializationTable"), - TABLES_USED((short)2, "tablesUsed"), + TABLES_USED((short)1, "tablesUsed"), + VALID_TXN_LIST((short)2, "validTxnList"), INVALIDATION_TIME((short)3, "invalidationTime"); private static final Map byName = new HashMap(); @@ -71,10 +71,10 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // MATERIALIZATION_TABLE - return MATERIALIZATION_TABLE; - case 2: // TABLES_USED + case 1: // TABLES_USED return TABLES_USED; + case 2: // VALID_TXN_LIST + return VALID_TXN_LIST; case 3: // INVALIDATION_TIME return INVALIDATION_TIME; default: @@ -119,14 +119,15 @@ public String getFieldName() { // isset id assignments private static final int __INVALIDATIONTIME_ISSET_ID = 0; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.VALID_TXN_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.MATERIALIZATION_TABLE, new org.apache.thrift.meta_data.FieldMetaData("materializationTable", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); tmpMap.put(_Fields.TABLES_USED, new org.apache.thrift.meta_data.FieldMetaData("tablesUsed", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.INVALIDATION_TIME, new org.apache.thrift.meta_data.FieldMetaData("invalidationTime", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); metaDataMap = Collections.unmodifiableMap(tmpMap); @@ -137,12 +138,10 @@ public Materialization() { } public Materialization( - Table materializationTable, Set tablesUsed, long invalidationTime) { this(); - this.materializationTable = materializationTable; this.tablesUsed = tablesUsed; this.invalidationTime = invalidationTime; setInvalidationTimeIsSet(true); @@ -153,13 +152,13 @@ public Materialization( */ public Materialization(Materialization other) { __isset_bitfield = other.__isset_bitfield; - if (other.isSetMaterializationTable()) { - this.materializationTable = new Table(other.materializationTable); - } if (other.isSetTablesUsed()) { Set __this__tablesUsed = new HashSet(other.tablesUsed); this.tablesUsed = __this__tablesUsed; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } 
this.invalidationTime = other.invalidationTime; } @@ -169,35 +168,12 @@ public Materialization deepCopy() { @Override public void clear() { - this.materializationTable = null; this.tablesUsed = null; + this.validTxnList = null; setInvalidationTimeIsSet(false); this.invalidationTime = 0; } - public Table getMaterializationTable() { - return this.materializationTable; - } - - public void setMaterializationTable(Table materializationTable) { - this.materializationTable = materializationTable; - } - - public void unsetMaterializationTable() { - this.materializationTable = null; - } - - /** Returns true if field materializationTable is set (has been assigned a value) and false otherwise */ - public boolean isSetMaterializationTable() { - return this.materializationTable != null; - } - - public void setMaterializationTableIsSet(boolean value) { - if (!value) { - this.materializationTable = null; - } - } - public int getTablesUsedSize() { return (this.tablesUsed == null) ? 0 : this.tablesUsed.size(); } @@ -236,6 +212,29 @@ public void setTablesUsedIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public long getInvalidationTime() { return this.invalidationTime; } @@ -260,19 +259,19 @@ public void setInvalidationTimeIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { - case MATERIALIZATION_TABLE: + case TABLES_USED: if (value == null) { - unsetMaterializationTable(); + unsetTablesUsed(); } else { - setMaterializationTable((Table)value); + setTablesUsed((Set)value); } break; - case TABLES_USED: + case VALID_TXN_LIST: if (value == null) { - unsetTablesUsed(); + unsetValidTxnList(); } else { - setTablesUsed((Set)value); + setValidTxnList((String)value); } break; @@ -289,12 +288,12 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case MATERIALIZATION_TABLE: - return getMaterializationTable(); - case TABLES_USED: return getTablesUsed(); + case VALID_TXN_LIST: + return getValidTxnList(); + case INVALIDATION_TIME: return getInvalidationTime(); @@ -309,10 +308,10 @@ public boolean isSet(_Fields field) { } switch (field) { - case MATERIALIZATION_TABLE: - return isSetMaterializationTable(); case TABLES_USED: return isSetTablesUsed(); + case VALID_TXN_LIST: + return isSetValidTxnList(); case INVALIDATION_TIME: return isSetInvalidationTime(); } @@ -332,15 +331,6 @@ public boolean equals(Materialization that) { if (that == null) return false; - boolean this_present_materializationTable = true && this.isSetMaterializationTable(); - boolean that_present_materializationTable = true && that.isSetMaterializationTable(); - if (this_present_materializationTable || that_present_materializationTable) { - if (!(this_present_materializationTable && that_present_materializationTable)) - return false; - if (!this.materializationTable.equals(that.materializationTable)) - return false; - } - boolean this_present_tablesUsed = true && this.isSetTablesUsed(); boolean that_present_tablesUsed = true && 
that.isSetTablesUsed(); if (this_present_tablesUsed || that_present_tablesUsed) { @@ -350,6 +340,15 @@ public boolean equals(Materialization that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + boolean this_present_invalidationTime = true; boolean that_present_invalidationTime = true; if (this_present_invalidationTime || that_present_invalidationTime) { @@ -366,16 +365,16 @@ public boolean equals(Materialization that) { public int hashCode() { List list = new ArrayList(); - boolean present_materializationTable = true && (isSetMaterializationTable()); - list.add(present_materializationTable); - if (present_materializationTable) - list.add(materializationTable); - boolean present_tablesUsed = true && (isSetTablesUsed()); list.add(present_tablesUsed); if (present_tablesUsed) list.add(tablesUsed); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + boolean present_invalidationTime = true; list.add(present_invalidationTime); if (present_invalidationTime) @@ -392,22 +391,22 @@ public int compareTo(Materialization other) { int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetMaterializationTable()).compareTo(other.isSetMaterializationTable()); + lastComparison = Boolean.valueOf(isSetTablesUsed()).compareTo(other.isSetTablesUsed()); if (lastComparison != 0) { return lastComparison; } - if (isSetMaterializationTable()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.materializationTable, other.materializationTable); + if (isSetTablesUsed()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablesUsed, other.tablesUsed); if (lastComparison != 0) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetTablesUsed()).compareTo(other.isSetTablesUsed()); + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); if (lastComparison != 0) { return lastComparison; } - if (isSetTablesUsed()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablesUsed, other.tablesUsed); + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); if (lastComparison != 0) { return lastComparison; } @@ -442,14 +441,6 @@ public String toString() { StringBuilder sb = new StringBuilder("Materialization("); boolean first = true; - sb.append("materializationTable:"); - if (this.materializationTable == null) { - sb.append("null"); - } else { - sb.append(this.materializationTable); - } - first = false; - if (!first) sb.append(", "); sb.append("tablesUsed:"); if (this.tablesUsed == null) { sb.append("null"); @@ -457,6 +448,16 @@ public String toString() { sb.append(this.tablesUsed); } first = false; + if (isSetValidTxnList()) { + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; + } if (!first) sb.append(", "); sb.append("invalidationTime:"); sb.append(this.invalidationTime); @@ -467,10 +468,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required 
fields - if (!isSetMaterializationTable()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'materializationTable' is unset! Struct:" + toString()); - } - if (!isSetTablesUsed()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'tablesUsed' is unset! Struct:" + toString()); } @@ -480,9 +477,6 @@ public void validate() throws org.apache.thrift.TException { } // check for sub-struct validity - if (materializationTable != null) { - materializationTable.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -521,16 +515,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Materialization str break; } switch (schemeField.id) { - case 1: // MATERIALIZATION_TABLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.materializationTable = new Table(); - struct.materializationTable.read(iprot); - struct.setMaterializationTableIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TABLES_USED + case 1: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { org.apache.thrift.protocol.TSet _set746 = iprot.readSetBegin(); @@ -548,6 +533,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Materialization str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 3: // INVALIDATION_TIME if (schemeField.type == org.apache.thrift.protocol.TType.I64) { struct.invalidationTime = iprot.readI64(); @@ -569,11 +562,6 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Materialization st struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.materializationTable != null) { - oprot.writeFieldBegin(MATERIALIZATION_TABLE_FIELD_DESC); - struct.materializationTable.write(oprot); - oprot.writeFieldEnd(); - } if (struct.tablesUsed != null) { oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { @@ -586,6 +574,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Materialization st } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + if (struct.isSetValidTxnList()) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldBegin(INVALIDATION_TIME_FIELD_DESC); oprot.writeI64(struct.invalidationTime); oprot.writeFieldEnd(); @@ -606,7 +601,6 @@ public MaterializationTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, Materialization struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; - struct.materializationTable.write(oprot); { oprot.writeI32(struct.tablesUsed.size()); for (String _iter750 : struct.tablesUsed) @@ -615,14 +609,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Materialization str } } oprot.writeI64(struct.invalidationTime); + BitSet optionals = new BitSet(); + if (struct.isSetValidTxnList()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Materialization struct) 
throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - struct.materializationTable = new Table(); - struct.materializationTable.read(iprot); - struct.setMaterializationTableIsSet(true); { org.apache.thrift.protocol.TSet _set751 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); struct.tablesUsed = new HashSet(2*_set751.size); @@ -636,6 +635,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Materialization stru struct.setTablesUsedIsSet(true); struct.invalidationTime = iprot.readI64(); struct.setInvalidationTimeIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index d5e3527d09..05064cb2c9 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -116,6 +116,8 @@ public Map get_materialization_invalidation_info(String dbname, List tbl_names) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException; + public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException; + public List get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException; public void alter_table(String dbname, String tbl_name, Table new_tbl) throws InvalidOperationException, MetaException, org.apache.thrift.TException; @@ -482,6 +484,8 @@ public void get_materialization_invalidation_info(String dbname, List tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void alter_table(String dbname, String tbl_name, Table new_tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -1855,6 +1859,37 @@ public void send_get_materialization_invalidation_info(String dbname, List get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException { send_get_table_names_by_filter(dbname, filter, max_tables); @@ -7323,6 +7358,44 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } + public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + update_creation_metadata_call method_call = new update_creation_metadata_call(dbname, tbl_name, creation_metadata, 
resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata_call extends org.apache.thrift.async.TAsyncMethodCall { + private String dbname; + private String tbl_name; + private CreationMetadata creation_metadata; + public update_creation_metadata_call(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.dbname = dbname; + this.tbl_name = tbl_name; + this.creation_metadata = creation_metadata; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_creation_metadata", org.apache.thrift.protocol.TMessageType.CALL, 0)); + update_creation_metadata_args args = new update_creation_metadata_args(); + args.setDbname(dbname); + args.setTbl_name(tbl_name); + args.setCreation_metadata(creation_metadata); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_update_creation_metadata(); + } + } + public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); get_table_names_by_filter_call method_call = new get_table_names_by_filter_call(dbname, filter, max_tables, resultHandler, this, ___protocolFactory, ___transport); @@ -12395,6 +12468,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public update_creation_metadata() { + super("update_creation_metadata"); + } + + public update_creation_metadata_args getEmptyArgsInstance() { + return new update_creation_metadata_args(); + } + + protected boolean isOneway() { + return false; + } + + public update_creation_metadata_result getResult(I iface, update_creation_metadata_args args) throws org.apache.thrift.TException { + update_creation_metadata_result result = new update_creation_metadata_result(); + try { + iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata); + } catch (MetaException o1) { + result.o1 = o1; + } catch (InvalidOperationException o2) { + result.o2 = o2; + } catch (UnknownDBException o3) { + result.o3 = o3; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_names_by_filter extends org.apache.thrift.ProcessFunction { public get_table_names_by_filter() { 
super("get_table_names_by_filter"); @@ -17236,6 +17338,7 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction> { - public get_table_names_by_filter() { - super("get_table_names_by_filter"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata extends org.apache.thrift.AsyncProcessFunction { + public update_creation_metadata() { + super("update_creation_metadata"); } - public get_table_names_by_filter_args getEmptyArgsInstance() { - return new get_table_names_by_filter_args(); + public update_creation_metadata_args getEmptyArgsInstance() { + return new update_creation_metadata_args(); } - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_table_names_by_filter_result result = new get_table_names_by_filter_result(); - result.success = o; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + update_creation_metadata_result result = new update_creation_metadata_result(); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -19699,7 +19801,7 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_table_names_by_filter_result result = new get_table_names_by_filter_result(); + update_creation_metadata_result result = new update_creation_metadata_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -19735,208 +19837,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler); + public void start(I iface, update_creation_metadata_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table extends org.apache.thrift.AsyncProcessFunction { - public alter_table() { - super("alter_table"); - } - - public alter_table_args getEmptyArgsInstance() { - return new alter_table_args(); - } - - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Void o) { - alter_table_result result = new alter_table_result(); - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - alter_table_result result = new alter_table_result(); - if (e instanceof InvalidOperationException) { - result.o1 = (InvalidOperationException) e; - 
result.setO1IsSet(true); - msg = result; - } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_environment_context extends org.apache.thrift.AsyncProcessFunction { - public alter_table_with_environment_context() { - super("alter_table_with_environment_context"); - } - - public alter_table_with_environment_context_args getEmptyArgsInstance() { - return new alter_table_with_environment_context_args(); - } - - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Void o) { - alter_table_with_environment_context_result result = new alter_table_with_environment_context_result(); - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - alter_table_with_environment_context_result result = new alter_table_with_environment_context_result(); - if (e instanceof InvalidOperationException) { - result.o1 = (InvalidOperationException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_cascade extends org.apache.thrift.AsyncProcessFunction { - public alter_table_with_cascade() { - super("alter_table_with_cascade"); - } - - public alter_table_with_cascade_args getEmptyArgsInstance() { - return new alter_table_with_cascade_args(); - } - - 
public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Void o) { - alter_table_with_cascade_result result = new alter_table_with_cascade_result(); - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - alter_table_with_cascade_result result = new alter_table_with_cascade_result(); - if (e instanceof InvalidOperationException) { - result.o1 = (InvalidOperationException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition extends org.apache.thrift.AsyncProcessFunction { - public add_partition() { - super("add_partition"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_names_by_filter extends org.apache.thrift.AsyncProcessFunction> { + public get_table_names_by_filter() { + super("get_table_names_by_filter"); } - public add_partition_args getEmptyArgsInstance() { - return new add_partition_args(); + public get_table_names_by_filter_args getEmptyArgsInstance() { + return new get_table_names_by_filter_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Partition o) { - add_partition_result result = new add_partition_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_table_names_by_filter_result result = new get_table_names_by_filter_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -19949,19 +19868,19 @@ public void onComplete(Partition o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - add_partition_result result = new add_partition_result(); - if (e instanceof InvalidObjectException) { - result.o1 = (InvalidObjectException) e; + get_table_names_by_filter_result result = new get_table_names_by_filter_result(); + if 
(e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof AlreadyExistsException) { - result.o2 = (AlreadyExistsException) e; + else if (e instanceof InvalidOperationException) { + result.o2 = (InvalidOperationException) e; result.setO2IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o3 = (MetaException) e; + else if (e instanceof UnknownDBException) { + result.o3 = (UnknownDBException) e; result.setO3IsSet(true); msg = result; } @@ -19985,26 +19904,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.add_partition(args.new_part,resultHandler); + public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition_with_environment_context extends org.apache.thrift.AsyncProcessFunction { - public add_partition_with_environment_context() { - super("add_partition_with_environment_context"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table extends org.apache.thrift.AsyncProcessFunction { + public alter_table() { + super("alter_table"); } - public add_partition_with_environment_context_args getEmptyArgsInstance() { - return new add_partition_with_environment_context_args(); + public alter_table_args getEmptyArgsInstance() { + return new alter_table_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Partition o) { - add_partition_with_environment_context_result result = new add_partition_with_environment_context_result(); - result.success = o; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + alter_table_result result = new alter_table_result(); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -20016,20 +19934,15 @@ public void onComplete(Partition o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - add_partition_with_environment_context_result result = new add_partition_with_environment_context_result(); - if (e instanceof InvalidObjectException) { - result.o1 = (InvalidObjectException) e; + alter_table_result result = new alter_table_result(); + if (e instanceof InvalidOperationException) { + result.o1 = (InvalidOperationException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof AlreadyExistsException) { - result.o2 = (AlreadyExistsException) e; - result.setO2IsSet(true); - msg = result; - } else if (e instanceof MetaException) { - result.o3 = (MetaException) e; - result.setO3IsSet(true); + result.o2 = (MetaException) e; + result.setO2IsSet(true); msg = result; } else @@ -20052,27 +19965,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, 
add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler); + public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions extends org.apache.thrift.AsyncProcessFunction { - public add_partitions() { - super("add_partitions"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_environment_context extends org.apache.thrift.AsyncProcessFunction { + public alter_table_with_environment_context() { + super("alter_table_with_environment_context"); } - public add_partitions_args getEmptyArgsInstance() { - return new add_partitions_args(); + public alter_table_with_environment_context_args getEmptyArgsInstance() { + return new alter_table_with_environment_context_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Integer o) { - add_partitions_result result = new add_partitions_result(); - result.success = o; - result.setSuccessIsSet(true); + return new AsyncMethodCallback() { + public void onComplete(Void o) { + alter_table_with_environment_context_result result = new alter_table_with_environment_context_result(); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -20084,20 +19995,15 @@ public void onComplete(Integer o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - add_partitions_result result = new add_partitions_result(); - if (e instanceof InvalidObjectException) { - result.o1 = (InvalidObjectException) e; + alter_table_with_environment_context_result result = new alter_table_with_environment_context_result(); + if (e instanceof InvalidOperationException) { + result.o1 = (InvalidOperationException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof AlreadyExistsException) { - result.o2 = (AlreadyExistsException) e; - result.setO2IsSet(true); - msg = result; - } else if (e instanceof MetaException) { - result.o3 = (MetaException) e; - result.setO3IsSet(true); + result.o2 = (MetaException) e; + result.setO2IsSet(true); msg = result; } else @@ -20120,27 +20026,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.add_partitions(args.new_parts,resultHandler); + public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class 
add_partitions_pspec extends org.apache.thrift.AsyncProcessFunction { - public add_partitions_pspec() { - super("add_partitions_pspec"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_cascade extends org.apache.thrift.AsyncProcessFunction { + public alter_table_with_cascade() { + super("alter_table_with_cascade"); } - public add_partitions_pspec_args getEmptyArgsInstance() { - return new add_partitions_pspec_args(); + public alter_table_with_cascade_args getEmptyArgsInstance() { + return new alter_table_with_cascade_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Integer o) { - add_partitions_pspec_result result = new add_partitions_pspec_result(); - result.success = o; - result.setSuccessIsSet(true); + return new AsyncMethodCallback() { + public void onComplete(Void o) { + alter_table_with_cascade_result result = new alter_table_with_cascade_result(); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -20152,20 +20056,15 @@ public void onComplete(Integer o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - add_partitions_pspec_result result = new add_partitions_pspec_result(); - if (e instanceof InvalidObjectException) { - result.o1 = (InvalidObjectException) e; + alter_table_with_cascade_result result = new alter_table_with_cascade_result(); + if (e instanceof InvalidOperationException) { + result.o1 = (InvalidOperationException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof AlreadyExistsException) { - result.o2 = (AlreadyExistsException) e; - result.setO2IsSet(true); - msg = result; - } else if (e instanceof MetaException) { - result.o3 = (MetaException) e; - result.setO3IsSet(true); + result.o2 = (MetaException) e; + result.setO2IsSet(true); msg = result; } else @@ -20188,25 +20087,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.add_partitions_pspec(args.new_parts,resultHandler); + public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition extends org.apache.thrift.AsyncProcessFunction { - public append_partition() { - super("append_partition"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition extends org.apache.thrift.AsyncProcessFunction { + public add_partition() { + super("add_partition"); } - public append_partition_args getEmptyArgsInstance() { - return new append_partition_args(); + public add_partition_args getEmptyArgsInstance() { + return new add_partition_args(); } public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final 
org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback() { public void onComplete(Partition o) { - append_partition_result result = new append_partition_result(); + add_partition_result result = new add_partition_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -20219,7 +20118,7 @@ public void onComplete(Partition o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - append_partition_result result = new append_partition_result(); + add_partition_result result = new add_partition_result(); if (e instanceof InvalidObjectException) { result.o1 = (InvalidObjectException) e; result.setO1IsSet(true); @@ -20255,25 +20154,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler); + public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.add_partition(args.new_part,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_req extends org.apache.thrift.AsyncProcessFunction { - public add_partitions_req() { - super("add_partitions_req"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition_with_environment_context extends org.apache.thrift.AsyncProcessFunction { + public add_partition_with_environment_context() { + super("add_partition_with_environment_context"); } - public add_partitions_req_args getEmptyArgsInstance() { - return new add_partitions_req_args(); + public add_partition_with_environment_context_args getEmptyArgsInstance() { + return new add_partition_with_environment_context_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(AddPartitionsResult o) { - add_partitions_req_result result = new add_partitions_req_result(); + return new AsyncMethodCallback() { + public void onComplete(Partition o) { + add_partition_with_environment_context_result result = new add_partition_with_environment_context_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -20286,7 +20185,7 @@ public void onComplete(AddPartitionsResult o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - add_partitions_req_result result = new add_partitions_req_result(); + add_partition_with_environment_context_result result = new add_partition_with_environment_context_result(); if (e instanceof InvalidObjectException) { result.o1 = (InvalidObjectException) e; result.setO1IsSet(true); @@ -20322,26 +20221,27 @@ protected boolean isOneway() { return false; } - public void start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - 
iface.add_partitions_req(args.request,resultHandler); + public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_with_environment_context extends org.apache.thrift.AsyncProcessFunction { - public append_partition_with_environment_context() { - super("append_partition_with_environment_context"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions extends org.apache.thrift.AsyncProcessFunction { + public add_partitions() { + super("add_partitions"); } - public append_partition_with_environment_context_args getEmptyArgsInstance() { - return new append_partition_with_environment_context_args(); + public add_partitions_args getEmptyArgsInstance() { + return new add_partitions_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Partition o) { - append_partition_with_environment_context_result result = new append_partition_with_environment_context_result(); + return new AsyncMethodCallback() { + public void onComplete(Integer o) { + add_partitions_result result = new add_partitions_result(); result.success = o; + result.setSuccessIsSet(true); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -20353,7 +20253,7 @@ public void onComplete(Partition o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - append_partition_with_environment_context_result result = new append_partition_with_environment_context_result(); + add_partitions_result result = new add_partitions_result(); if (e instanceof InvalidObjectException) { result.o1 = (InvalidObjectException) e; result.setO1IsSet(true); @@ -20389,26 +20289,27 @@ protected boolean isOneway() { return false; } - public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler); + public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.add_partitions(args.new_parts,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name extends org.apache.thrift.AsyncProcessFunction { - public append_partition_by_name() { - super("append_partition_by_name"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_pspec extends org.apache.thrift.AsyncProcessFunction { + public add_partitions_pspec() { + super("add_partitions_pspec"); } - public append_partition_by_name_args getEmptyArgsInstance() { - 
return new append_partition_by_name_args(); + public add_partitions_pspec_args getEmptyArgsInstance() { + return new add_partitions_pspec_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Partition o) { - append_partition_by_name_result result = new append_partition_by_name_result(); + return new AsyncMethodCallback() { + public void onComplete(Integer o) { + add_partitions_pspec_result result = new add_partitions_pspec_result(); result.success = o; + result.setSuccessIsSet(true); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -20420,7 +20321,7 @@ public void onComplete(Partition o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - append_partition_by_name_result result = new append_partition_by_name_result(); + add_partitions_pspec_result result = new add_partitions_pspec_result(); if (e instanceof InvalidObjectException) { result.o1 = (InvalidObjectException) e; result.setO1IsSet(true); @@ -20456,25 +20357,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler); + public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.add_partitions_pspec(args.new_parts,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name_with_environment_context extends org.apache.thrift.AsyncProcessFunction { - public append_partition_by_name_with_environment_context() { - super("append_partition_by_name_with_environment_context"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition extends org.apache.thrift.AsyncProcessFunction { + public append_partition() { + super("append_partition"); } - public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() { - return new append_partition_by_name_with_environment_context_args(); + public append_partition_args getEmptyArgsInstance() { + return new append_partition_args(); } public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback() { public void onComplete(Partition o) { - append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result(); + append_partition_result result = new append_partition_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -20487,7 +20388,7 @@ public void onComplete(Partition o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result(); 
+ append_partition_result result = new append_partition_result(); if (e instanceof InvalidObjectException) { result.o1 = (InvalidObjectException) e; result.setO1IsSet(true); @@ -20523,27 +20424,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, append_partition_by_name_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context,resultHandler); + public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_partition extends org.apache.thrift.AsyncProcessFunction { - public drop_partition() { - super("drop_partition"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_req extends org.apache.thrift.AsyncProcessFunction { + public add_partitions_req() { + super("add_partitions_req"); } - public drop_partition_args getEmptyArgsInstance() { - return new drop_partition_args(); + public add_partitions_req_args getEmptyArgsInstance() { + return new add_partitions_req_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Boolean o) { - drop_partition_result result = new drop_partition_result(); + return new AsyncMethodCallback() { + public void onComplete(AddPartitionsResult o) { + add_partitions_req_result result = new add_partitions_req_result(); result.success = o; - result.setSuccessIsSet(true); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -20555,14 +20455,283 @@ public void onComplete(Boolean o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - drop_partition_result result = new drop_partition_result(); - if (e instanceof NoSuchObjectException) { - result.o1 = (NoSuchObjectException) e; + add_partitions_req_result result = new add_partitions_req_result(); + if (e instanceof InvalidObjectException) { + result.o1 = (InvalidObjectException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof AlreadyExistsException) { + result.o2 = (AlreadyExistsException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void 
start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.add_partitions_req(args.request,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_with_environment_context extends org.apache.thrift.AsyncProcessFunction { + public append_partition_with_environment_context() { + super("append_partition_with_environment_context"); + } + + public append_partition_with_environment_context_args getEmptyArgsInstance() { + return new append_partition_with_environment_context_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Partition o) { + append_partition_with_environment_context_result result = new append_partition_with_environment_context_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + append_partition_with_environment_context_result result = new append_partition_with_environment_context_result(); + if (e instanceof InvalidObjectException) { + result.o1 = (InvalidObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof AlreadyExistsException) { + result.o2 = (AlreadyExistsException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name extends org.apache.thrift.AsyncProcessFunction { + public append_partition_by_name() { + super("append_partition_by_name"); + } + + public append_partition_by_name_args getEmptyArgsInstance() { + return new append_partition_by_name_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Partition o) { + append_partition_by_name_result result = new append_partition_by_name_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch 
(Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + append_partition_by_name_result result = new append_partition_by_name_result(); + if (e instanceof InvalidObjectException) { + result.o1 = (InvalidObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof AlreadyExistsException) { + result.o2 = (AlreadyExistsException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name_with_environment_context extends org.apache.thrift.AsyncProcessFunction { + public append_partition_by_name_with_environment_context() { + super("append_partition_by_name_with_environment_context"); + } + + public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() { + return new append_partition_by_name_with_environment_context_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Partition o) { + append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result(); + if (e instanceof InvalidObjectException) { + result.o1 = (InvalidObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof AlreadyExistsException) { + result.o2 = (AlreadyExistsException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + 
LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, append_partition_by_name_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_partition extends org.apache.thrift.AsyncProcessFunction { + public drop_partition() { + super("drop_partition"); + } + + public drop_partition_args getEmptyArgsInstance() { + return new drop_partition_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Boolean o) { + drop_partition_result result = new drop_partition_result(); + result.success = o; + result.setSuccessIsSet(true); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + drop_partition_result result = new drop_partition_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; result.setO2IsSet(true); msg = result; } @@ -63884,7 +64053,1201 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetTablesResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetTablesResult.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_objects_by_name_req_result.class, metaDataMap); + } + + public get_table_objects_by_name_req_result() { + } + + public get_table_objects_by_name_req_result( + GetTablesResult success, + MetaException o1, + InvalidOperationException o2, + UnknownDBException o3) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + 
this.o3 = o3; + } + + /** + * Performs a deep copy on other. + */ + public get_table_objects_by_name_req_result(get_table_objects_by_name_req_result other) { + if (other.isSetSuccess()) { + this.success = new GetTablesResult(other.success); + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidOperationException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new UnknownDBException(other.o3); + } + } + + public get_table_objects_by_name_req_result deepCopy() { + return new get_table_objects_by_name_req_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public GetTablesResult getSuccess() { + return this.success; + } + + public void setSuccess(GetTablesResult success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidOperationException getO2() { + return this.o2; + } + + public void setO2(InvalidOperationException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public UnknownDBException getO3() { + return this.o3; + } + + public void setO3(UnknownDBException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((GetTablesResult)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidOperationException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((UnknownDBException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + 
return isSetO2(); + case O3: + return isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_table_objects_by_name_req_result) + return this.equals((get_table_objects_by_name_req_result)that); + return false; + } + + public boolean equals(get_table_objects_by_name_req_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + return list.hashCode(); + } + + @Override + public int compareTo(get_table_objects_by_name_req_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public 
_Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_table_objects_by_name_req_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_table_objects_by_name_req_resultStandardSchemeFactory implements SchemeFactory { + public get_table_objects_by_name_req_resultStandardScheme getScheme() { + return new get_table_objects_by_name_req_resultStandardScheme(); + } + } + + private static class get_table_objects_by_name_req_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetTablesResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new UnknownDBException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_table_objects_by_name_req_resultTupleSchemeFactory implements SchemeFactory { + public get_table_objects_by_name_req_resultTupleScheme getScheme() { + return new get_table_objects_by_name_req_resultTupleScheme(); + } + } + + private static class get_table_objects_by_name_req_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(4); + if (incoming.get(0)) { + struct.success = new GetTablesResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(3)) { + struct.o3 = new UnknownDBException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialization_invalidation_info_args"); 
+ + private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_names", org.apache.thrift.protocol.TType.LIST, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_materialization_invalidation_info_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_materialization_invalidation_info_argsTupleSchemeFactory()); + } + + private String dbname; // required + private List tbl_names; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DBNAME((short)1, "dbname"), + TBL_NAMES((short)2, "tbl_names"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DBNAME + return DBNAME; + case 2: // TBL_NAMES + return TBL_NAMES; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAMES, new org.apache.thrift.meta_data.FieldMetaData("tbl_names", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_materialization_invalidation_info_args.class, metaDataMap); + } + + public get_materialization_invalidation_info_args() { + } + + public get_materialization_invalidation_info_args( + String dbname, + List tbl_names) + { + this(); + this.dbname = dbname; + this.tbl_names = tbl_names; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_materialization_invalidation_info_args(get_materialization_invalidation_info_args other) { + if (other.isSetDbname()) { + this.dbname = other.dbname; + } + if (other.isSetTbl_names()) { + List __this__tbl_names = new ArrayList(other.tbl_names); + this.tbl_names = __this__tbl_names; + } + } + + public get_materialization_invalidation_info_args deepCopy() { + return new get_materialization_invalidation_info_args(this); + } + + @Override + public void clear() { + this.dbname = null; + this.tbl_names = null; + } + + public String getDbname() { + return this.dbname; + } + + public void setDbname(String dbname) { + this.dbname = dbname; + } + + public void unsetDbname() { + this.dbname = null; + } + + /** Returns true if field dbname is set (has been assigned a value) and false otherwise */ + public boolean isSetDbname() { + return this.dbname != null; + } + + public void setDbnameIsSet(boolean value) { + if (!value) { + this.dbname = null; + } + } + + public int getTbl_namesSize() { + return (this.tbl_names == null) ? 0 : this.tbl_names.size(); + } + + public java.util.Iterator getTbl_namesIterator() { + return (this.tbl_names == null) ? null : this.tbl_names.iterator(); + } + + public void addToTbl_names(String elem) { + if (this.tbl_names == null) { + this.tbl_names = new ArrayList(); + } + this.tbl_names.add(elem); + } + + public List getTbl_names() { + return this.tbl_names; + } + + public void setTbl_names(List tbl_names) { + this.tbl_names = tbl_names; + } + + public void unsetTbl_names() { + this.tbl_names = null; + } + + /** Returns true if field tbl_names is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_names() { + return this.tbl_names != null; + } + + public void setTbl_namesIsSet(boolean value) { + if (!value) { + this.tbl_names = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DBNAME: + if (value == null) { + unsetDbname(); + } else { + setDbname((String)value); + } + break; + + case TBL_NAMES: + if (value == null) { + unsetTbl_names(); + } else { + setTbl_names((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DBNAME: + return getDbname(); + + case TBL_NAMES: + return getTbl_names(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DBNAME: + return isSetDbname(); + case TBL_NAMES: + return isSetTbl_names(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_materialization_invalidation_info_args) + return this.equals((get_materialization_invalidation_info_args)that); + return false; + } + + public boolean equals(get_materialization_invalidation_info_args that) { + if (that == null) + return false; + + boolean this_present_dbname = true && this.isSetDbname(); + boolean that_present_dbname = true && that.isSetDbname(); + if (this_present_dbname || that_present_dbname) { + if (!(this_present_dbname && that_present_dbname)) + return false; + if (!this.dbname.equals(that.dbname)) + return false; + } + + boolean this_present_tbl_names = true && this.isSetTbl_names(); + boolean that_present_tbl_names = true && that.isSetTbl_names(); + if (this_present_tbl_names || 
that_present_tbl_names) { + if (!(this_present_tbl_names && that_present_tbl_names)) + return false; + if (!this.tbl_names.equals(that.tbl_names)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbname = true && (isSetDbname()); + list.add(present_dbname); + if (present_dbname) + list.add(dbname); + + boolean present_tbl_names = true && (isSetTbl_names()); + list.add(present_tbl_names); + if (present_tbl_names) + list.add(tbl_names); + + return list.hashCode(); + } + + @Override + public int compareTo(get_materialization_invalidation_info_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbname()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, other.dbname); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_names()).compareTo(other.isSetTbl_names()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_names()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_names, other.tbl_names); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_materialization_invalidation_info_args("); + boolean first = true; + + sb.append("dbname:"); + if (this.dbname == null) { + sb.append("null"); + } else { + sb.append(this.dbname); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_names:"); + if (this.tbl_names == null) { + sb.append("null"); + } else { + sb.append(this.tbl_names); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_materialization_invalidation_info_argsStandardSchemeFactory implements SchemeFactory { + public get_materialization_invalidation_info_argsStandardScheme getScheme() { + return new get_materialization_invalidation_info_argsStandardScheme(); + } + } + + private static class get_materialization_invalidation_info_argsStandardScheme extends StandardScheme { + + public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_materialization_invalidation_info_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DBNAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbname = iprot.readString(); + struct.setDbnameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAMES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list980 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list980.size); + String _elem981; + for (int _i982 = 0; _i982 < _list980.size; ++_i982) + { + _elem981 = iprot.readString(); + struct.tbl_names.add(_elem981); + } + iprot.readListEnd(); + } + struct.setTbl_namesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialization_invalidation_info_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbname != null) { + oprot.writeFieldBegin(DBNAME_FIELD_DESC); + oprot.writeString(struct.dbname); + oprot.writeFieldEnd(); + } + if (struct.tbl_names != null) { + oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); + for (String _iter983 : struct.tbl_names) + { + oprot.writeString(_iter983); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_materialization_invalidation_info_argsTupleSchemeFactory implements SchemeFactory { + public get_materialization_invalidation_info_argsTupleScheme getScheme() { + return new get_materialization_invalidation_info_argsTupleScheme(); + } + } + + private static class get_materialization_invalidation_info_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization_invalidation_info_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetDbname()) { + optionals.set(0); + } + if (struct.isSetTbl_names()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetDbname()) { + oprot.writeString(struct.dbname); + } + if (struct.isSetTbl_names()) { + { + oprot.writeI32(struct.tbl_names.size()); + for (String _iter984 : struct.tbl_names) + { + oprot.writeString(_iter984); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_invalidation_info_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.dbname = iprot.readString(); + struct.setDbnameIsSet(true); + } + if (incoming.get(1)) { + { + org.apache.thrift.protocol.TList _list985 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list985.size); + String _elem986; + for (int _i987 = 0; _i987 < _list985.size; ++_i987) + { + _elem986 = iprot.readString(); + struct.tbl_names.add(_elem986); + } + } + struct.setTbl_namesIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialization_invalidation_info_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_materialization_invalidation_info_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_materialization_invalidation_info_resultTupleSchemeFactory()); + } + + private Map success; // required + private MetaException o1; // required + private InvalidOperationException o2; // required + private UnknownDBException o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Materialization.class)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -63892,14 +65255,14 @@ public String getFieldName() { tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_objects_by_name_req_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_materialization_invalidation_info_result.class, metaDataMap); } - public get_table_objects_by_name_req_result() { + public get_materialization_invalidation_info_result() { } - public get_table_objects_by_name_req_result( - GetTablesResult success, + public get_materialization_invalidation_info_result( + Map success, MetaException o1, InvalidOperationException o2, UnknownDBException o3) @@ -63914,9 +65277,21 @@ public get_table_objects_by_name_req_result( /** * Performs a deep copy on other. 
*/ - public get_table_objects_by_name_req_result(get_table_objects_by_name_req_result other) { + public get_materialization_invalidation_info_result(get_materialization_invalidation_info_result other) { if (other.isSetSuccess()) { - this.success = new GetTablesResult(other.success); + Map __this__success = new HashMap(other.success.size()); + for (Map.Entry other_element : other.success.entrySet()) { + + String other_element_key = other_element.getKey(); + Materialization other_element_value = other_element.getValue(); + + String __this__success_copy_key = other_element_key; + + Materialization __this__success_copy_value = new Materialization(other_element_value); + + __this__success.put(__this__success_copy_key, __this__success_copy_value); + } + this.success = __this__success; } if (other.isSetO1()) { this.o1 = new MetaException(other.o1); @@ -63929,8 +65304,8 @@ public get_table_objects_by_name_req_result(get_table_objects_by_name_req_result } } - public get_table_objects_by_name_req_result deepCopy() { - return new get_table_objects_by_name_req_result(this); + public get_materialization_invalidation_info_result deepCopy() { + return new get_materialization_invalidation_info_result(this); } @Override @@ -63941,11 +65316,22 @@ public void clear() { this.o3 = null; } - public GetTablesResult getSuccess() { + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public void putToSuccess(String key, Materialization val) { + if (this.success == null) { + this.success = new HashMap(); + } + this.success.put(key, val); + } + + public Map getSuccess() { return this.success; } - public void setSuccess(GetTablesResult success) { + public void setSuccess(Map success) { this.success = success; } @@ -64039,7 +65425,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((GetTablesResult)value); + setSuccess((Map)value); } break; @@ -64111,12 +65497,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_table_objects_by_name_req_result) - return this.equals((get_table_objects_by_name_req_result)that); + if (that instanceof get_materialization_invalidation_info_result) + return this.equals((get_materialization_invalidation_info_result)that); return false; } - public boolean equals(get_table_objects_by_name_req_result that) { + public boolean equals(get_materialization_invalidation_info_result that) { if (that == null) return false; @@ -64187,7 +65573,7 @@ public int hashCode() { } @Override - public int compareTo(get_table_objects_by_name_req_result other) { + public int compareTo(get_materialization_invalidation_info_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -64251,7 +65637,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_table_objects_by_name_req_result("); + StringBuilder sb = new StringBuilder("get_materialization_invalidation_info_result("); boolean first = true; sb.append("success:"); @@ -64292,9 +65678,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -64313,15 +65696,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_table_objects_by_name_req_resultStandardSchemeFactory implements SchemeFactory { - public get_table_objects_by_name_req_resultStandardScheme getScheme() { - return new get_table_objects_by_name_req_resultStandardScheme(); + private static class get_materialization_invalidation_info_resultStandardSchemeFactory implements SchemeFactory { + public get_materialization_invalidation_info_resultStandardScheme getScheme() { + return new get_materialization_invalidation_info_resultStandardScheme(); } } - private static class get_table_objects_by_name_req_resultStandardScheme extends StandardScheme { + private static class get_materialization_invalidation_info_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization_invalidation_info_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -64332,9 +65715,21 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetTablesResult(); - struct.success.read(iprot); + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map988 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map988.size); + String _key989; + Materialization _val990; + for (int _i991 = 0; _i991 < _map988.size; ++_i991) + { + _key989 = iprot.readString(); + _val990 = new Materialization(); + _val990.read(iprot); + struct.success.put(_key989, _val990); + } + iprot.readMapEnd(); + } struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -64376,13 +65771,21 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialization_invalidation_info_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (Map.Entry _iter992 : struct.success.entrySet()) + { + oprot.writeString(_iter992.getKey()); + _iter992.getValue().write(oprot); + } + 
oprot.writeMapEnd(); + } oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -64406,16 +65809,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ } - private static class get_table_objects_by_name_req_resultTupleSchemeFactory implements SchemeFactory { - public get_table_objects_by_name_req_resultTupleScheme getScheme() { - return new get_table_objects_by_name_req_resultTupleScheme(); + private static class get_materialization_invalidation_info_resultTupleSchemeFactory implements SchemeFactory { + public get_materialization_invalidation_info_resultTupleScheme getScheme() { + return new get_materialization_invalidation_info_resultTupleScheme(); } } - private static class get_table_objects_by_name_req_resultTupleScheme extends TupleScheme { + private static class get_materialization_invalidation_info_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization_invalidation_info_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -64432,7 +65835,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b } oprot.writeBitSet(optionals, 4); if (struct.isSetSuccess()) { - struct.success.write(oprot); + { + oprot.writeI32(struct.success.size()); + for (Map.Entry _iter993 : struct.success.entrySet()) + { + oprot.writeString(_iter993.getKey()); + _iter993.getValue().write(oprot); + } + } } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -64446,12 +65856,23 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_invalidation_info_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.success = new GetTablesResult(); - struct.success.read(iprot); + { + org.apache.thrift.protocol.TMap _map994 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map994.size); + String _key995; + Materialization _val996; + for (int _i997 = 0; _i997 < _map994.size; ++_i997) + { + _key995 = iprot.readString(); + _val996 = new Materialization(); + _val996.read(iprot); + struct.success.put(_key995, _val996); + } + } struct.setSuccessIsSet(true); } if (incoming.get(1)) { @@ -64474,25 +65895,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialization_invalidation_info_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class 
update_creation_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_creation_metadata_args"); private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_names", org.apache.thrift.protocol.TType.LIST, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creation_metadata", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_materialization_invalidation_info_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_materialization_invalidation_info_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new update_creation_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_creation_metadata_argsTupleSchemeFactory()); } private String dbname; // required - private List tbl_names; // required + private String tbl_name; // required + private CreationMetadata creation_metadata; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), - TBL_NAMES((short)2, "tbl_names"); + TBL_NAME((short)2, "tbl_name"), + CREATION_METADATA((short)3, "creation_metadata"); private static final Map byName = new HashMap(); @@ -64509,8 +65933,10 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // DBNAME return DBNAME; - case 2: // TBL_NAMES - return TBL_NAMES; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // CREATION_METADATA + return CREATION_METADATA; default: return null; } @@ -64556,46 +65982,52 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TBL_NAMES, new org.apache.thrift.meta_data.FieldMetaData("tbl_names", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CREATION_METADATA, new org.apache.thrift.meta_data.FieldMetaData("creation_metadata", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, 
CreationMetadata.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_materialization_invalidation_info_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_creation_metadata_args.class, metaDataMap); } - public get_materialization_invalidation_info_args() { + public update_creation_metadata_args() { } - public get_materialization_invalidation_info_args( + public update_creation_metadata_args( String dbname, - List tbl_names) + String tbl_name, + CreationMetadata creation_metadata) { this(); this.dbname = dbname; - this.tbl_names = tbl_names; + this.tbl_name = tbl_name; + this.creation_metadata = creation_metadata; } /** * Performs a deep copy on other. */ - public get_materialization_invalidation_info_args(get_materialization_invalidation_info_args other) { + public update_creation_metadata_args(update_creation_metadata_args other) { if (other.isSetDbname()) { this.dbname = other.dbname; } - if (other.isSetTbl_names()) { - List __this__tbl_names = new ArrayList(other.tbl_names); - this.tbl_names = __this__tbl_names; + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetCreation_metadata()) { + this.creation_metadata = new CreationMetadata(other.creation_metadata); } } - public get_materialization_invalidation_info_args deepCopy() { - return new get_materialization_invalidation_info_args(this); + public update_creation_metadata_args deepCopy() { + return new update_creation_metadata_args(this); } @Override public void clear() { this.dbname = null; - this.tbl_names = null; + this.tbl_name = null; + this.creation_metadata = null; } public String getDbname() { @@ -64621,41 +66053,49 @@ public void setDbnameIsSet(boolean value) { } } - public int getTbl_namesSize() { - return (this.tbl_names == null) ? 0 : this.tbl_names.size(); + public String getTbl_name() { + return this.tbl_name; } - public java.util.Iterator getTbl_namesIterator() { - return (this.tbl_names == null) ? 
null : this.tbl_names.iterator(); + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; } - public void addToTbl_names(String elem) { - if (this.tbl_names == null) { - this.tbl_names = new ArrayList(); + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; } - this.tbl_names.add(elem); } - public List getTbl_names() { - return this.tbl_names; + public CreationMetadata getCreation_metadata() { + return this.creation_metadata; } - public void setTbl_names(List tbl_names) { - this.tbl_names = tbl_names; + public void setCreation_metadata(CreationMetadata creation_metadata) { + this.creation_metadata = creation_metadata; } - public void unsetTbl_names() { - this.tbl_names = null; + public void unsetCreation_metadata() { + this.creation_metadata = null; } - /** Returns true if field tbl_names is set (has been assigned a value) and false otherwise */ - public boolean isSetTbl_names() { - return this.tbl_names != null; + /** Returns true if field creation_metadata is set (has been assigned a value) and false otherwise */ + public boolean isSetCreation_metadata() { + return this.creation_metadata != null; } - public void setTbl_namesIsSet(boolean value) { + public void setCreation_metadataIsSet(boolean value) { if (!value) { - this.tbl_names = null; + this.creation_metadata = null; } } @@ -64669,11 +66109,19 @@ public void setFieldValue(_Fields field, Object value) { } break; - case TBL_NAMES: + case TBL_NAME: if (value == null) { - unsetTbl_names(); + unsetTbl_name(); } else { - setTbl_names((List)value); + setTbl_name((String)value); + } + break; + + case CREATION_METADATA: + if (value == null) { + unsetCreation_metadata(); + } else { + setCreation_metadata((CreationMetadata)value); } break; @@ -64685,8 +66133,11 @@ public Object getFieldValue(_Fields field) { case DBNAME: return getDbname(); - case TBL_NAMES: - return getTbl_names(); + case TBL_NAME: + return getTbl_name(); + + case CREATION_METADATA: + return getCreation_metadata(); } throw new IllegalStateException(); @@ -64701,8 +66152,10 @@ public boolean isSet(_Fields field) { switch (field) { case DBNAME: return isSetDbname(); - case TBL_NAMES: - return isSetTbl_names(); + case TBL_NAME: + return isSetTbl_name(); + case CREATION_METADATA: + return isSetCreation_metadata(); } throw new IllegalStateException(); } @@ -64711,12 +66164,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_materialization_invalidation_info_args) - return this.equals((get_materialization_invalidation_info_args)that); + if (that instanceof update_creation_metadata_args) + return this.equals((update_creation_metadata_args)that); return false; } - public boolean equals(get_materialization_invalidation_info_args that) { + public boolean equals(update_creation_metadata_args that) { if (that == null) return false; @@ -64729,12 +66182,21 @@ public boolean equals(get_materialization_invalidation_info_args that) { return false; } - boolean this_present_tbl_names = true && this.isSetTbl_names(); - boolean that_present_tbl_names = true && that.isSetTbl_names(); - if (this_present_tbl_names || that_present_tbl_names) { - if (!(this_present_tbl_names && that_present_tbl_names)) + boolean this_present_tbl_name = 
true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) return false; - if (!this.tbl_names.equals(that.tbl_names)) + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_creation_metadata = true && this.isSetCreation_metadata(); + boolean that_present_creation_metadata = true && that.isSetCreation_metadata(); + if (this_present_creation_metadata || that_present_creation_metadata) { + if (!(this_present_creation_metadata && that_present_creation_metadata)) + return false; + if (!this.creation_metadata.equals(that.creation_metadata)) return false; } @@ -64750,16 +66212,21 @@ public int hashCode() { if (present_dbname) list.add(dbname); - boolean present_tbl_names = true && (isSetTbl_names()); - list.add(present_tbl_names); - if (present_tbl_names) - list.add(tbl_names); + boolean present_tbl_name = true && (isSetTbl_name()); + list.add(present_tbl_name); + if (present_tbl_name) + list.add(tbl_name); + + boolean present_creation_metadata = true && (isSetCreation_metadata()); + list.add(present_creation_metadata); + if (present_creation_metadata) + list.add(creation_metadata); return list.hashCode(); } @Override - public int compareTo(get_materialization_invalidation_info_args other) { + public int compareTo(update_creation_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -64776,12 +66243,22 @@ public int compareTo(get_materialization_invalidation_info_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetTbl_names()).compareTo(other.isSetTbl_names()); + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); if (lastComparison != 0) { return lastComparison; } - if (isSetTbl_names()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_names, other.tbl_names); + if (isSetTbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetCreation_metadata()).compareTo(other.isSetCreation_metadata()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCreation_metadata()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.creation_metadata, other.creation_metadata); if (lastComparison != 0) { return lastComparison; } @@ -64803,7 +66280,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_materialization_invalidation_info_args("); + StringBuilder sb = new StringBuilder("update_creation_metadata_args("); boolean first = true; sb.append("dbname:"); @@ -64814,11 +66291,19 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("tbl_names:"); - if (this.tbl_names == null) { + sb.append("tbl_name:"); + if (this.tbl_name == null) { sb.append("null"); } else { - sb.append(this.tbl_names); + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("creation_metadata:"); + if (this.creation_metadata == null) { + sb.append("null"); + } else { + sb.append(this.creation_metadata); } first = false; sb.append(")"); @@ -64828,6 +66313,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (creation_metadata != null) { + creation_metadata.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -64846,15 +66334,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_materialization_invalidation_info_argsStandardSchemeFactory implements SchemeFactory { - public get_materialization_invalidation_info_argsStandardScheme getScheme() { - return new get_materialization_invalidation_info_argsStandardScheme(); + private static class update_creation_metadata_argsStandardSchemeFactory implements SchemeFactory { + public update_creation_metadata_argsStandardScheme getScheme() { + return new update_creation_metadata_argsStandardScheme(); } } - private static class get_materialization_invalidation_info_argsStandardScheme extends StandardScheme { + private static class update_creation_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization_invalidation_info_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_creation_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -64872,20 +66360,19 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAMES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list980 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list980.size); - String _elem981; - for (int _i982 = 0; _i982 < _list980.size; ++_i982) - { - _elem981 = iprot.readString(); - struct.tbl_names.add(_elem981); - } - iprot.readListEnd(); - } - struct.setTbl_namesIsSet(true); + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // CREATION_METADATA + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.creation_metadata = new CreationMetadata(); + struct.creation_metadata.read(iprot); + struct.setCreation_metadataIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -64899,7 +66386,7 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_materialization struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialization_invalidation_info_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_creation_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -64908,16 +66395,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeString(struct.dbname); oprot.writeFieldEnd(); } - if (struct.tbl_names != null) { - oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter983 : struct.tbl_names) - { - oprot.writeString(_iter983); - } - oprot.writeListEnd(); - } + if (struct.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tbl_name); + oprot.writeFieldEnd(); + } + if (struct.creation_metadata != null) { + oprot.writeFieldBegin(CREATION_METADATA_FIELD_DESC); + struct.creation_metadata.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -64926,87 +66411,80 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio } - private static class get_materialization_invalidation_info_argsTupleSchemeFactory implements SchemeFactory { - public get_materialization_invalidation_info_argsTupleScheme getScheme() { - return new get_materialization_invalidation_info_argsTupleScheme(); + private static class update_creation_metadata_argsTupleSchemeFactory implements SchemeFactory { + public update_creation_metadata_argsTupleScheme getScheme() { + return new update_creation_metadata_argsTupleScheme(); } } - private static class get_materialization_invalidation_info_argsTupleScheme extends TupleScheme { + private static class update_creation_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization_invalidation_info_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_creation_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDbname()) { optionals.set(0); } - if (struct.isSetTbl_names()) { + if (struct.isSetTbl_name()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetCreation_metadata()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDbname()) { oprot.writeString(struct.dbname); } - if (struct.isSetTbl_names()) { - { - oprot.writeI32(struct.tbl_names.size()); - for (String _iter984 : struct.tbl_names) - { - oprot.writeString(_iter984); - } - } + if (struct.isSetTbl_name()) { + oprot.writeString(struct.tbl_name); + } + if (struct.isSetCreation_metadata()) { + struct.creation_metadata.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_invalidation_info_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, update_creation_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.dbname = 
iprot.readString(); struct.setDbnameIsSet(true); } if (incoming.get(1)) { - { - org.apache.thrift.protocol.TList _list985 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list985.size); - String _elem986; - for (int _i987 = 0; _i987 < _list985.size; ++_i987) - { - _elem986 = iprot.readString(); - struct.tbl_names.add(_elem986); - } - } - struct.setTbl_namesIsSet(true); + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } + if (incoming.get(2)) { + struct.creation_metadata = new CreationMetadata(); + struct.creation_metadata.read(iprot); + struct.setCreation_metadataIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialization_invalidation_info_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_creation_metadata_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_materialization_invalidation_info_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_materialization_invalidation_info_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new update_creation_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_creation_metadata_resultTupleSchemeFactory()); } - private Map success; // required private MetaException o1; // required private InvalidOperationException o2; // required private UnknownDBException o3; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"), O1((short)1, "o1"), O2((short)2, "o2"), O3((short)3, "o3"); @@ -65024,8 +66502,6 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; case 1: // O1 return O1; case 2: // O2 @@ -65075,10 +66551,6 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Materialization.class)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -65086,20 +66558,18 @@ public String getFieldName() { tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_materialization_invalidation_info_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_creation_metadata_result.class, metaDataMap); } - public get_materialization_invalidation_info_result() { + public update_creation_metadata_result() { } - public get_materialization_invalidation_info_result( - Map success, + public update_creation_metadata_result( MetaException o1, InvalidOperationException o2, UnknownDBException o3) { this(); - this.success = success; this.o1 = o1; this.o2 = o2; this.o3 = o3; @@ -65108,22 +66578,7 @@ public get_materialization_invalidation_info_result( /** * Performs a deep copy on other. 
*/ - public get_materialization_invalidation_info_result(get_materialization_invalidation_info_result other) { - if (other.isSetSuccess()) { - Map __this__success = new HashMap(other.success.size()); - for (Map.Entry other_element : other.success.entrySet()) { - - String other_element_key = other_element.getKey(); - Materialization other_element_value = other_element.getValue(); - - String __this__success_copy_key = other_element_key; - - Materialization __this__success_copy_value = new Materialization(other_element_value); - - __this__success.put(__this__success_copy_key, __this__success_copy_value); - } - this.success = __this__success; - } + public update_creation_metadata_result(update_creation_metadata_result other) { if (other.isSetO1()) { this.o1 = new MetaException(other.o1); } @@ -65135,52 +66590,17 @@ public get_materialization_invalidation_info_result(get_materialization_invalida } } - public get_materialization_invalidation_info_result deepCopy() { - return new get_materialization_invalidation_info_result(this); + public update_creation_metadata_result deepCopy() { + return new update_creation_metadata_result(this); } @Override public void clear() { - this.success = null; this.o1 = null; this.o2 = null; this.o3 = null; } - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - public void putToSuccess(String key, Materialization val) { - if (this.success == null) { - this.success = new HashMap(); - } - this.success.put(key, val); - } - - public Map getSuccess() { - return this.success; - } - - public void setSuccess(Map success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - public MetaException getO1() { return this.o1; } @@ -65252,14 +66672,6 @@ public void setO3IsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((Map)value); - } - break; - case O1: if (value == null) { unsetO1(); @@ -65289,9 +66701,6 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return getSuccess(); - case O1: return getO1(); @@ -65312,8 +66721,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); case O1: return isSetO1(); case O2: @@ -65328,24 +66735,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_materialization_invalidation_info_result) - return this.equals((get_materialization_invalidation_info_result)that); + if (that instanceof update_creation_metadata_result) + return this.equals((update_creation_metadata_result)that); return false; } - public boolean equals(get_materialization_invalidation_info_result that) { + public boolean equals(update_creation_metadata_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return 
false; - } - boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -65380,11 +66778,6 @@ public boolean equals(get_materialization_invalidation_info_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - boolean present_o1 = true && (isSetO1()); list.add(present_o1); if (present_o1) @@ -65404,23 +66797,13 @@ public int hashCode() { } @Override - public int compareTo(get_materialization_invalidation_info_result other) { + public int compareTo(update_creation_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -65468,17 +66851,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_materialization_invalidation_info_result("); + StringBuilder sb = new StringBuilder("update_creation_metadata_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -65527,15 +66902,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_materialization_invalidation_info_resultStandardSchemeFactory implements SchemeFactory { - public get_materialization_invalidation_info_resultStandardScheme getScheme() { - return new get_materialization_invalidation_info_resultStandardScheme(); + private static class update_creation_metadata_resultStandardSchemeFactory implements SchemeFactory { + public update_creation_metadata_resultStandardScheme getScheme() { + return new update_creation_metadata_resultStandardScheme(); } } - private static class get_materialization_invalidation_info_resultStandardScheme extends StandardScheme { + private static class update_creation_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization_invalidation_info_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_creation_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -65545,27 +66920,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization break; } switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map988 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map988.size); - String _key989; - Materialization _val990; - for (int _i991 = 0; _i991 < _map988.size; ++_i991) - { - _key989 = iprot.readString(); - _val990 = new 
Materialization(); - _val990.read(iprot); - struct.success.put(_key989, _val990); - } - iprot.readMapEnd(); - } - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o1 = new MetaException(); @@ -65602,23 +66956,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialization_invalidation_info_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_creation_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter992 : struct.success.entrySet()) - { - oprot.writeString(_iter992.getKey()); - _iter992.getValue().write(oprot); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -65640,41 +66981,28 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio } - private static class get_materialization_invalidation_info_resultTupleSchemeFactory implements SchemeFactory { - public get_materialization_invalidation_info_resultTupleScheme getScheme() { - return new get_materialization_invalidation_info_resultTupleScheme(); + private static class update_creation_metadata_resultTupleSchemeFactory implements SchemeFactory { + public update_creation_metadata_resultTupleScheme getScheme() { + return new update_creation_metadata_resultTupleScheme(); } } - private static class get_materialization_invalidation_info_resultTupleScheme extends TupleScheme { + private static class update_creation_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization_invalidation_info_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_creation_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } if (struct.isSetO1()) { - optionals.set(1); + optionals.set(0); } if (struct.isSetO2()) { - optionals.set(2); + optionals.set(1); } if (struct.isSetO3()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (Map.Entry _iter993 : struct.success.entrySet()) - { - oprot.writeString(_iter993.getKey()); - _iter993.getValue().write(oprot); - } - } + optionals.set(2); } + oprot.writeBitSet(optionals, 3); if (struct.isSetO1()) { struct.o1.write(oprot); } @@ -65687,36 +67015,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_invalidation_info_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, update_creation_metadata_result struct) throws org.apache.thrift.TException { 
TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - { - org.apache.thrift.protocol.TMap _map994 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map994.size); - String _key995; - Materialization _val996; - for (int _i997 = 0; _i997 < _map994.size; ++_i997) - { - _key995 = iprot.readString(); - _val996 = new Materialization(); - _val996.read(iprot); - struct.success.put(_key995, _val996); - } - } - struct.setSuccessIsSet(true); - } - if (incoming.get(1)) { struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(2)) { + if (incoming.get(1)) { struct.o2 = new InvalidOperationException(); struct.o2.read(iprot); struct.setO2IsSet(true); } - if (incoming.get(3)) { + if (incoming.get(2)) { struct.o3 = new UnknownDBException(); struct.o3.read(iprot); struct.setO3IsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 9382c60120..f6fc346be7 100644 --- a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -295,6 +295,15 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\UnknownDBException */ public function get_materialization_invalidation_info($dbname, array $tbl_names); + /** + * @param string $dbname + * @param string $tbl_name + * @param \metastore\CreationMetadata $creation_metadata + * @throws \metastore\MetaException + * @throws \metastore\InvalidOperationException + * @throws \metastore\UnknownDBException + */ + public function update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata); /** * @param string $dbname * @param string $filter @@ -3502,6 +3511,65 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_materialization_invalidation_info failed: unknown result"); } + public function update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) + { + $this->send_update_creation_metadata($dbname, $tbl_name, $creation_metadata); + $this->recv_update_creation_metadata(); + } + + public function send_update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) + { + $args = new \metastore\ThriftHiveMetastore_update_creation_metadata_args(); + $args->dbname = $dbname; + $args->tbl_name = $tbl_name; + $args->creation_metadata = $creation_metadata; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'update_creation_metadata', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('update_creation_metadata', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_update_creation_metadata() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = 
thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_update_creation_metadata_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_update_creation_metadata_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + return; + } + public function get_table_names_by_filter($dbname, $filter, $max_tables) { $this->send_get_table_names_by_filter($dbname, $filter, $max_tables); @@ -20209,6 +20277,259 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { } +class ThriftHiveMetastore_update_creation_metadata_args { + static $_TSPEC; + + /** + * @var string + */ + public $dbname = null; + /** + * @var string + */ + public $tbl_name = null; + /** + * @var \metastore\CreationMetadata + */ + public $creation_metadata = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbname', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'creation_metadata', + 'type' => TType::STRUCT, + 'class' => '\metastore\CreationMetadata', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbname'])) { + $this->dbname = $vals['dbname']; + } + if (isset($vals['tbl_name'])) { + $this->tbl_name = $vals['tbl_name']; + } + if (isset($vals['creation_metadata'])) { + $this->creation_metadata = $vals['creation_metadata']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_update_creation_metadata_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbname); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->creation_metadata = new \metastore\CreationMetadata(); + $xfer += $this->creation_metadata->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_creation_metadata_args'); + if ($this->dbname !== null) { + $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1); + $xfer += $output->writeString($this->dbname); + $xfer += $output->writeFieldEnd(); + } + if ($this->tbl_name !== null) { + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeString($this->tbl_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->creation_metadata !== null) { + if 
(!is_object($this->creation_metadata)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('creation_metadata', TType::STRUCT, 3); + $xfer += $this->creation_metadata->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_update_creation_metadata_result { + static $_TSPEC; + + /** + * @var \metastore\MetaException + */ + public $o1 = null; + /** + * @var \metastore\InvalidOperationException + */ + public $o2 = null; + /** + * @var \metastore\UnknownDBException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidOperationException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\UnknownDBException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_update_creation_metadata_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidOperationException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\UnknownDBException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_creation_metadata_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_get_table_names_by_filter_args { static $_TSPEC; diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index a5b578ef37..e76eb24b07 100644 --- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ 
b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -20851,14 +20851,14 @@ class TableMeta { class Materialization { static $_TSPEC; - /** - * @var \metastore\Table - */ - public $materializationTable = null; /** * @var string[] */ public $tablesUsed = null; + /** + * @var string + */ + public $validTxnList = null; /** * @var int */ @@ -20868,11 +20868,6 @@ class Materialization { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'materializationTable', - 'type' => TType::STRUCT, - 'class' => '\metastore\Table', - ), - 2 => array( 'var' => 'tablesUsed', 'type' => TType::SET, 'etype' => TType::STRING, @@ -20880,6 +20875,10 @@ class Materialization { 'type' => TType::STRING, ), ), + 2 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), 3 => array( 'var' => 'invalidationTime', 'type' => TType::I64, @@ -20887,12 +20886,12 @@ class Materialization { ); } if (is_array($vals)) { - if (isset($vals['materializationTable'])) { - $this->materializationTable = $vals['materializationTable']; - } if (isset($vals['tablesUsed'])) { $this->tablesUsed = $vals['tablesUsed']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } if (isset($vals['invalidationTime'])) { $this->invalidationTime = $vals['invalidationTime']; } @@ -20919,14 +20918,6 @@ class Materialization { switch ($fid) { case 1: - if ($ftype == TType::STRUCT) { - $this->materializationTable = new \metastore\Table(); - $xfer += $this->materializationTable->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - case 2: if ($ftype == TType::SET) { $this->tablesUsed = array(); $_size660 = 0; @@ -20947,6 +20938,13 @@ class Materialization { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; case 3: if ($ftype == TType::I64) { $xfer += $input->readI64($this->invalidationTime); @@ -20967,19 +20965,11 @@ class Materialization { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('Materialization'); - if ($this->materializationTable !== null) { - if (!is_object($this->materializationTable)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('materializationTable', TType::STRUCT, 1); - $xfer += $this->materializationTable->write($output); - $xfer += $output->writeFieldEnd(); - } if ($this->tablesUsed !== null) { if (!is_array($this->tablesUsed)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('tablesUsed', TType::SET, 2); + $xfer += $output->writeFieldBegin('tablesUsed', TType::SET, 1); { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { @@ -20996,6 +20986,11 @@ class Materialization { } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 2); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } if ($this->invalidationTime !== null) { $xfer += $output->writeFieldBegin('invalidationTime', TType::I64, 3); $xfer += $output->writeI64($this->invalidationTime); diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 9b2aaffd0f..c958e976a0 
100755 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -61,6 +61,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' GetTableResult get_table_req(GetTableRequest req)') print(' GetTablesResult get_table_objects_by_name_req(GetTablesRequest req)') print(' get_materialization_invalidation_info(string dbname, tbl_names)') + print(' void update_creation_metadata(string dbname, string tbl_name, CreationMetadata creation_metadata)') print(' get_table_names_by_filter(string dbname, string filter, i16 max_tables)') print(' void alter_table(string dbname, string tbl_name, Table new_tbl)') print(' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)') @@ -496,6 +497,12 @@ elif cmd == 'get_materialization_invalidation_info': sys.exit(1) pp.pprint(client.get_materialization_invalidation_info(args[0],eval(args[1]),)) +elif cmd == 'update_creation_metadata': + if len(args) != 3: + print('update_creation_metadata requires 3 args') + sys.exit(1) + pp.pprint(client.update_creation_metadata(args[0],args[1],eval(args[2]),)) + elif cmd == 'get_table_names_by_filter': if len(args) != 3: print('get_table_names_by_filter requires 3 args') diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 2e1910568a..330e75f3a4 100644 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -308,6 +308,15 @@ def get_materialization_invalidation_info(self, dbname, tbl_names): """ pass + def update_creation_metadata(self, dbname, tbl_name, creation_metadata): + """ + Parameters: + - dbname + - tbl_name + - creation_metadata + """ + pass + def get_table_names_by_filter(self, dbname, filter, max_tables): """ Parameters: @@ -2759,6 +2768,45 @@ def recv_get_materialization_invalidation_info(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result") + def update_creation_metadata(self, dbname, tbl_name, creation_metadata): + """ + Parameters: + - dbname + - tbl_name + - creation_metadata + """ + self.send_update_creation_metadata(dbname, tbl_name, creation_metadata) + self.recv_update_creation_metadata() + + def send_update_creation_metadata(self, dbname, tbl_name, creation_metadata): + self._oprot.writeMessageBegin('update_creation_metadata', TMessageType.CALL, self._seqid) + args = update_creation_metadata_args() + args.dbname = dbname + args.tbl_name = tbl_name + args.creation_metadata = creation_metadata + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_creation_metadata(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_creation_metadata_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + def get_table_names_by_filter(self, dbname, filter, max_tables): """ Parameters: @@ -7999,6 +8047,7 @@ def 
__init__(self, handler): self._processMap["get_table_req"] = Processor.process_get_table_req self._processMap["get_table_objects_by_name_req"] = Processor.process_get_table_objects_by_name_req self._processMap["get_materialization_invalidation_info"] = Processor.process_get_materialization_invalidation_info + self._processMap["update_creation_metadata"] = Processor.process_update_creation_metadata self._processMap["get_table_names_by_filter"] = Processor.process_get_table_names_by_filter self._processMap["alter_table"] = Processor.process_alter_table self._processMap["alter_table_with_environment_context"] = Processor.process_alter_table_with_environment_context @@ -9090,6 +9139,34 @@ def process_get_materialization_invalidation_info(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_update_creation_metadata(self, seqid, iprot, oprot): + args = update_creation_metadata_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_creation_metadata_result() + try: + self._handler.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except UnknownDBException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("update_creation_metadata", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_get_table_names_by_filter(self, seqid, iprot, oprot): args = get_table_names_by_filter_args() args.read(iprot) @@ -18869,6 +18946,192 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class update_creation_metadata_args: + """ + Attributes: + - dbname + - tbl_name + - creation_metadata + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbname', None, None, ), # 1 + (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.STRUCT, 'creation_metadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 3 + ) + + def __init__(self, dbname=None, tbl_name=None, creation_metadata=None,): + self.dbname = dbname + self.tbl_name = tbl_name + self.creation_metadata = creation_metadata + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.creation_metadata = CreationMetadata() + self.creation_metadata.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and 
fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('update_creation_metadata_args') + if self.dbname is not None: + oprot.writeFieldBegin('dbname', TType.STRING, 1) + oprot.writeString(self.dbname) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + if self.creation_metadata is not None: + oprot.writeFieldBegin('creation_metadata', TType.STRUCT, 3) + self.creation_metadata.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbname) + value = (value * 31) ^ hash(self.tbl_name) + value = (value * 31) ^ hash(self.creation_metadata) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class update_creation_metadata_result: + """ + Attributes: + - o1 + - o2 + - o3 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (UnknownDBException, UnknownDBException.thrift_spec), None, ), # 3 + ) + + def __init__(self, o1=None, o2=None, o3=None,): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('update_creation_metadata_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + return value + + 
def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class get_table_names_by_filter_args: """ Attributes: diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 5598859042..9a504e1fd1 100644 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -14703,21 +14703,21 @@ def __ne__(self, other): class Materialization: """ Attributes: - - materializationTable - tablesUsed + - validTxnList - invalidationTime """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'materializationTable', (Table, Table.thrift_spec), None, ), # 1 - (2, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 2 + (1, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 1 + (2, TType.STRING, 'validTxnList', None, None, ), # 2 (3, TType.I64, 'invalidationTime', None, None, ), # 3 ) - def __init__(self, materializationTable=None, tablesUsed=None, invalidationTime=None,): - self.materializationTable = materializationTable + def __init__(self, tablesUsed=None, validTxnList=None, invalidationTime=None,): self.tablesUsed = tablesUsed + self.validTxnList = validTxnList self.invalidationTime = invalidationTime def read(self, iprot): @@ -14730,12 +14730,6 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.materializationTable = Table() - self.materializationTable.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: if ftype == TType.SET: self.tablesUsed = set() (_etype660, _size657) = iprot.readSetBegin() @@ -14745,6 +14739,11 @@ def read(self, iprot): iprot.readSetEnd() else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) elif fid == 3: if ftype == TType.I64: self.invalidationTime = iprot.readI64() @@ -14760,17 +14759,17 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('Materialization') - if self.materializationTable is not None: - oprot.writeFieldBegin('materializationTable', TType.STRUCT, 1) - self.materializationTable.write(oprot) - oprot.writeFieldEnd() if self.tablesUsed is not None: - oprot.writeFieldBegin('tablesUsed', TType.SET, 2) + oprot.writeFieldBegin('tablesUsed', TType.SET, 1) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) for iter663 in self.tablesUsed: oprot.writeString(iter663) oprot.writeSetEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 2) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() if self.invalidationTime is not None: oprot.writeFieldBegin('invalidationTime', TType.I64, 3) oprot.writeI64(self.invalidationTime) @@ -14779,8 +14778,6 @@ def write(self, oprot): oprot.writeStructEnd() def validate(self): - if self.materializationTable is None: - raise TProtocol.TProtocolException(message='Required field materializationTable is unset!') if self.tablesUsed is None: raise TProtocol.TProtocolException(message='Required field tablesUsed is unset!') if self.invalidationTime is None: @@ -14790,8 +14787,8 @@ def validate(self): 
def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.materializationTable) value = (value * 31) ^ hash(self.tablesUsed) + value = (value * 31) ^ hash(self.validTxnList) value = (value * 31) ^ hash(self.invalidationTime) return value diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index bc58cfe0ef..5faf5ea831 100644 --- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -3316,20 +3316,19 @@ end class Materialization include ::Thrift::Struct, ::Thrift::Struct_Union - MATERIALIZATIONTABLE = 1 - TABLESUSED = 2 + TABLESUSED = 1 + VALIDTXNLIST = 2 INVALIDATIONTIME = 3 FIELDS = { - MATERIALIZATIONTABLE => {:type => ::Thrift::Types::STRUCT, :name => 'materializationTable', :class => ::Table}, TABLESUSED => {:type => ::Thrift::Types::SET, :name => 'tablesUsed', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList', :optional => true}, INVALIDATIONTIME => {:type => ::Thrift::Types::I64, :name => 'invalidationTime'} } def struct_fields; FIELDS; end def validate - raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field materializationTable is unset!') unless @materializationTable raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablesUsed is unset!') unless @tablesUsed raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field invalidationTime is unset!') unless @invalidationTime end diff --git a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index ec88131308..640499e897 100644 --- a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -628,6 +628,23 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_materialization_invalidation_info failed: unknown result') end + def update_creation_metadata(dbname, tbl_name, creation_metadata) + send_update_creation_metadata(dbname, tbl_name, creation_metadata) + recv_update_creation_metadata() + end + + def send_update_creation_metadata(dbname, tbl_name, creation_metadata) + send_message('update_creation_metadata', Update_creation_metadata_args, :dbname => dbname, :tbl_name => tbl_name, :creation_metadata => creation_metadata) + end + + def recv_update_creation_metadata() + result = receive_message(Update_creation_metadata_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? 
+ return + end + def get_table_names_by_filter(dbname, filter, max_tables) send_get_table_names_by_filter(dbname, filter, max_tables) return recv_get_table_names_by_filter() @@ -3519,6 +3536,21 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_materialization_invalidation_info', seqid) end + def process_update_creation_metadata(seqid, iprot, oprot) + args = read_args(iprot, Update_creation_metadata_args) + result = Update_creation_metadata_result.new() + begin + @handler.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata) + rescue ::MetaException => o1 + result.o1 = o1 + rescue ::InvalidOperationException => o2 + result.o2 = o2 + rescue ::UnknownDBException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'update_creation_metadata', seqid) + end + def process_get_table_names_by_filter(seqid, iprot, oprot) args = read_args(iprot, Get_table_names_by_filter_args) result = Get_table_names_by_filter_result.new() @@ -6668,6 +6700,46 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Update_creation_metadata_args + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBL_NAME = 2 + CREATION_METADATA = 3 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + CREATION_METADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creation_metadata', :class => ::CreationMetadata} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Update_creation_metadata_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::UnknownDBException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Get_table_names_by_filter_args include ::Thrift::Struct, ::Thrift::Struct_Union DBNAME = 1 diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 1f998285cf..47de215a23 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -2518,6 +2518,11 @@ public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throw return MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(dbName, tableNames); } + @Override + public void update_creation_metadata(final String dbName, final String tableName, CreationMetadata cm) throws MetaException { + getMS().updateCreationMetadata(dbName, tableName, cm); + } + private void assertClientHasCapability(ClientCapabilities client, ClientCapability value, String what, String call) throws MetaException { if (!doesClientHaveCapability(client, value)) { diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 23cef8d556..5b62114bda 100644 --- 
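With update_creation_metadata added to the service, the regenerated clients (the Ruby stub above, and equivalently the Java one) expose a plain blocking call, and the metastore handler forwards it straight to the backing RawStore. A hedged sketch of driving the RPC through the raw generated Java client; the socket/protocol wiring and endpoint are illustrative assumptions, and real deployments would normally go through HiveMetaStoreClient instead:

```java
import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class UpdateCreationMetadataRpc {
  /** Calls the new RPC against a metastore at the given host/port (illustrative wiring only). */
  public static void call(String host, int port, String dbName, String mvName,
      CreationMetadata cm) throws Exception {
    TTransport transport = new TSocket(host, port);
    transport.open();
    try {
      ThriftHiveMetastore.Client client =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
      // Replaces the creation metadata stored for the materialized view; failures surface
      // as the exceptions declared on the service method (MetaException, etc.).
      client.update_creation_metadata(dbName, mvName, cm);
    } finally {
      transport.close();
    }
  }
}
```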
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -1453,6 +1453,13 @@ public Table getTable(String tableName) throws MetaException, TException, dbName, filterHook.filterTableNames(dbName, viewNames)); } + /** {@inheritDoc} */ + @Override + public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + client.update_creation_metadata(dbName, tableName, cm); + } + /** {@inheritDoc} */ @Override public List listTableNamesByFilter(String dbName, String filter, short maxTables) diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 96d4590222..143b04ff49 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.Database; @@ -449,6 +450,12 @@ Table getTable(String dbName, String tableName) throws MetaException, Map getMaterializationsInvalidationInfo(String dbName, List viewNames) throws MetaException, InvalidOperationException, UnknownDBException, TException; + /** + * Updates the creation metadata for the materialized view. 
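At the API level, callers reach the new RPC through IMetaStoreClient#updateCreationMetadata, which HiveMetaStoreClient implements as a thin pass-through to the Thrift stub. A small usage sketch, assuming the usual required-fields constructor generated for CreationMetadata; obtaining the client and the snapshot string (normally taken from the query's transaction state) is left out:

```java
import java.util.Set;

import com.google.common.collect.ImmutableSet;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.thrift.TException;

public class RefreshCreationMetadata {
  /**
   * Re-records which tables a materialized view reads and the transaction snapshot it
   * was built against, e.g. after a rebuild.
   */
  public static void refresh(IMetaStoreClient msc, String dbName, String mvName,
      Set<String> tablesUsed, String validTxnList) throws TException {
    CreationMetadata cm =
        new CreationMetadata(dbName, mvName, ImmutableSet.copyOf(tablesUsed));
    if (validTxnList != null) {
      cm.setValidTxnList(validTxnList);
    }
    // Thin client call; the metastore persists the new creation metadata and refreshes
    // the materializations invalidation cache once the change commits.
    msc.updateCreationMetadata(dbName, mvName, cm);
  }
}
```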
+ */ + void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) + throws MetaException, TException; + /** * @param tableName * @param dbName diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationInvalidationInfo.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationInvalidationInfo.java index f787dd4155..3d774071c2 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationInvalidationInfo.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationInvalidationInfo.java @@ -35,8 +35,9 @@ private AtomicLong invalidationTime; - public MaterializationInvalidationInfo(Table materializationTable, Set tablesUsed) { - super(materializationTable, tablesUsed, 0); + public MaterializationInvalidationInfo(Set tablesUsed, String validTxnList) { + super(tablesUsed, 0); + this.setValidTxnList(validTxnList); this.invalidationTime = new AtomicLong(0); } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java index 20e4e8db51..92653ae97e 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hive.metastore; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; @@ -115,7 +114,8 @@ public void run() { try { for (String dbName : store.getAllDatabases()) { for (Table mv : store.getTableObjectsByName(dbName, store.getTables(dbName, null, TableType.MATERIALIZED_VIEW))) { - addMaterializedView(mv, ImmutableSet.copyOf(mv.getCreationMetadata().getTablesUsed()), OpType.LOAD); + addMaterializedView(mv.getDbName(), mv.getTableName(), ImmutableSet.copyOf(mv.getCreationMetadata().getTablesUsed()), + mv.getCreationMetadata().getValidTxnList(), OpType.LOAD); } } LOG.info("Initialized materializations invalidation cache"); @@ -128,52 +128,60 @@ public void run() { /** * Adds a newly created materialized view to the cache. * - * @param materializedViewTable the materialized view + * @param dbName + * @param tableName * @param tablesUsed tables used by the materialized view + * @param validTxnList */ - public void createMaterializedView(Table materializedViewTable, Set tablesUsed) { - addMaterializedView(materializedViewTable, tablesUsed, OpType.CREATE); + public void createMaterializedView(String dbName, String tableName, Set tablesUsed, + String validTxnList) { + addMaterializedView(dbName, tableName, tablesUsed, validTxnList, OpType.CREATE); } /** * Method to call when materialized view is modified. * - * @param materializedViewTable the materialized view + * @param dbName + * @param tableName * @param tablesUsed tables used by the materialized view + * @param validTxnList */ - public void alterMaterializedView(Table materializedViewTable, Set tablesUsed) { - addMaterializedView(materializedViewTable, tablesUsed, OpType.ALTER); + public void alterMaterializedView(String dbName, String tableName, Set tablesUsed, + String validTxnList) { + addMaterializedView(dbName, tableName, tablesUsed, validTxnList, OpType.ALTER); } /** * Adds the materialized view to the cache. 
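Since the cache entries no longer hold a Table, registration only needs the identifying strings plus the pieces of the creation metadata. A minimal sketch of how a caller derives the new createMaterializedView arguments from a materialized-view Table object:

```java
import com.google.common.collect.ImmutableSet;
import org.apache.hadoop.hive.metastore.MaterializationsInvalidationCache;
import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.Table;

public class CacheRegistration {
  /** Registers a freshly created materialized view with the invalidation cache. */
  public static void register(Table mv) {
    CreationMetadata cm = mv.getCreationMetadata();
    // The cache only needs db/table names, the source tables, and the snapshot the
    // view was built against; validTxnList may be null for non-transactional sources.
    MaterializationsInvalidationCache.get().createMaterializedView(
        mv.getDbName(), mv.getTableName(),
        ImmutableSet.copyOf(cm.getTablesUsed()),
        cm.getValidTxnList());
  }
}
```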
* - * @param materializedViewTable the materialized view + * @param dbName + * @param tableName * @param tablesUsed tables used by the materialized view + * @param validTxnList + * @param opType */ - private void addMaterializedView(Table materializedViewTable, Set tablesUsed, OpType opType) { + private void addMaterializedView(String dbName, String tableName, Set tablesUsed, + String validTxnList, OpType opType) { // We are going to create the map for each view in the given database ConcurrentMap cq = new ConcurrentHashMap(); final ConcurrentMap prevCq = materializations.putIfAbsent( - materializedViewTable.getDbName(), cq); + dbName, cq); if (prevCq != null) { cq = prevCq; } // Start the process to add materialization to the cache // Before loading the materialization in the cache, we need to update some // important information in the registry to account for rewriting invalidation - String txnListString = materializedViewTable.getCreationMetadata().getValidTxnList(); - if (txnListString == null) { + if (validTxnList == null) { // This can happen when the materialized view was created on non-transactional tables return; } if (opType == OpType.CREATE || opType == OpType.ALTER) { // You store the materialized view - cq.put(materializedViewTable.getTableName(), - new MaterializationInvalidationInfo(materializedViewTable, tablesUsed)); + cq.put(tableName, new MaterializationInvalidationInfo(tablesUsed, validTxnList)); } else { - ValidTxnList txnList = new ValidReadTxnList(txnListString); + ValidTxnList txnList = new ValidReadTxnList(validTxnList); for (String qNameTableUsed : tablesUsed) { // First we insert a new tree set to keep table modifications, unless it already exists ConcurrentSkipListMap modificationsTree = @@ -197,19 +205,17 @@ private void addMaterializedView(Table materializedViewTable, Set tables continue; } } catch (MetaException ex) { - LOG.debug("Materialized view " + - Warehouse.getQualifiedName(materializedViewTable.getDbName(), materializedViewTable.getTableName()) + + LOG.debug("Materialized view " + Warehouse.getQualifiedName(dbName, tableName) + " ignored; error loading view into invalidation cache", ex); return; } } // For LOAD, you only add it if it does exist as you might be loading an outdated MV - cq.putIfAbsent(materializedViewTable.getTableName(), - new MaterializationInvalidationInfo(materializedViewTable, tablesUsed)); + cq.putIfAbsent(tableName, new MaterializationInvalidationInfo(tablesUsed, validTxnList)); } if (LOG.isDebugEnabled()) { LOG.debug("Cached materialized view for rewriting in invalidation cache: " + - Warehouse.getQualifiedName(materializedViewTable.getDbName(), materializedViewTable.getTableName())); + Warehouse.getQualifiedName(dbName, tableName)); } } @@ -236,12 +242,9 @@ public void notifyTableModification(String dbName, String tableName, /** * Removes the materialized view from the cache. 
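The snapshot string is what the cache ultimately reasons about: a write transaction on a source table that was not visible in the snapshot means the materialization may no longer reflect that table. This is not the cache's exact bookkeeping (it also tracks per-table modification trees), but a sketch of the underlying comparison using the same ValidReadTxnList parsing the patch relies on:

```java
import org.apache.hadoop.hive.common.ValidReadTxnList;
import org.apache.hadoop.hive.common.ValidTxnList;

public class SnapshotStalenessCheck {
  /**
   * Illustrative check: returns true when the given write transaction is not covered
   * by the snapshot the materialized view was built with.
   */
  public static boolean mayInvalidate(String validTxnListString, long writeTxnId) {
    if (validTxnListString == null) {
      // No snapshot was recorded (non-transactional sources): nothing to compare against.
      return true;
    }
    ValidTxnList snapshot = new ValidReadTxnList(validTxnListString);
    // isTxnValid == true means the transaction was already committed and visible when
    // the snapshot was taken, i.e. its data is already reflected in the view.
    return !snapshot.isTxnValid(writeTxnId);
  }
}
```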
* - * @param materializedViewTable the materialized view to remove + * @param dbName + * @param tableName */ - public void dropMaterializedView(Table materializedViewTable) { - dropMaterializedView(materializedViewTable.getDbName(), materializedViewTable.getTableName()); - } - public void dropMaterializedView(String dbName, String tableName) { materializations.get(dbName).remove(tableName); } @@ -292,7 +295,7 @@ public void dropMaterializedView(String dbName, String tableName) { } private long getInvalidationTime(MaterializationInvalidationInfo materialization) { - String txnListString = materialization.getMaterializationTable().getCreationMetadata().getValidTxnList(); + String txnListString = materialization.getValidTxnList(); if (txnListString == null) { // This can happen when the materialization was created on non-transactional tables return Long.MIN_VALUE; diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index edabaa15ff..7b44df4128 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -1152,7 +1152,8 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException if (MetaStoreUtils.isMaterializedViewTable(tbl)) { // Add to the invalidation cache MaterializationsInvalidationCache.get().createMaterializedView( - tbl, tbl.getCreationMetadata().getTablesUsed()); + tbl.getDbName(), tbl.getTableName(), tbl.getCreationMetadata().getTablesUsed(), + tbl.getCreationMetadata().getValidTxnList()); } } } @@ -3738,28 +3739,38 @@ public void alterTable(String dbname, String name, Table newTable) oldt.setViewOriginalText(newt.getViewOriginalText()); oldt.setViewExpandedText(newt.getViewExpandedText()); oldt.setRewriteEnabled(newt.isRewriteEnabled()); - registerCreationSignature = newTable.getCreationMetadata() != null; - if (registerCreationSignature) { - // Update creation metadata - MCreationMetadata newMcm = convertToMCreationMetadata( - newTable.getCreationMetadata()); - MCreationMetadata mcm = getCreationMetadata(dbname, name); - mcm.setTables(newMcm.getTables()); - mcm.setTxnList(newMcm.getTxnList()); + + // commit the changes + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); } + } + } + @Override + public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + throws MetaException { + boolean success = false; + try { + openTransaction(); + dbname = normalizeIdentifier(dbname); + tablename = normalizeIdentifier(tablename); + // Update creation metadata + MCreationMetadata newMcm = convertToMCreationMetadata(cm); + MCreationMetadata mcm = getCreationMetadata(dbname, tablename); + mcm.setTables(newMcm.getTables()); + mcm.setTxnList(newMcm.getTxnList()); // commit the changes success = commitTransaction(); } finally { if (!success) { rollbackTransaction(); } else { - if (MetaStoreUtils.isMaterializedViewTable(newTable) && - registerCreationSignature) { - // Add to the invalidation cache if the creation signature has changed - MaterializationsInvalidationCache.get().alterMaterializedView( - newTable, newTable.getCreationMetadata().getTablesUsed()); - } + // Add to the invalidation cache if the creation signature has changed + MaterializationsInvalidationCache.get().alterMaterializedView( + dbname, tablename, cm.getTablesUsed(), 
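The cache keeps a two-level map, database name to (view name to MaterializationInvalidationInfo), and the simplified drop above removes the entry directly from the inner map. A defensive variant for illustration only; the patched method assumes the database entry is already present in the cache:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.hadoop.hive.metastore.MaterializationInvalidationInfo;

public class MaterializationsIndex {
  // Same two-level layout the cache uses: database -> (view name -> invalidation info).
  private final ConcurrentMap<String, ConcurrentMap<String, MaterializationInvalidationInfo>>
      materializations = new ConcurrentHashMap<>();

  /** Null-safe drop: a database with no cached materializations is simply a no-op. */
  public void drop(String dbName, String tableName) {
    ConcurrentMap<String, MaterializationInvalidationInfo> views = materializations.get(dbName);
    if (views != null) {
      views.remove(tableName);
    }
  }
}
```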
cm.getValidTxnList()); } } } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index e4e7d4239d..f500d63725 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import java.lang.annotation.ElementType; @@ -172,6 +173,9 @@ boolean dropPartition(String dbName, String tableName, void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, MetaException; + void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + throws MetaException; + List getTables(String dbName, String pattern) throws MetaException; diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 80aa3bcdb4..0d132f2074 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.metastore.cache; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import java.nio.ByteBuffer; @@ -1174,6 +1175,12 @@ public void alterTable(String dbName, String tblName, Table newTable) } } + @Override + public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + throws MetaException { + rawStore.updateCreationMetadata(dbname, tablename, cm); + } + @Override public List getTables(String dbName, String pattern) throws MetaException { if (!isBlacklistWhitelistEmpty(conf) || !sharedCacheWrapper.isInitialized()) { diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift index 371b97590c..35fc8b3c93 100644 --- a/standalone-metastore/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -1049,8 +1049,8 @@ struct TableMeta { } struct Materialization { - 1: required Table materializationTable; - 2: required set tablesUsed; + 1: required set tablesUsed; + 2: optional string validTxnList 3: required i64 invalidationTime; } @@ -1420,6 +1420,8 @@ service ThriftHiveMetastore extends fb303.FacebookService throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) map get_materialization_invalidation_info(1:string dbname, 2:list tbl_names) throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) + void update_creation_metadata(1:string dbname, 2:string tbl_name, 3:CreationMetadata creation_metadata) + throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) // Get a list of table names that match a filter. 
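The new ObjectStore method follows the store's usual JDO transaction idiom: open a transaction, apply the change, commit, and roll back from the finally block on any failure path, refreshing the invalidation cache only after a successful commit. A distilled, hedged sketch of that shape; the abstract method names other than open/commit/rollback are placeholders, not ObjectStore API:

```java
public abstract class TransactionalUpdateSketch {
  protected abstract void openTransaction();
  protected abstract boolean commitTransaction();
  protected abstract void rollbackTransaction();

  protected abstract void applyCreationMetadataUpdate(String dbName, String tblName);
  protected abstract void refreshInvalidationCache(String dbName, String tblName);

  public void updateCreationMetadata(String dbName, String tblName) {
    boolean success = false;
    try {
      openTransaction();
      applyCreationMetadataUpdate(dbName, tblName);   // persist the new creation metadata
      success = commitTransaction();
    } finally {
      if (!success) {
        rollbackTransaction();                        // undo on any failure path
      } else {
        refreshInvalidationCache(dbName, tblName);    // only after a durable commit
      }
    }
  }
}
```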
// The filter operators are LIKE, <, <=, >, >=, =, <> diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 9100c73beb..75ea8c4a77 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import java.nio.ByteBuffer; @@ -240,6 +241,12 @@ public void alterTable(String dbName, String name, Table newTable) objectStore.alterTable(dbName, name, newTable); } + @Override + public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + throws MetaException { + objectStore.updateCreationMetadata(dbname, tablename, cm); + } + @Override public List getTables(String dbName, String pattern) throws MetaException { return objectStore.getTables(dbName, pattern); diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 86e72d8d76..207d842f94 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import java.nio.ByteBuffer; @@ -239,6 +240,11 @@ public void alterTable(String dbname, String name, Table newTable) throws Invali } + @Override + public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + throws MetaException { + } + @Override public List getTables(String dbName, String pattern) throws MetaException {