diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 3c334fac31..580bae9c3f 100644
--- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -266,6 +266,12 @@ public Table getTable(String catName, String dbName, String tableName) throws MetaException {
   }
 
   @Override
+  public Table getTable(String catName, String dbName, String tableName,
+                        long txnId, String writeIdList) throws MetaException {
+    return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
+  }
+
+  @Override
   public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
     return objectStore.addPartition(part);
   }
@@ -278,6 +284,13 @@ public Partition getPartition(String catName, String dbName, String tableName, List<String> partVals)
   }
 
   @Override
+  public Partition getPartition(String catName, String dbName, String tableName,
+                                List<String> partVals, long txnId, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
+  }
+
+  @Override
   public boolean dropPartition(String catName, String dbName, String tableName,
       List<String> partVals) throws MetaException, NoSuchObjectException,
       InvalidObjectException, InvalidInputException {
@@ -382,9 +395,10 @@ public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
   @Override
   public void alterPartitions(String catName, String dbName, String tblName,
-                              List<List<String>> partValsList, List<Partition> newParts)
+                              List<List<String>> partValsList, List<Partition> newParts,
+                              long txnId, String writeIdList)
       throws InvalidObjectException, MetaException {
-    objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
+    objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList);
   }
 
   @Override
@@ -691,6 +705,14 @@ public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
   }
 
   @Override
+  public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+                                                   List<String> colNames,
+                                                   long txnId, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, txnId, writeIdList);
+  }
+
+  @Override
   public boolean deleteTableColumnStatistics(String catName, String dbName,
                                              String tableName, String colName)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -784,6 +806,17 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) throws MetaException {
   }
 
   @Override
+  public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
+                                                             String tblName, List<String> colNames,
+                                                             List<String> partNames,
+                                                             long txnId,
+                                                             String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return objectStore.getPartitionColumnStatistics(
+        catName, dbName, tblName, colNames, partNames, txnId, writeIdList);
+  }
+
+  @Override
   public boolean doesPartitionExist(String catName, String dbName, String tableName,
                                     List<FieldSchema> partKeys, List<String> partVals)
       throws MetaException, NoSuchObjectException {
@@ -861,6 +894,14 @@ public AggrStats get_aggr_stats_for(String catName, String dbName,
   }
 
   @Override
+  public AggrStats get_aggr_stats_for(String catName, String dbName,
+      String tblName, List<String> partNames, List<String> colNames,
+      long txnId, String writeIdList)
+      throws MetaException {
+    return null;
+  }
+
+  @Override
  public NotificationEventResponse
getNextNotification(NotificationEventRequest rqst) { return objectStore.getNextNotification(rqst); } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 689c859804..a4e29c9cd4 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -151,6 +151,7 @@ public static void setUpBeforeClass() throws Exception { hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); hconf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true"); + hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true); hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hconf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"); diff --git pom.xml pom.xml index 5202248315..4278104900 100644 --- pom.xml +++ pom.xml @@ -66,7 +66,7 @@ - 3.1.0 + 4.0.0 UTF-8 diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java index a53ff5aff7..7795c6659c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java @@ -46,11 +46,14 @@ import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; import org.apache.hadoop.hive.ql.plan.api.StageType; +import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.io.DateWritable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 8438dad347..a4b3a0a120 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -1301,8 +1301,7 @@ private int alterMaterializedView(Hive db, AlterMaterializedViewDesc alterMVDesc throw new AssertionError("Unsupported alter materialized view type! 
: " + alterMVDesc.getOp());
     }
 
-    db.alterTable(mv, environmentContext);
-
+    db.alterTable(mv, environmentContext);
     return 0;
   }
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 7fce67fc3e..caf886f18c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -33,6 +33,7 @@
 import java.util.Set;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,13 +41,11 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
-import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
-import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.common.ValidWriteIdList;
+import org.apache.hadoop.hive.common.*;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -57,9 +56,12 @@
 import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater;
 import org.apache.hadoop.hive.ql.io.orc.Reader;
 import org.apache.hadoop.hive.ql.io.orc.Writer;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -1621,6 +1623,115 @@ public static void setValidWriteIdList(Configuration conf, TableScanDesc tsDesc) {
     }
   }
 
+  public static class TableSnapshot {
+    private long txnId;
+    private String validWriteIdList;
+
+    public TableSnapshot() {
+    }
+
+    public TableSnapshot(long txnId, String validWriteIdList) {
+      this.txnId = txnId;
+      this.validWriteIdList = validWriteIdList;
+    }
+
+    public long getTxnId() {
+      return txnId;
+    }
+
+    public String getValidWriteIdList() {
+      return validWriteIdList;
+    }
+
+    public void setTxnId(long txnId) {
+      this.txnId = txnId;
+    }
+
+    public void setValidWriteIdList(String validWriteIdList) {
+      this.validWriteIdList = validWriteIdList;
+    }
+  }
+
+  // TODO# remove
+  public static TableSnapshot getTableSnapshot(
+      Configuration conf, Table tbl) throws LockException {
+    return getTableSnapshot(conf, tbl, false);
+  }
+
+  /**
+   * Create a TableSnapshot with the given "conf"
+   * for the table of the given "tbl".
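+   * The snapshot pairs the current transaction ID from the session's
+   * transaction manager (if one is open) with the table's valid write ID list,
+   * so that stats read or written under it honor snapshot isolation.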
+ * + * @param conf + * @param tbl + * @return TableSnapshot on success, null on failure + * @throws LockException + */ + public static TableSnapshot getTableSnapshot( + Configuration conf, Table tbl, boolean isInTxnScope) throws LockException { + if (!isTransactionalTable(tbl)) { + return null; + } else { + long txnId = 0; + ValidWriteIdList validWriteIdList = null; + + HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr(); + + if (sessionTxnMgr != null) { + txnId = sessionTxnMgr.getCurrentTxnId(); + } + String fullTableName = getFullTableName(tbl.getDbName(), tbl.getTableName()); + if (txnId > 0 && isTransactionalTable(tbl)) { + validWriteIdList = getTableValidWriteIdList(conf, fullTableName); + + // TODO: remove in_test filters? + if (validWriteIdList == null && !isInTxnScope + && !HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) { + LOG.warn("Obtaining write IDs from metastore for " + tbl.getTableName()); + validWriteIdList = getTableValidWriteIdListWithTxnList( + conf, tbl.getDbName(), tbl.getTableName()); + } + if (validWriteIdList == null) { + if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) { + throw new AssertionError("Cannot find valid write ID list for " + tbl.getTableName()); + } else { + return null; + } + } + } + return new TableSnapshot(txnId, + validWriteIdList != null ? validWriteIdList.toString() : null); + } + } + + /** + * Returns ValidWriteIdList for the table with the given "dbName" and "tableName". + * This is called when HiveConf has no list for the table. + * Otherwise use getTableSnapshot(). + * @param conf Configuration + * @param dbName + * @param tableName + * @return ValidWriteIdList on success, null on failure to get a list. + * @throws LockException + */ + public static ValidWriteIdList getTableValidWriteIdListWithTxnList( + Configuration conf, String dbName, String tableName) throws LockException { + HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr(); + if (sessionTxnMgr == null) { + return null; + } + ValidWriteIdList validWriteIdList = null; + ValidTxnWriteIdList validTxnWriteIdList = null; + + String validTxnList = conf.get(ValidTxnList.VALID_TXNS_KEY); + List tablesInput = new ArrayList<>(); + String fullTableName = getFullTableName(dbName, tableName); + tablesInput.add(fullTableName); + + validTxnWriteIdList = sessionTxnMgr.getValidWriteIds(tablesInput, validTxnList); + return validTxnWriteIdList != null ? + validTxnWriteIdList.getTableValidWriteIdList(fullTableName) : null; + } + public static String getFullTableName(String dbName, String tableName) { return dbName.toLowerCase() + "." 
+ tableName.toLowerCase(); } @@ -1908,8 +2019,8 @@ public static String getAcidSubDir(Path dataPath) { } public static boolean isAcidEnabled(HiveConf hiveConf) { - String txnMgr = hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER); - boolean concurrency = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); + String txnMgr = hiveConf.getVar(ConfVars.HIVE_TXN_MANAGER); + boolean concurrency = hiveConf.getBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY); String dbTxnMgr = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"; if (txnMgr.equals(dbTxnMgr) && concurrency) { return true; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 4fd1d4ec54..91047861a1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -1017,9 +1017,16 @@ public int getStmtIdAndIncrement() { @Override public long getTableWriteId(String dbName, String tableName) throws LockException { assert isTxnOpen(); + return getTableWriteId(dbName, tableName, true); + } + + private long getTableWriteId( + String dbName, String tableName, boolean allocateIfNotYet) throws LockException { String fullTableName = AcidUtils.getFullTableName(dbName, tableName); if (tableWriteIds.containsKey(fullTableName)) { return tableWriteIds.get(fullTableName); + } else if (!allocateIfNotYet) { + return 0; } try { long writeId = getMS().allocateTableWriteId(txnId, dbName, tableName); diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index ab9d67e441..78bb303bd6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -77,6 +77,7 @@ public int getStmtIdAndIncrement() { public long getTableWriteId(String dbName, String tableName) throws LockException { return 0L; } + @Override public void replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy, List srcTxnToWriteIdList) throws LockException { diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index 5f68e085a0..9ea40f4fa8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -276,7 +276,6 @@ void replTableWriteIdState(String validWriteIdList, String dbName, String tableN * if {@code isTxnOpen()}, returns the table write ID associated with current active transaction. */ long getTableWriteId(String dbName, String tableName) throws LockException; - /** * Allocates write id for each transaction in the list. 
* @param dbName database name diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index c3809d8d87..c0a9be0dee 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -18,14 +18,15 @@ package org.apache.hadoop.hive.ql.metadata; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Splitter; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW; +import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer.makeBinaryPredicate; @@ -62,6 +63,7 @@ import javax.jdo.JDODataStoreException; import com.google.common.collect.ImmutableList; + import org.apache.calcite.plan.RelOptMaterialization; import org.apache.calcite.plan.hep.HepPlanner; import org.apache.calcite.plan.hep.HepProgramBuilder; @@ -80,11 +82,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.HiveStatsUtils; -import org.apache.hadoop.hive.common.ObjectPair; -import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.*; import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable; import org.apache.hadoop.hive.common.log.InPlaceUpdate; @@ -105,60 +103,7 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.ReplChangeManager; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CompactionResponse; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FireEventRequest; -import org.apache.hadoop.hive.metastore.api.FireEventRequestData; -import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; -import 
org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.Materialization;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator;
@@ -641,6 +586,12 @@ public void alterTable(Table newTbl, EnvironmentContext environmentContext)
     alterTable(newTbl.getDbName(), newTbl.getTableName(), newTbl, false, environmentContext);
   }
 
+  public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext)
+      throws HiveException {
+    alterTable(fullyQlfdTblName, newTbl, false, environmentContext);
+  }
+
   /**
    * Updates the existing table metadata with the new metadata.
    *
    * @param fullyQlfdTblName
    *          name of the existing table
    * @param newTbl
    *          new name of the table. could be the old name
+   * @param transactional
+   *          whether to generate and save a table snapshot into the metastore
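+   *          (i.e. stamp newTbl with the current transaction ID and valid
+   *          write ID list before the metastore call; see
+   *          setTableSnapshotForTransactionalTable below)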
* @throws InvalidOperationException * if the changes in metadata is not acceptable * @throws TException */ - public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext) + public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext, + boolean transactional) throws HiveException { - alterTable(fullyQlfdTblName, newTbl, false, environmentContext); + String[] names = Utilities.getDbTableName(fullyQlfdTblName); + alterTable(names[0], names[1], newTbl, false, environmentContext, transactional); } public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext) @@ -662,9 +617,13 @@ public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, E String[] names = Utilities.getDbTableName(fullyQlfdTblName); alterTable(names[0], names[1], newTbl, cascade, environmentContext); } - public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade, - EnvironmentContext environmentContext) + EnvironmentContext environmentContext) + throws HiveException { + alterTable(dbName, tblName, newTbl, cascade, environmentContext, true); + } + public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade, + EnvironmentContext environmentContext, boolean transactional) throws HiveException { try { @@ -679,6 +638,12 @@ public void alterTable(String dbName, String tblName, Table newTbl, boolean casc if (cascade) { environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); } + + // Take a table snapshot and set it to newTbl. + if (transactional) { + setTableSnapshotForTransactionalTable(conf, newTbl); + } + getMSC().alter_table_with_environmentContext(dbName, tblName, newTbl.getTTable(), environmentContext); } catch (MetaException e) { throw new HiveException("Unable to alter table. " + e.getMessage(), e); @@ -728,6 +693,29 @@ public void alterPartition(String tblName, Partition newPart, EnvironmentContext */ public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, HiveException { + alterPartition(dbName, tblName, newPart, environmentContext, true); + } + + /** + * Updates the existing partition metadata with the new metadata. 
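+   * When the transactional flag is set, the partition is first stamped with
+   * the current table snapshot (transaction ID plus valid write ID list),
+   * mirroring what alterTable does for the table object. A typical call,
+   * equivalent to the 4-arg overload above:
+   * <pre>
+   *   db.alterPartition(dbName, tblName, newPart, environmentContext, true);
+   * </pre>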
+   *
+   * @param dbName
+   *          name of the existing table's database
+   * @param tblName
+   *          name of the existing table
+   * @param newPart
+   *          new partition
+   * @param environmentContext
+   *          environment context for the method
+   * @param transactional
+   *          indicates this call is for transaction stats
+   * @throws InvalidOperationException
+   *           if the changes in metadata are not acceptable
+   * @throws TException
+   */
+  public void alterPartition(String dbName, String tblName, Partition newPart,
+                             EnvironmentContext environmentContext, boolean transactional)
+      throws InvalidOperationException, HiveException {
     try {
       validatePartition(newPart);
       String location = newPart.getLocation();
@@ -735,6 +723,9 @@ public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
         location = Utilities.getQualifiedPath(conf, new Path(location));
         newPart.setLocation(location);
       }
+      if (transactional) {
+        setTableSnapshotForTransactionalPartition(conf, newPart);
+      }
       getSynchronizedMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext);
 
     } catch (MetaException e) {
@@ -752,6 +743,10 @@ private void validatePartition(Partition newPart) throws HiveException {
     newPart.checkValidity();
   }
 
+  public void alterPartitions(String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+      throws InvalidOperationException, HiveException {
+    alterPartitions(tblName, newParts, environmentContext, false);
+  }
   /**
    * Updates the existing table metadata with the new metadata.
    *
    * @param tblName
    *          name of the existing table
    * @param newParts
    *          new partitions
+   * @param transactional
+   *          whether to generate and save a table snapshot into the metastore
    * @throws InvalidOperationException
    *           if the changes in metadata is not acceptable
    * @throws TException
    */
-  public void alterPartitions(String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+  public void alterPartitions(String tblName, List<Partition> newParts,
+                              EnvironmentContext environmentContext, boolean transactional)
       throws InvalidOperationException, HiveException {
     String[] names = Utilities.getDbTableName(tblName);
     List<org.apache.hadoop.hive.metastore.api.Partition> newTParts =
         new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>();
     try {
+      AcidUtils.TableSnapshot tableSnapshot = null;
+      if (transactional) {
+        tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable(), true);
+      }
       // Remove the DDL time so that it gets refreshed
       for (Partition tmpPart: newParts) {
         if (tmpPart.getParameters() != null) {
@@ -781,7 +783,9 @@
         newTParts.add(tmpPart.getTPartition());
       }
-      getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext);
+      getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext,
+          tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
+          tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
     } catch (MetaException e) {
       throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
     } catch (TException e) {
@@ -912,6 +916,8 @@ public void createTable(Table tbl, boolean ifNotExists,
         tTbl.setPrivileges(principalPrivs);
       }
     }
+    // Set table snapshot to api.Table to make it persistent.
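+    // For a non-transactional table, getTableSnapshot() inside the helper
+    // returns null and the txnId/validWriteIdList fields are left at their
+    // defaults.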
+    setTableSnapshotForTransactionalTable(conf, tbl);
 
     if (primaryKeys == null && foreignKeys == null
         && uniqueConstraints == null && notNullConstraints == null
         && defaultConstraints == null && checkConstraints == null) {
@@ -1114,7 +1120,27 @@ public Table getTable(final String dbName, final String tableName) throws HiveException {
    * @throws HiveException
    */
   public Table getTable(final String dbName, final String tableName,
-      boolean throwException) throws HiveException {
+                        boolean throwException) throws HiveException {
+    return this.getTable(dbName, tableName, throwException, false);
+  }
+
+  /**
+   * Returns metadata of the table.
+   *
+   * @param dbName
+   *          the name of the database
+   * @param tableName
+   *          the name of the table
+   * @param throwException
+   *          controls whether an exception is thrown or null is returned
+   * @param checkTransactional
+   *          checks whether the table's metadata stats are valid for (i.e.
+   *          compliant with the snapshot isolation of) the current transaction
+   * @return the table, or null if it does not exist and throwException is false
+   * @throws HiveException
+   */
+  public Table getTable(final String dbName, final String tableName,
+                        boolean throwException, boolean checkTransactional) throws HiveException {
 
     if (tableName == null || tableName.equals("")) {
       throw new HiveException("empty table creation??");
@@ -1123,7 +1149,19 @@ public Table getTable(final String dbName, final String tableName,
     // Get the table from metastore
     org.apache.hadoop.hive.metastore.api.Table tTable = null;
     try {
-      tTable = getMSC().getTable(dbName, tableName);
+      if (checkTransactional) {
+        ValidWriteIdList validWriteIdList = null;
+        long txnId = SessionState.get().getTxnMgr() != null ?
+            SessionState.get().getTxnMgr().getCurrentTxnId() : 0;
+        if (txnId > 0) {
+          validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf,
+              dbName, tableName);
+        }
+        tTable = getMSC().getTable(dbName, tableName, txnId,
+            validWriteIdList != null ? validWriteIdList.toString() : null);
+      } else {
+        tTable = getMSC().getTable(dbName, tableName);
+      }
     } catch (NoSuchObjectException e) {
       if (throwException) {
         LOG.error("Table " + dbName + "." + tableName + " not found: " + e.getMessage());
@@ -1775,6 +1813,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> partSpec,
     Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
     alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString());
     validatePartition(newTPart);
+    setTableSnapshotForTransactionalPartition(conf, newTPart);
 
     // If config is set, table is not temporary and partition being inserted exists, capture
     // the list of files added. For not yet existing partitions (insert overwrite to new partition
@@ -2410,10 +2449,16 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType,
    * @throws HiveException
    *           if table doesn't exist or partition already exists
    */
+  @VisibleForTesting
   public Partition createPartition(Table tbl, Map<String, String> partSpec) throws HiveException {
     try {
-      return new Partition(tbl, getMSC().add_partition(
-          Partition.createMetaPartitionObject(tbl, partSpec, null)));
+      org.apache.hadoop.hive.metastore.api.Partition part =
+          Partition.createMetaPartitionObject(tbl, partSpec, null);
+      AcidUtils.TableSnapshot tableSnapshot =
+          AcidUtils.getTableSnapshot(conf, tbl, false);
+      part.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
+      part.setValidWriteIdList(tableSnapshot != null ?
tableSnapshot.getValidWriteIdList() : null); + return new Partition(tbl, getMSC().add_partition(part)); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); @@ -2425,8 +2470,16 @@ public Partition createPartition(Table tbl, Map partSpec) throws int size = addPartitionDesc.getPartitionCount(); List in = new ArrayList(size); + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(conf, tbl); for (int i = 0; i < size; ++i) { - in.add(convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf)); + org.apache.hadoop.hive.metastore.api.Partition tmpPart = + convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf); + if (tmpPart != null && tableSnapshot != null && tableSnapshot.getTxnId() > 0) { + tmpPart.setTxnId(tableSnapshot.getTxnId()); + tmpPart.setValidWriteIdList(tableSnapshot.getValidWriteIdList()); + } + in.add(tmpPart); } List out = new ArrayList(); try { @@ -2462,7 +2515,8 @@ public Partition createPartition(Table tbl, Map partSpec) throws : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) { out.add(new Partition(tbl, outPart)); } - getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), partsToAlter, null); + getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), + partsToAlter, new EnvironmentContext()); for ( org.apache.hadoop.hive.metastore.api.Partition outPart : getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),part_names)){ @@ -2621,7 +2675,8 @@ private void alterPartitionSpec(Table tbl, if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) { fullName = tbl.getFullyQualifiedName(); } - alterPartition(fullName, new Partition(tbl, tpart), null); + Partition newPart = new Partition(tbl, tpart); + alterPartition(fullName, newPart, null); } private void alterPartitionSpecInMemory(Table tbl, @@ -4387,8 +4442,16 @@ private static String getUserName() { } } - public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException { + public boolean setPartitionColumnStatistics( + SetPartitionsStatsRequest request) throws HiveException { try { + ColumnStatistics colStat = request.getColStats().get(0); + ColumnStatisticsDesc statsDesc = colStat.getStatsDesc(); + Table tbl = getTable(statsDesc.getDbName(), statsDesc.getTableName()); + + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); + request.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0); + request.setValidWriteIdList(tableSnapshot != null ? 
tableSnapshot.getValidWriteIdList() : null);
       return getMSC().setPartitionColumnStatistics(request);
     } catch (Exception e) {
       LOG.debug(StringUtils.stringifyException(e));
@@ -4398,8 +4461,27 @@ public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException {
 
   public List<ColumnStatisticsObj> getTableColumnStatistics(
       String dbName, String tableName, List<String> colNames) throws HiveException {
+    return getTableColumnStatistics(dbName, tableName, colNames, false);
+  }
+
+  public List<ColumnStatisticsObj> getTableColumnStatistics(
+      String dbName, String tableName, List<String> colNames, boolean checkTransactional)
+      throws HiveException {
+    List<ColumnStatisticsObj> retv = null;
     try {
-      return getMSC().getTableColumnStatistics(dbName, tableName, colNames);
+      if (checkTransactional) {
+        Table tbl = getTable(dbName, tableName);
+        AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+        if (tableSnapshot != null && tableSnapshot.getTxnId() > 0) {
+          retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames,
+              tableSnapshot.getTxnId(), tableSnapshot.getValidWriteIdList());
+        } else {
+          retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames);
+        }
+      } else {
+        retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames);
+      }
+      return retv;
     } catch (Exception e) {
       LOG.debug(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -4408,8 +4490,25 @@ public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException {
 
   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName,
       String tableName, List<String> partNames, List<String> colNames) throws HiveException {
-    try {
-      return getMSC().getPartitionColumnStatistics(dbName, tableName, partNames, colNames);
+    return getPartitionColumnStatistics(dbName, tableName, partNames, colNames, false);
+  }
+
+  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+      String dbName, String tableName, List<String> partNames, List<String> colNames,
+      boolean checkTransactional)
+      throws HiveException {
+    long txnId = -1;
+    String writeIdList = null;
+    try {
+      if (checkTransactional) {
+        Table tbl = getTable(dbName, tableName);
+        AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+        txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
+        writeIdList = tableSnapshot != null ?
tableSnapshot.getValidWriteIdList() : null; + } + return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName, + txnId, writeIdList); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); return new AggrStats(new ArrayList(),0); @@ -5217,4 +5330,26 @@ public StorageHandlerInfo getStorageHandlerInfo(Table table) throw new HiveException(e); } } + + private void setTableSnapshotForTransactionalTable( + HiveConf conf, Table newTbl) + throws LockException { + + org.apache.hadoop.hive.metastore.api.Table newTTbl = newTbl.getTTable(); + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, newTbl); + + newTTbl.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1); + newTTbl.setValidWriteIdList( + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + } + + private void setTableSnapshotForTransactionalPartition(HiveConf conf, Partition partition) + throws LockException { + + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, partition.getTable()); + org.apache.hadoop.hive.metastore.api.Partition tpartition = partition.getTPartition(); + tpartition.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1); + tpartition.setValidWriteIdList( + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java index 857f3004d7..18a27c4172 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -282,7 +283,17 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // limit. In order to be safe, we do not use it now. return null; } + + Hive hive = Hive.get(pctx.getConf()); Table tbl = tsOp.getConf().getTableMetadata(); + boolean isTransactionalTable = AcidUtils.isTransactionalTable(tbl); + + // If the table is transactional, get stats state by calling getTable() with + // transactional flag on to check the validity of table stats. + if (isTransactionalTable) { + tbl = hive.getTable(tbl.getDbName(), tbl.getTableName(), true, true); + } + if (MetaStoreUtils.isExternalTable(tbl.getTTable())) { Logger.info("Table " + tbl.getTableName() + " is external. Skip StatsOptimizer."); return null; @@ -291,11 +302,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Logger.info("Table " + tbl.getTableName() + " is non Native table. Skip StatsOptimizer."); return null; } - if (AcidUtils.isTransactionalTable(tbl)) { - //todo: should this be OK for MM table? - Logger.info("Table " + tbl.getTableName() + " is ACID table. Skip StatsOptimizer."); - return null; - } + Long rowCnt = getRowCnt(pctx, tsOp, tbl); // if we can not have correct table stats, then both the table stats and column stats are not useful. 
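+      // For a transactional table, tbl was re-fetched above with
+      // checkTransactional=true, so this row count comes from stats that were
+      // validated against the current snapshot.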
if (rowCnt == null) { @@ -375,7 +382,8 @@ else if (getGbyKeyType(cgbyOp) == GbyKeyType.CONSTANT && rowCnt == 0) { List oneRow = new ArrayList(); - Hive hive = Hive.get(pctx.getConf()); + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(pctx.getConf(), tbl); for (AggregationDesc aggr : pgbyOp.getConf().getAggregators()) { if (aggr.getDistinct()) { @@ -462,8 +470,13 @@ else if (udaf instanceof GenericUDAFCount) { + " are not up to date."); return null; } - List stats = hive.getMSC().getTableColumnStatistics( - tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName)); + + List stats = + hive.getMSC().getTableColumnStatistics( + tbl.getDbName(), tbl.getTableName(), + Lists.newArrayList(colName), + tableSnapshot != null ? tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); if (stats.isEmpty()) { Logger.debug("No stats for " + tbl.getTableName() + " column " + colName); return null; @@ -523,8 +536,13 @@ else if (udaf instanceof GenericUDAFCount) { + " are not up to date."); return null; } - List stats = hive.getMSC().getTableColumnStatistics( - tbl.getDbName(),tbl.getTableName(), Lists.newArrayList(colName)); + + List stats = + hive.getMSC().getTableColumnStatistics( + tbl.getDbName(), tbl.getTableName(), + Lists.newArrayList(colName), + tableSnapshot != null ? tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); if (stats.isEmpty()) { Logger.debug("No stats for " + tbl.getTableName() + " column " + colName); return null; @@ -664,9 +682,12 @@ else if (udaf instanceof GenericUDAFCount) { + " are not up to date."); return null; } - ColumnStatisticsData statData = hive.getMSC().getTableColumnStatistics( - tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName)) - .get(0).getStatsData(); + ColumnStatisticsData statData = + hive.getMSC().getTableColumnStatistics( + tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName), + tableSnapshot != null ? tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null) + .get(0).getStatsData(); String name = colDesc.getTypeString().toUpperCase(); switch (type) { case Integer: { @@ -887,7 +908,7 @@ private ColumnStatisticsData validateSingleColStat(List sta } private Collection> verifyAndGetPartColumnStats( - Hive hive, Table tbl, String colName, Set parts) throws TException { + Hive hive, Table tbl, String colName, Set parts) throws TException, LockException { List partNames = new ArrayList(parts.size()); for (Partition part : parts) { if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(part.getTable(), part.getParameters(), colName)) { @@ -897,8 +918,13 @@ private ColumnStatisticsData validateSingleColStat(List sta } partNames.add(part.getName()); } + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(hive.getConf(), tbl); + Map> result = hive.getMSC().getPartitionColumnStatistics( - tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName)); + tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName), + tableSnapshot != null ? tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? 
tableSnapshot.getValidWriteIdList() : null); if (result.size() != parts.size()) { Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions"); return null; diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java index d4d46a3671..9a271a2431 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java @@ -344,12 +344,12 @@ private int updatePartitions(Hive db, List scs, Table table } if (values.get(0).result instanceof Table) { - db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext); + db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext, true); LOG.debug("Updated stats for {}.", tableFullName); } else { if (values.get(0).result instanceof Partition) { List results = Lists.transform(values, FooterStatCollector.EXTRACT_RESULT_FUNCTION); - db.alterPartitions(tableFullName, results, environmentContext); + db.alterPartitions(tableFullName, results, environmentContext, true); LOG.debug("Bulk updated {} partitions of {}.", results.size(), tableFullName); } else { throw new RuntimeException("inconsistent"); diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index 8c23887176..0a2992d3dc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -127,10 +127,7 @@ public BasicStatsProcessor(Partish partish, BasicStatsWork work, HiveConf conf, public Object process(StatsAggregator statsAggregator) throws HiveException, MetaException { Partish p = partish; Map parameters = p.getPartParameters(); - if (p.isTransactionalTable()) { - // TODO: this should also happen on any error. Right now this task will just fail. - StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE); - } else if (work.isTargetRewritten()) { + if (work.isTargetRewritten()) { StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE); } @@ -208,12 +205,6 @@ private String getAggregationPrefix0(Table table, Partition partition) throws Me private void updateStats(StatsAggregator statsAggregator, Map parameters, String aggKey, boolean isFullAcid) throws HiveException { for (String statType : StatsSetupConst.statsRequireCompute) { - if (isFullAcid && !work.isTargetRewritten()) { - // Don't bother with aggregation in this case, it will probably be invalid. - parameters.remove(statType); - continue; - } - String value = statsAggregator.aggregateStats(aggKey, statType); if (value != null && !value.isEmpty()) { long longValue = Long.parseLong(value); @@ -272,7 +263,7 @@ private int aggregateStats(Hive db) { if (res == null) { return 0; } - db.alterTable(tableFullName, res, environmentContext); + db.alterTable(tableFullName, res, environmentContext, true); if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) { console.printInfo("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']'); @@ -340,7 +331,7 @@ public Void call() throws Exception { } if (!updates.isEmpty()) { - db.alterPartitions(tableFullName, updates, environmentContext); + db.alterPartitions(tableFullName, updates, environmentContext, true); } if (work.isStatsReliable() && updates.size() != processors.size()) { LOG.info("Stats should be reliadble...however seems like there were some issue.. 
=> ret 1"); diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java index d4cfd0ad62..acebf520d1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java @@ -34,12 +34,14 @@ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.exec.FetchOperator; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc; import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; @@ -176,6 +178,11 @@ public int persistColumnStats(Hive db, Table tbl) throws HiveException, MetaExce } SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats); request.setNeedMerge(colStatDesc.isNeedMerge()); + if (AcidUtils.isTransactionalTable(tbl) && SessionState.get().getTxnMgr() != null) { + request.setTxnId(SessionState.get().getTxnMgr().getCurrentTxnId()); + request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf, + AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString()); + } db.setPartitionColumnStatistics(request); return 0; } diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index a24b6423ba..930282d73e 100755 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -324,6 +324,13 @@ private void validateTable(Table tbl, String tableName) throws MetaException { tbl.setCreateTime(ft.getTTable().getCreateTime()); tbl.getParameters().put(hive_metastoreConstants.DDL_TIME, ft.getParameters().get(hive_metastoreConstants.DDL_TIME)); + // Txn stuff set by metastore + if (tbl.getTTable().isSetTxnId()) { + ft.getTTable().setTxnId(tbl.getTTable().getTxnId()); + } + if (tbl.getTTable().isSetValidWriteIdList()) { + ft.getTTable().setValidWriteIdList(tbl.getTTable().getValidWriteIdList()); + } assertTrue("Tables doesn't match: " + tableName + " (" + ft.getTTable() + "; " + tbl.getTTable() + ")", ft.getTTable().equals(tbl.getTTable())); assertEquals("SerializationLib is not set correctly", tbl diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java index ffd0445db0..257a6adf8e 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java @@ -231,6 +231,7 @@ public void setup() { conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); + conf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true); } public void cleanupTables() throws 
HiveException { diff --git ql/src/test/queries/clientpositive/acid_stats2.q ql/src/test/queries/clientpositive/acid_stats2.q new file mode 100644 index 0000000000..cf96731985 --- /dev/null +++ ql/src/test/queries/clientpositive/acid_stats2.q @@ -0,0 +1,42 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.fetch.task.conversion=none; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +create table stats3(key int,value string) stored as orc tblproperties ("transactional"="true"); +insert into table stats3 values (1, "foo"); +explain select count(*) from stats3; +select count(*) from stats3; +insert into table stats3 values (2, "bar"); +explain select count(*) from stats3; +select count(*) from stats3; +update stats3 set value = "baz" where key = 4; +explain select count(*) from stats3; +select count(*) from stats3; +update stats3 set value = "baz" where key = 1; +explain select count(*) from stats3; +select count(*) from stats3; +delete from stats3 where key = 3; +explain select count(*) from stats3; +select count(*) from stats3; +delete from stats3 where key = 1; +explain select count(*) from stats3; +select count(*) from stats3; +delete from stats3 where key = 2; +explain select count(*) from stats3; +select count(*) from stats3; + +drop table stats3; \ No newline at end of file diff --git ql/src/test/queries/clientpositive/stats_nonpart.q ql/src/test/queries/clientpositive/stats_nonpart.q new file mode 100644 index 0000000000..f6019cc497 --- /dev/null +++ ql/src/test/queries/clientpositive/stats_nonpart.q @@ -0,0 +1,53 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +-- create source. 
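+-- mysource is a plain (non-transactional) table used only to seed the
+-- transactional table under test.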
+drop table if exists mysource;
+create table mysource (p int,key int);
+insert into mysource values (100,20), (101,40), (102,50);
+insert into mysource values (100,30), (101,50), (102,60);
+
+-- test nonpartitioned table
+drop table if exists stats_nonpartitioned;
+
+--create table stats_nonpartitioned(key int, value int) stored as orc;
+create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true");
+--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+
+explain select count(*) from stats_nonpartitioned;
+select count(*) from stats_nonpartitioned;
+desc formatted stats_nonpartitioned;
+
+explain insert into table stats_nonpartitioned select * from mysource where p == 100;
+insert into table stats_nonpartitioned select * from mysource where p == 100;
+
+desc formatted stats_nonpartitioned;
+
+explain select count(*) from stats_nonpartitioned;
+select count(*) from stats_nonpartitioned;
+explain select count(key) from stats_nonpartitioned;
+select count(key) from stats_nonpartitioned;
+
+--analyze table stats_nonpartitioned compute statistics;
+analyze table stats_nonpartitioned compute statistics for columns key, value;
+
+explain select count(*) from stats_nonpartitioned;
+select count(*) from stats_nonpartitioned;
+explain select count(key) from stats_nonpartitioned;
+select count(key) from stats_nonpartitioned;
+
diff --git ql/src/test/queries/clientpositive/stats_part.q ql/src/test/queries/clientpositive/stats_part.q
new file mode 100644
index 0000000000..d0812e1007
--- /dev/null
+++ ql/src/test/queries/clientpositive/stats_part.q
@@ -0,0 +1,98 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- create source.
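+-- Each distinct value of p in mysource maps to one partition of the
+-- transactional stats_part table created below.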
+drop table if exists mysource;
+create table mysource (p int, key int, value int);
+insert into mysource values (100,20,201), (101,40,401), (102,50,501);
+insert into mysource values (100,21,211), (101,41,411), (102,51,511);
+
+--explain select count(*) from mysource;
+--select count(*) from mysource;
+
+-- Gather col stats manually
+--analyze table mysource compute statistics for columns p, key;
+
+--explain select count(*) from mysource;
+--select count(*) from mysource;
+--explain select count(key) from mysource;
+--select count(key) from mysource;
+
+-- test partitioned table
+drop table if exists stats_part;
+
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc;
+create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true");
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+explain select count(key) from stats_part;
+--select count(*) from stats_part;
+--explain select count(*) from stats_part where p = 100;
+--select count(*) from stats_part where p = 100;
+explain select count(key) from stats_part where p > 100;
+--select count(*) from stats_part where p > 100;
+desc formatted stats_part;
+
+--explain insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101;
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+
+insert into table mysource values (103,20,200), (103,83,832), (103,53,530);
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+show partitions stats_part;
+
+explain select count(*) from stats_part;
+select count(*) from stats_part;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(key) from stats_part where p > 100;
+select count(key) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+--update stats_part set key = key + 100 where key in(-50,40) and p > 100;
+desc formatted stats_part;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+select count(value) from stats_part;
+--update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100;
+select count(value) from stats_part;
+
+--delete from stats_part where key in (20, 41);
+desc formatted stats_part;
+
+explain select count(*) from stats_part where p = 100;
+select count(*) from stats_part where p = 100;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+describe extended stats_part partition (p=101);
+describe extended stats_part;
+
+
diff --git ql/src/test/queries/clientpositive/stats_part2.q
ql/src/test/queries/clientpositive/stats_part2.q new file mode 100644 index 0000000000..24be2185d0 --- /dev/null +++ ql/src/test/queries/clientpositive/stats_part2.q @@ -0,0 +1,100 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +-- create source. +drop table if exists mysource; +create table mysource (p int, key int, value string); +insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50'); +insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51'); + +-- test partitioned table +drop table if exists stats_part; + +--create table stats_part(key int,value string) partitioned by (p int) stored as orc; +create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true"); +--create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); + +--explain select count(*) from stats_part; +--select count(*) from stats_part; +--explain select count(*) from stats_part where p = 100; +--select count(*) from stats_part where p = 100; +explain select count(*) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; +--select count(*) from stats_part where p > 100; +desc formatted stats_part; + +--explain insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100; +insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100; +insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101; +insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102; + +desc formatted stats_part; +explain select count(key) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; + +insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53'); +insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102; + +desc formatted stats_part; +show partitions stats_part; + +explain select count(*) from stats_part; +select count(*) from stats_part; +explain select count(key) from stats_part; +select count(key) from stats_part; +explain select count(key) from stats_part where p > 100; +select count(key) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; +select max(key) from stats_part where p > 100; + +desc formatted stats_part partition(p = 100); +desc formatted stats_part partition(p = 101); +desc formatted stats_part partition(p = 102); +update stats_part set key = key + 100 where key in(-50,40) and p > 100; +explain select max(key) from stats_part where p > 100; +select max(key) from stats_part where p > 100; +desc formatted stats_part partition(p = 100); +desc formatted stats_part partition(p = 101); +desc formatted stats_part partition(p = 102); + +select count(value) from stats_part; +update stats_part set value =
concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100; +desc formatted stats_part partition(p = 100); +desc formatted stats_part partition(p = 101); +desc formatted stats_part partition(p = 102); +select count(value) from stats_part; + +delete from stats_part where key in (20, 41); +desc formatted stats_part partition(p = 100); +desc formatted stats_part partition(p = 101); +desc formatted stats_part partition(p = 102); + +explain select count(*) from stats_part where p = 100; +select count(*) from stats_part where p = 100; +explain select count(*) from stats_part where p > 100; +select count(*) from stats_part where p > 100; +explain select count(key) from stats_part; +select count(key) from stats_part; +explain select count(*) from stats_part where p > 100; +select count(*) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; +select max(key) from stats_part where p > 100; + +describe extended stats_part partition (p=101); +describe extended stats_part; + + diff --git ql/src/test/queries/clientpositive/stats_sizebug.q ql/src/test/queries/clientpositive/stats_sizebug.q new file mode 100644 index 0000000000..7108766e34 --- /dev/null +++ ql/src/test/queries/clientpositive/stats_sizebug.q @@ -0,0 +1,37 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +-- create source. 
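+-- Note: the two inserts below load identical rows into mysource; desc formatted stats_nonpartitioned is then checked both before and after analyzing the separate mysource table.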
+drop table if exists mysource; +create table mysource (p int,key int); +insert into mysource values (100,20), (101,40), (102,50); +insert into mysource values (100,20), (101,40), (102,50); + +-- test nonpartitioned table +drop table if exists stats_nonpartitioned; + +--create table stats_nonpartitioned(key int, value int) stored as orc; +create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true"); +--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +explain insert into table stats_nonpartitioned select * from mysource where p == 100; +insert into table stats_nonpartitioned select * from mysource where p == 100; + +desc formatted stats_nonpartitioned; +analyze table mysource compute statistics for columns p, key; +desc formatted stats_nonpartitioned; + + diff --git ql/src/test/results/clientpositive/acid_nullscan.q.out ql/src/test/results/clientpositive/acid_nullscan.q.out index c9684dd54f..b7d7dd86b9 100644 --- ql/src/test/results/clientpositive/acid_nullscan.q.out +++ ql/src/test/results/clientpositive/acid_nullscan.q.out @@ -42,12 +42,12 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_vectorized_n1 - Statistics: Num rows: 90 Data size: 25960 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 11 Data size: 25960 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false predicate: false (type: boolean) - Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 2360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(a) mode: hash @@ -69,6 +69,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true","b":"true"}} bucket_count 2 bucket_field_name a bucketing_version 2 @@ -79,6 +80,8 @@ STAGE PLANS: #### A masked pattern was here #### name default.acid_vectorized_n1 numFiles 3 + numRows 11 + rawDataSize 0 serialization.ddl struct acid_vectorized_n1 { i32 a, string b} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe @@ -91,6 +94,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true","b":"true"}} bucket_count 2 bucket_field_name a bucketing_version 2 @@ -101,6 +105,8 @@ STAGE PLANS: #### A masked pattern was here #### name default.acid_vectorized_n1 numFiles 3 + numRows 11 + rawDataSize 0 serialization.ddl struct acid_vectorized_n1 { i32 a, string b} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde diff --git ql/src/test/results/clientpositive/acid_stats2.q.out ql/src/test/results/clientpositive/acid_stats2.q.out new file mode 100644 index 0000000000..5fc0505462 --- /dev/null +++ ql/src/test/results/clientpositive/acid_stats2.q.out @@ -0,0 +1,237 @@ +PREHOOK: query: create table stats3(key int,value string) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats3 +POSTHOOK: query: create table stats3(key int,value string) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: 
CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats3 +PREHOOK: query: insert into table stats3 values (1, "foo") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@stats3 +POSTHOOK: query: insert into table stats3 values (1, "foo") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@stats3 +POSTHOOK: Lineage: stats3.key SCRIPT [] +POSTHOOK: Lineage: stats3.value SCRIPT [] +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +1 +PREHOOK: query: insert into table stats3 values (2, "bar") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@stats3 +POSTHOOK: query: insert into table stats3 values (2, "bar") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@stats3 +POSTHOOK: Lineage: stats3.key SCRIPT [] +POSTHOOK: Lineage: stats3.value SCRIPT [] +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +2 +PREHOOK: query: update stats3 set value = "baz" where key = 4 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: update stats3 set value = "baz" where key = 4 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +2 +PREHOOK: query: update stats3 set value = "baz" where key = 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: update stats3 set value = "baz" where key = 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 
+PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +2 +PREHOOK: query: delete from stats3 where key = 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: delete from stats3 where key = 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +2 +PREHOOK: query: delete from stats3 where key = 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: delete from stats3 where key = 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +1 +PREHOOK: query: delete from stats3 where key = 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: delete from stats3 where key = 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +0 +PREHOOK: query: drop table stats3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: drop table stats3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 diff --git ql/src/test/results/clientpositive/autoColumnStats_4.q.out ql/src/test/results/clientpositive/autoColumnStats_4.q.out index a16ec07be1..190686547c 100644 --- ql/src/test/results/clientpositive/autoColumnStats_4.q.out +++ ql/src/test/results/clientpositive/autoColumnStats_4.q.out @@ -199,8 +199,11 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}} bucketing_version 2 
numFiles 2 + numRows 10 + rawDataSize 0 totalSize 1899 transactional true transactional_properties default @@ -241,9 +244,11 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 numFiles 4 + numRows 8 + rawDataSize 0 totalSize 3275 transactional true transactional_properties default diff --git ql/src/test/results/clientpositive/druid/druidmini_mv.q.out ql/src/test/results/clientpositive/druid/druidmini_mv.q.out index e5e1ea9299..2e44e14e8f 100644 --- ql/src/test/results/clientpositive/druid/druidmini_mv.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_mv.q.out @@ -341,34 +341,34 @@ STAGE PLANS: TableScan alias: cmv_basetable_n2 filterExpr: (a = 3) (type: boolean) - Statistics: Num rows: 31 Data size: 372 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (a = 3) (type: boolean) - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c (type: double) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: double) Map 3 Map Operator Tree: TableScan alias: cmv_basetable_n2 filterExpr: ((d = 3) and (a = 3)) (type: boolean) - Statistics: Num rows: 31 Data size: 496 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((a = 3) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c (type: double) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: double) Reducer 2 Reduce Operator Tree: @@ -379,14 +379,14 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 145 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 49 Data size: 1421 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 145 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 49 Data size: 1421 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 145 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 49 Data size: 1421 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -460,34 +460,34 @@ STAGE PLANS: TableScan alias: cmv_basetable_n2 filterExpr: (a = 3) (type: boolean) - Statistics: Num rows: 31 Data size: 22692 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 5124 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (a = 3) (type: boolean) - Statistics: Num rows: 5 Data size: 3660 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 5124 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: CAST( t AS timestamp with local time zone) (type: timestamp with local time zone), 3 (type: int), b (type: varchar(256)), c (type: double), userid (type: varchar(256)) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 3660 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 5124 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double), _col4 (type: varchar(256)), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, __time_granularity - Statistics: Num rows: 5 Data size: 3660 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 5124 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: __time_granularity (type: timestamp) sort order: + Map-reduce partition columns: __time_granularity (type: timestamp) - Statistics: Num rows: 5 Data size: 3660 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 5124 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double), _col4 (type: varchar(256)) Reducer 2 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: timestamp with local time zone), VALUE._col1 (type: int), VALUE._col2 (type: varchar(256)), VALUE._col3 (type: double), VALUE._col4 (type: varchar(256)), KEY.__time_granularity (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, __time_granularity - Statistics: Num rows: 5 Data size: 3660 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 5124 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false Dp Sort State: PARTITION_SORTED - Statistics: Num rows: 5 Data size: 3660 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 5124 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat @@ -552,17 +552,17 @@ STAGE PLANS: TableScan alias: cmv_basetable_n2 filterExpr: ((a = 3) and (d = 3)) (type: boolean) - Statistics: Num rows: 31 Data size: 496 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((a = 3) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c (type: double) outputColumnNames: _col1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 112 Basic stats: 
COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) Map 3 Map Operator Tree: @@ -587,14 +587,14 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col6 - Statistics: Num rows: 3 Data size: 87 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 21 Data size: 609 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: double), _col0 (type: int), _col6 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 87 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 21 Data size: 609 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 3 Data size: 87 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 21 Data size: 609 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out index b856b99caf..cfb9f1b2e2 100644 --- ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out +++ ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out @@ -45,22 +45,22 @@ STAGE PLANS: alias: acidtbldefault filterExpr: (a = 1) (type: boolean) buckets included: [13,] of 16 - Statistics: Num rows: 1850 Data size: 7036 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 9174 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Filter Operator isSamplingPred: false predicate: (a = 1) (type: boolean) - Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: 1 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### NumFilesPerFileSink: 1 - Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Stats Publishing Key Prefix: hdfs://### HDFS PATH ### table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat @@ -88,6 +88,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true"}} bucket_count 16 bucket_field_name a bucketing_version 2 @@ -99,6 +100,8 @@ STAGE PLANS: location hdfs://### HDFS PATH ### name default.acidtbldefault numFiles 17 + numRows 9174 + rawDataSize 0 serialization.ddl struct acidtbldefault { i32 a} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -111,6 +114,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true"}} bucket_count 16 
bucket_field_name a bucketing_version 2 @@ -122,6 +126,8 @@ STAGE PLANS: location hdfs://### HDFS PATH ### name default.acidtbldefault numFiles 17 + numRows 9174 + rawDataSize 0 serialization.ddl struct acidtbldefault { i32 a} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde diff --git ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out index be1b4c68f9..57ff575b92 100644 --- ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out +++ ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out @@ -665,22 +665,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over10k_orc_bucketed - Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct) outputColumnNames: ROW__ID - Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: ROW__ID (type: struct) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: _col0 (type: struct) - Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Execution mode: llap LLAP IO: may be used (ACID table) @@ -692,13 +692,13 @@ STAGE PLANS: keys: KEY._col0 (type: struct) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col1 > 1L) (type: boolean) - Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out index 7a9e200b9b..6c3751deb6 100644 --- ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out +++ ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out @@ -95,19 +95,19 @@ STAGE PLANS: TableScan alias: acid_part filterExpr: ((key = 'foo') and (ds = '2008-04-08')) (type: boolean) - Statistics: Num rows: 160 Data size: 61001 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1601 Data size: 139287 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 
1906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -116,10 +116,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -191,7 +191,7 @@ STAGE PLANS: TableScan alias: acid_part filterExpr: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean) - Statistics: Num rows: 159 Data size: 104317 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1601 Data size: 433871 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL @@ -383,19 +383,19 @@ STAGE PLANS: TableScan alias: acid_part_sdpo filterExpr: ((key = 'foo') and (ds = '2008-04-08')) (type: boolean) - Statistics: Num rows: 176 Data size: 67063 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1601 Data size: 150414 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -404,10 +404,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE 
Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -479,7 +479,7 @@ STAGE PLANS: TableScan alias: acid_part_sdpo filterExpr: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean) - Statistics: Num rows: 171 Data size: 112152 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1601 Data size: 444998 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL @@ -680,19 +680,19 @@ STAGE PLANS: TableScan alias: acid_2l_part filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean) - Statistics: Num rows: 157 Data size: 60527 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1601 Data size: 139287 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -701,10 +701,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -777,19 +777,19 @@ STAGE PLANS: TableScan alias: acid_2l_part filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean) - Statistics: Num rows: 1600 Data size: 156727 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 3201 Data size: 291291 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 910 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct), hr (type: int) outputColumnNames: _col0, _col4 - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: 
UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col4 (type: int) Execution mode: llap LLAP IO: may be used (ACID table) @@ -799,10 +799,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col2 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -904,19 +904,19 @@ STAGE PLANS: TableScan alias: acid_2l_part filterExpr: (value = 'bar') (type: boolean) - Statistics: Num rows: 1600 Data size: 451127 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 4200 Data size: 1171800 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (value = 'bar') (type: boolean) - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3906 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct), ds (type: string), hr (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: string), _col2 (type: int) Execution mode: llap LLAP IO: may be used (ACID table) @@ -926,10 +926,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), VALUE._col1 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1103,19 +1103,19 @@ STAGE PLANS: TableScan alias: acid_2l_part_sdpo filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean) - Statistics: Num rows: 157 Data size: 60527 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1601 Data size: 150414 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: 
NONE Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -1124,10 +1124,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1200,19 +1200,19 @@ STAGE PLANS: TableScan alias: acid_2l_part_sdpo filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean) - Statistics: Num rows: 1600 Data size: 156727 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 3201 Data size: 313458 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 455 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct), hr (type: int) outputColumnNames: _col0, _col4 - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: '2008-04-08' (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct) sort order: ++++ Map-reduce partition columns: '2008-04-08' (type: string), _col4 (type: int) - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -1221,11 +1221,11 @@ STAGE PLANS: Select Operator expressions: KEY._col0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number' - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1360 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false Dp Sort State: PARTITION_BUCKET_SORTED - Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1360 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1327,19 +1327,19 @@ STAGE PLANS: TableScan 
alias: acid_2l_part_sdpo filterExpr: (value = 'bar') (type: boolean) - Statistics: Num rows: 1600 Data size: 451127 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 4952 Data size: 2061430 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (value = 'bar') (type: boolean) - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1375 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct), ds (type: string), hr (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col1 (type: string), _col2 (type: int), '_bucket_number' (type: string), _col0 (type: struct) sort order: ++++ Map-reduce partition columns: _col1 (type: string), _col2 (type: int) - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -1348,11 +1348,11 @@ STAGE PLANS: Select Operator expressions: KEY._col0 (type: struct), KEY._col1 (type: string), KEY._col2 (type: int), KEY.'_bucket_number' (type: string) outputColumnNames: _col0, _col1, _col2, '_bucket_number' - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1810 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false Dp Sort State: PARTITION_BUCKET_SORTED - Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1810 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1526,7 +1526,7 @@ STAGE PLANS: TableScan alias: acid_2l_part_sdpo_no_cp filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean) - Statistics: Num rows: 97 Data size: 82922 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1601 Data size: 599036 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) Statistics: Num rows: 5 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL @@ -1625,19 +1625,19 @@ STAGE PLANS: TableScan alias: acid_2l_part_sdpo_no_cp filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean) - Statistics: Num rows: 1600 Data size: 598664 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 3201 Data size: 1197516 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct), key (type: string), ds (type: string), hr (type: int) outputColumnNames: _col0, _col1, _col3, _col4 - Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 2675 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct) sort order: ++++ 
Map-reduce partition columns: _col3 (type: string), _col4 (type: int) - Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 2675 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: string), 'bar' (type: string) Execution mode: llap LLAP IO: may be used (ACID table) @@ -1647,11 +1647,11 @@ STAGE PLANS: Select Operator expressions: KEY._col0 (type: struct), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number' - Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 3165 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false Dp Sort State: PARTITION_BUCKET_SORTED - Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 3165 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat diff --git ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out index 8a5a326ed4..ecf79ae090 100644 --- ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out +++ ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out @@ -3237,19 +3237,19 @@ STAGE PLANS: TableScan alias: acid_uami_n1 filterExpr: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean) - Statistics: Num rows: 281 Data size: 87904 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1002 Data size: 225450 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean) - Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 675 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), i (type: int), vc (type: varchar(128)) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: int), _col3 (type: varchar(128)) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -3259,10 +3259,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE table: input 
format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -3331,19 +3331,19 @@ STAGE PLANS: TableScan alias: acid_uami_n1 filterExpr: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean) - Statistics: Num rows: 320 Data size: 100040 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1002 Data size: 225450 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean) - Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 225 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), i (type: int), vc (type: varchar(128)) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: int), _col3 (type: varchar(128)) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -3353,10 +3353,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat diff --git ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out index cd38c51729..a93593f3ec 100644 --- ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out +++ ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out @@ -1705,19 +1705,19 @@ STAGE PLANS: TableScan alias: insert_into1_n0 filterExpr: (value = 1) (type: boolean) - Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (value = 1) (type: boolean) - Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), value (type: string), i (type: int) outputColumnNames: _col0, _col2, _col3 - Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 940 
diff --git ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
index cd38c51729..a93593f3ec 100644
--- ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
+++ ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
@@ -1705,19 +1705,19 @@ STAGE PLANS:
                TableScan
                  alias: insert_into1_n0
                  filterExpr: (value = 1) (type: boolean)
-                 Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: (value = 1) (type: boolean)
-                   Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: ROW__ID (type: struct), value (type: string), i (type: int)
                      outputColumnNames: _col0, _col2, _col3
-                     Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: struct)
                        sort order: +
                        Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                       Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col2 (type: string), _col3 (type: int)
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
@@ -1727,10 +1727,10 @@ STAGE PLANS:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: struct), 1 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int)
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1813,19 +1813,19 @@ STAGE PLANS:
                TableScan
                  alias: insert_into1_n0
                  filterExpr: (value = 1) (type: boolean)
-                 Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: (value = 1) (type: boolean)
-                   Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: ROW__ID (type: struct), i (type: int)
                      outputColumnNames: _col0, _col3
-                     Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: struct)
                        sort order: +
                        Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                       Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col3 (type: int)
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
@@ -1835,10 +1835,10 @@ STAGE PLANS:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: struct), 1 (type: int), null (type: string), VALUE._col0 (type: int)
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2371,15 +2371,15 @@ STAGE PLANS:
                TableScan
                  alias: t
                  filterExpr: enforce_constraint(key is not null) (type: boolean)
-                 Statistics: Num rows: 20 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: enforce_constraint(key is not null) (type: boolean)
-                   Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
                      key expressions: key (type: int)
                      sort order: +
                      Map-reduce partition columns: key (type: int)
-                     Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
        Map 5
@@ -2408,18 +2408,18 @@ STAGE PLANS:
                  0 key (type: int)
                  1 key (type: int)
                outputColumnNames: _col0, _col6
-               Statistics: Num rows: 11 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                Filter Operator
                  predicate: _col0 is null (type: boolean)
-                 Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: _col6 (type: int)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
                      sort order:
                      Map-reduce partition columns: null (type: string)
-                     Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
                      value expressions: _col0 (type: int)
        Reducer 3
            Execution mode: llap
@@ -2427,10 +2427,10 @@ STAGE PLANS:
              Select Operator
                expressions: VALUE._col0 (type: int), 'a1' (type: string), null (type: string)
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2440,15 +2440,15 @@ STAGE PLANS:
              Select Operator
                expressions: _col0 (type: int), 'a1' (type: string), null (type: string)
                outputColumnNames: key, a1, value
-               Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: compute_stats(key, 'hll'), compute_stats(a1, 'hll'), compute_stats(value, 'hll')
                  mode: hash
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
                  Reduce Output Operator
                    sort order:
-                   Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
                    value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct)
        Reducer 4
            Execution mode: llap
@@ -2457,10 +2457,10 @@ STAGE PLANS:
                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2569,12 +2569,12 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t
-                 Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
                  Reduce Output Operator
                    key expressions: key (type: int)
                    sort order: +
                    Map-reduce partition columns: key (type: int)
-                   Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
                    value expressions: value (type: string), ROW__ID (type: struct)
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
@@ -2601,62 +2601,62 @@ STAGE PLANS:
                  0 key (type: int)
                  1 key (type: int)
                outputColumnNames: _col0, _col2, _col5, _col6, _col7
-               Statistics: Num rows: 22 Data size: 4136 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 2 Data size: 432 Basic stats: COMPLETE Column stats: COMPLETE
                Filter Operator
                  predicate: ((_col0 = _col6) and (_col6 < 3)) (type: boolean)
-                 Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: _col5 (type: struct)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
                      key expressions: _col0 (type: struct)
                      sort order: +
                      Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                     Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
                Filter Operator
                  predicate: ((_col0 = _col6) and (_col6 > 3) and (_col6 >= 3) and enforce_constraint(_col0 is not null)) (type: boolean)
-                 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: _col5 (type: struct), _col0 (type: int), _col2 (type: string)
                    outputColumnNames: _col0, _col1, _col3
-                   Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
                      key expressions: _col0 (type: struct)
                      sort order: +
                      Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                     Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
                      value expressions: _col1 (type: int), _col3 (type: string)
                Filter Operator
                  predicate: (_col0 = _col6) (type: boolean)
-                 Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: _col5 (type: struct)
                    outputColumnNames: _col5
-                   Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                    Group By Operator
                      aggregations: count()
                      keys: _col5 (type: struct)
                      mode: hash
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: struct)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: struct)
-                       Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: bigint)
                Filter Operator
                  predicate: (_col0 is null and enforce_constraint(_col6 is not null)) (type: boolean)
-                 Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: _col6 (type: int), _col7 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
                      sort order:
                      Map-reduce partition columns: null (type: string)
-                     Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
                      value expressions: _col0 (type: int), _col1 (type: string)
        Reducer 3
            Execution mode: vectorized, llap
@@ -2664,10 +2664,10 @@ STAGE PLANS:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: struct)
                outputColumnNames: _col0
-               Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2680,10 +2680,10 @@ STAGE PLANS:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: int), 'a1' (type: string), VALUE._col1 (type: string)
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2698,17 +2698,17 @@ STAGE PLANS:
                keys: KEY._col0 (type: struct)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                Filter Operator
                  predicate: (_col1 > 1L) (type: boolean)
-                 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: cardinality_violation(_col0) (type: int)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2717,19 +2717,19 @@ STAGE PLANS:
              Select Operator
                expressions: _col0 (type: int)
                outputColumnNames: val
-               Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: compute_stats(val, 'hll')
                  mode: complete
                  outputColumnNames: _col0
-                 Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: _col0 (type: struct)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
                      table:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2740,10 +2740,10 @@ STAGE PLANS:
              Select Operator
                expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), null (type: string)
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2753,15 +2753,15 @@ STAGE PLANS:
              Select Operator
                expressions: _col0 (type: int), _col1 (type: string), null (type: string)
                outputColumnNames: key, a1, value
-               Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: compute_stats(key, 'hll'), compute_stats(a1, 'hll'), compute_stats(value, 'hll')
                  mode: hash
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
                  Reduce Output Operator
                    sort order:
-                   Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
                    value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct)
        Reducer 7
            Execution mode: llap
@@ -2770,10 +2770,10 @@ STAGE PLANS:
                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
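insert_into_default_keyword.q.out covers INSERT/UPDATE/MERGE statements that use the DEFAULT keyword; only the statistics lines change, while the enforce_constraint and cardinality_violation operators in these plans stay as before. For orientation, a MERGE of the rough shape below compiles into the multi-branch plan seen above, one branch per WHEN clause plus a cardinality-violation check (statement reconstructed for illustration, not taken from the test):

    MERGE INTO t USING s ON t.key = s.key
    WHEN MATCHED AND s.key < 3 THEN DELETE
    WHEN MATCHED AND s.key > 3 THEN UPDATE SET a1 = 'a1'
    WHEN NOT MATCHED THEN INSERT VALUES (s.key, 'a1', DEFAULT);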
diff --git ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
index 8bd95b1937..492fe055e8 100644
--- ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
+++ ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
@@ -168,8 +168,11 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MANAGED_TABLE
Table Parameters:
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}}
        bucketing_version       2
        numFiles                1
+       numRows                 12288
+       rawDataSize             0
        totalSize               295583
        transactional           true
        transactional_properties        default
@@ -190,54 +193,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(*) from acid_ivot
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage

STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1
-            Map Operator Tree:
-                TableScan
-                  alias: acid_ivot
-                  Statistics: Num rows: 5864 Data size: 2955830 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 5864 Data size: 2955830 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order:
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: may be used (ACID table)
-        Reducer 2
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
@@ -374,8 +335,11 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MANAGED_TABLE
Table Parameters:
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}}
        bucketing_version       2
        numFiles                1
+       numRows                 2
+       rawDataSize             0
        totalSize               1663
        transactional           true
        transactional_properties        default
@@ -396,54 +360,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(*) from acid_ivot
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage

STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1
-            Map Operator Tree:
-                TableScan
-                  alias: acid_ivot
-                  Statistics: Num rows: 32 Data size: 16630 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 32 Data size: 16630 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order:
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: may be used (ACID table)
-        Reducer 2
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
@@ -507,8 +429,11 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MANAGED_TABLE
Table Parameters:
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}}
        bucketing_version       2
        numFiles                2
+       numRows                 4
+       rawDataSize             0
        totalSize               3326
        transactional           true
        transactional_properties        default
@@ -529,54 +454,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(*) from acid_ivot
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage

STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1
-            Map Operator Tree:
-                TableScan
-                  alias: acid_ivot
-                  Statistics: Num rows: 65 Data size: 33260 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 65 Data size: 33260 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order:
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: may be used (ACID table)
-        Reducer 2
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
@@ -636,8 +519,11 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MANAGED_TABLE
Table Parameters:
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}}
        bucketing_version       2
        numFiles                3
+       numRows                 12292
+       rawDataSize             0
        totalSize               298909
        transactional           true
        transactional_properties        default
@@ -658,54 +544,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(*) from acid_ivot
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage

STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1
-            Map Operator Tree:
-                TableScan
-                  alias: acid_ivot
-                  Statistics: Num rows: 5930 Data size: 2989090 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 5930 Data size: 2989090 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order:
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: may be used (ACID table)
-        Reducer 2
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
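The interesting change in insert_values_orig_table_use_metadata.q.out is not the stats lines but the plan shape: with numRows recorded and COLUMN_STATS_ACCURATE carrying BASIC_STATS, a bare count(*) no longer needs a Tez stage at all, and the whole Stage-1 vertex graph collapses into a metadata-only fetch (limit: 1). Roughly, assuming the answer-from-stats optimization is enabled:

    SET hive.compute.query.using.stats=true;
    EXPLAIN SELECT count(*) FROM acid_ivot;  -- plan is just Stage-0 / Fetch Operator
    SELECT count(*) FROM acid_ivot;          -- answered from numRows, no job launched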
diff --git ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
index 60278049b5..0628ad84cd 100644
--- ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
+++ ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
@@ -759,19 +759,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2
                  filterExpr: ((c > 10.1) and a is not null) (type: boolean)
-                 Statistics: Num rows: 46 Data size: 5336 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((c > 10.1) and a is not null) (type: boolean)
-                   Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2))
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2))
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -785,17 +785,17 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2
-               Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  keys: _col0 (type: int), _col2 (type: decimal(10,2))
                  mode: hash
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                  Reduce Output Operator
                    key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                    sort order: ++
                    Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
        Reducer 3
            Execution mode: llap
            Reduce Operator Tree:
@@ -803,14 +803,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: _col0 (type: int)
                  outputColumnNames: _col0
-                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1041,19 +1041,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2
                  filterExpr: ((c > 10.1) and a is not null) (type: boolean)
-                 Statistics: Num rows: 46 Data size: 5336 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((c > 10.1) and a is not null) (type: boolean)
-                   Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2))
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2))
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -1067,17 +1067,17 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2
-               Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  keys: _col0 (type: int), _col2 (type: decimal(10,2))
                  mode: hash
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                  Reduce Output Operator
                    key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                    sort order: ++
                    Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
        Reducer 3
            Execution mode: llap
            Reduce Operator Tree:
@@ -1085,14 +1085,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: _col0 (type: int)
                  outputColumnNames: _col0
-                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
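materialized_view_create_rewrite_3.q.out exercises query rewriting against a materialized view and its rebuild; the join and group-by cardinalities shift (1 row/116 bytes to 2 rows/232 bytes, and so on) because the estimates now start from real base-table row counts rather than fallback guesses. The rebuild behind these plans is issued with the standard statement (view name invented for illustration):

    ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;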
diff --git ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
index b3fd29adfa..a80463785d 100644
--- ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
+++ ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
@@ -945,8 +945,11 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MATERIALIZED_VIEW
Table Parameters:
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
        bucketing_version       2
        numFiles                3
+       numRows                 3
+       rawDataSize             248
        totalSize               1508
        transactional           true
        transactional_properties        default
@@ -1077,19 +1080,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2_n2
                  filterExpr: ((c > 10) and a is not null) (type: boolean)
-                 Statistics: Num rows: 61 Data size: 7320 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((c > 10) and a is not null) (type: boolean)
-                   Statistics: Num rows: 20 Data size: 2400 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2)), d (type: int)
                      outputColumnNames: _col0, _col1, _col2
-                     Statistics: Num rows: 20 Data size: 2400 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 20 Data size: 2400 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2)), _col2 (type: int)
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -1103,7 +1106,7 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2, _col3
-               Statistics: Num rows: 33 Data size: 3960 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: sum(_col3)
                  keys: _col0 (type: int), _col2 (type: decimal(10,2))
@@ -1299,19 +1302,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2_n2
                  filterExpr: ((c > 10) and a is not null) (type: boolean)
-                 Statistics: Num rows: 75 Data size: 9000 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((c > 10) and a is not null) (type: boolean)
-                   Statistics: Num rows: 25 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2)), d (type: int)
                      outputColumnNames: _col0, _col1, _col2
-                     Statistics: Num rows: 25 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 25 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2)), _col2 (type: int)
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -1325,7 +1328,7 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2, _col3
-               Statistics: Num rows: 41 Data size: 4920 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: sum(_col3)
                  keys: _col0 (type: int), _col2 (type: decimal(10,2))
@@ -1552,19 +1555,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2_n2
                  filterExpr: ((c > 10) and (ROW__ID.writeid > 4) and a is not null) (type: boolean)
-                 Statistics: Num rows: 91 Data size: 10920 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((ROW__ID.writeid > 4) and (c > 10) and a is not null) (type: boolean)
-                   Statistics: Num rows: 10 Data size: 1200 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2)), d (type: int)
                      outputColumnNames: _col0, _col1, _col2
-                     Statistics: Num rows: 10 Data size: 1960 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 10 Data size: 1960 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2)), _col2 (type: int)
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -1662,7 +1665,7 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2, _col3
-               Statistics: Num rows: 16 Data size: 1920 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: sum(_col3)
                  keys: _col0 (type: int), _col2 (type: decimal(10,2))
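The last plan in materialized_view_create_rewrite_4.q.out filters on ROW__ID.writeid > 4, i.e. an incremental rebuild that only scans rows written after the previous refresh; its estimate drops from 91 rows to 3 once real stats are visible. ROW__ID is a queryable virtual column on ACID tables, so the same predicate can be inspected by hand (query is a sketch, not from the test file):

    SELECT ROW__ID.writeid, a, c, d
    FROM cmv_basetable_2_n2
    WHERE ROW__ID.writeid > 4;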
diff --git ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
index 9abdcbb2dc..fae47575b5 100644
--- ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
+++ ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
@@ -402,8 +402,11 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MATERIALIZED_VIEW
Table Parameters:
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"c\":\"true\"}}
        bucketing_version       2
        numFiles                2
+       numRows                 5
+       rawDataSize             348
        totalSize               1071
        transactional           true
        transactional_properties        default
@@ -530,19 +533,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2_n3
                  filterExpr: ((c > 10) and a is not null) (type: boolean)
-                 Statistics: Num rows: 61 Data size: 7076 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((c > 10) and a is not null) (type: boolean)
-                   Statistics: Num rows: 20 Data size: 2320 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2))
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 20 Data size: 2320 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 20 Data size: 2320 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2))
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -556,14 +559,14 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2
-               Statistics: Num rows: 33 Data size: 3828 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: _col0 (type: int), _col2 (type: decimal(10,2))
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 33 Data size: 3828 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 33 Data size: 3828 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                        output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -573,7 +576,7 @@ STAGE PLANS:
              Select Operator
                expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                outputColumnNames: a, c
-               Statistics: Num rows: 33 Data size: 3828 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
                  mode: hash
@@ -741,19 +744,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2_n3
                  filterExpr: ((c > 10) and a is not null) (type: boolean)
-                 Statistics: Num rows: 75 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((c > 10) and a is not null) (type: boolean)
-                   Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2))
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2))
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -767,14 +770,14 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2
-               Statistics: Num rows: 41 Data size: 4756 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: _col0 (type: int), _col2 (type: decimal(10,2))
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 41 Data size: 4756 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 41 Data size: 4756 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                        output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -784,7 +787,7 @@ STAGE PLANS:
              Select Operator
                expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                outputColumnNames: a, c
-               Statistics: Num rows: 41 Data size: 4756 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
                  mode: hash
@@ -956,19 +959,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2_n3
                  filterExpr: ((c > 10) and (ROW__ID.writeid > 4) and a is not null) (type: boolean)
-                 Statistics: Num rows: 91 Data size: 10556 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((ROW__ID.writeid > 4) and (c > 10) and a is not null) (type: boolean)
-                   Statistics: Num rows: 10 Data size: 1160 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2))
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2))
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -982,14 +985,14 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2
-               Statistics: Num rows: 16 Data size: 1856 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: _col0 (type: int), _col2 (type: decimal(10,2))
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 16 Data size: 1856 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 16 Data size: 1856 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                        output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -999,7 +1002,7 @@ STAGE PLANS:
              Select Operator
                expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                outputColumnNames: a, c
-               Statistics: Num rows: 16 Data size: 1856 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
                  mode: hash
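The compute_stats(..., 'hll') branches in these plans are the stats-autogather side of each rebuild: Hive derives per-column NDV estimates from HyperLogLog sketches. The same statistics can be rebuilt explicitly when autogather is off:

    ANALYZE TABLE cmv_basetable_2_n3 COMPUTE STATISTICS FOR COLUMNS;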
diff --git ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
index 210db2cd09..fe54771bfd 100644
--- ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
+++ ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
@@ -759,19 +759,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2_n0
                  filterExpr: ((c > 10.1) and a is not null) (type: boolean)
-                 Statistics: Num rows: 46 Data size: 5336 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((c > 10.1) and a is not null) (type: boolean)
-                   Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2))
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2))
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -785,17 +785,17 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2
-               Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  keys: _col0 (type: int), _col2 (type: decimal(10,2))
                  mode: hash
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                  Reduce Output Operator
                    key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                    sort order: ++
                    Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
        Reducer 3
            Execution mode: llap
            Reduce Operator Tree:
@@ -803,14 +803,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: _col0 (type: int)
                  outputColumnNames: _col0
-                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1041,19 +1041,19 @@ STAGE PLANS:
                TableScan
                  alias: cmv_basetable_2_n0
                  filterExpr: ((c > 10.1) and a is not null) (type: boolean)
-                 Statistics: Num rows: 46 Data size: 5336 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: ((c > 10.1) and a is not null) (type: boolean)
-                   Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: a (type: int), c (type: decimal(10,2))
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                       Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col1 (type: decimal(10,2))
            Execution mode: llap
            LLAP IO: may be used (ACID table)
@@ -1067,17 +1067,17 @@ STAGE PLANS:
                  0 _col0 (type: int)
                  1 _col0 (type: int)
                outputColumnNames: _col0, _col2
-               Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  keys: _col0 (type: int), _col2 (type: decimal(10,2))
                  mode: hash
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                  Reduce Output Operator
                    key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                    sort order: ++
                    Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-                   Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
        Reducer 3
            Execution mode: llap
            Reduce Operator Tree:
@@ -1085,14 +1085,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+               Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: _col0 (type: int)
                  outputColumnNames: _col0
-                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                 Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
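materialized_view_create_rewrite_rebuild_dummy.q.out mirrors the rewrite_3 changes on a differently named base table (cmv_basetable_2_n0). When checking which statistics the planner saw for any of these golden files, the table parameters are the place to look; COLUMN_STATS_ACCURATE is the flag consulted before column stats are treated as COMPLETE:

    DESCRIBE FORMATTED cmv_basetable_2_n0;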
diff --git ql/src/test/results/clientpositive/llap/mm_all.q.out ql/src/test/results/clientpositive/llap/mm_all.q.out
index 95734b6b4f..500c7fa71f 100644
--- ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ ql/src/test/results/clientpositive/llap/mm_all.q.out
@@ -1815,6 +1815,7 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MANAGED_TABLE
Table Parameters:
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
        bucketing_version       2
        numFiles                3
        numRows                 6
@@ -1865,6 +1866,7 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MANAGED_TABLE
Table Parameters:
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
        bucketing_version       2
        numFiles                6
        numRows                 12
@@ -1923,7 +1925,7 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MANAGED_TABLE
Table Parameters:
-       COLUMN_STATS_ACCURATE   {}
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
        bucketing_version       2
        numFiles                55
        numRows                 500
diff --git ql/src/test/results/clientpositive/llap/mm_exim.q.out ql/src/test/results/clientpositive/llap/mm_exim.q.out
index 37d3952d37..ee6cf06ea8 100644
--- ql/src/test/results/clientpositive/llap/mm_exim.q.out
+++ ql/src/test/results/clientpositive/llap/mm_exim.q.out
@@ -386,6 +386,7 @@ Retention: 0
#### A masked pattern was here ####
Table Type:             MANAGED_TABLE
Table Parameters:
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
        bucketing_version       2
        numFiles                3
        numPartitions           3
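mm_all.q.out and mm_exim.q.out show the same COLUMN_STATS_ACCURATE bookkeeping reaching insert-only (micromanaged) tables, including tables that take the export/import path. That round trip has the general shape below (table names and path invented for illustration):

    EXPORT TABLE mm_source TO '/tmp/mm_export';
    IMPORT TABLE mm_copy FROM '/tmp/mm_export';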
diff --git ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out
index 7efb50ac5f..d4b55bbf90 100644
--- ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out
+++ ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out
@@ -58,20 +58,20 @@ STAGE PLANS:
                TableScan
                  alias: a
                  filterExpr: (UDFToDouble(key) >= 0.0D) (type: boolean)
-                 Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: (UDFToDouble(key) >= 0.0D) (type: boolean)
-                   Statistics: Num rows: 30 Data size: 5338 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
-                     Statistics: Num rows: 30 Data size: 5338 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
                      Group By Operator
                        aggregations: count()
                        mode: hash
                        outputColumnNames: _col0
-                       Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                        Reduce Output Operator
                          sort order:
-                         Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                          value expressions: _col0 (type: bigint)
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
@@ -82,10 +82,10 @@ STAGE PLANS:
                aggregations: count(VALUE._col0)
                mode: mergepartial
                outputColumnNames: _col0
-               Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -128,19 +128,19 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: tab2_n5
-                 Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                    Group By Operator
                      aggregations: max(key)
                      mode: hash
                      outputColumnNames: _col0
-                     Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        sort order:
-                       Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col0 (type: string)
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
@@ -151,10 +151,10 @@ STAGE PLANS:
                aggregations: max(VALUE._col0)
                mode: mergepartial
                outputColumnNames: _col0
-               Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -199,19 +199,19 @@ STAGE PLANS:
                TableScan
                  alias: tab1_n6
                  filterExpr: key is not null (type: boolean)
-                 Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: _col0
-                     Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: string)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
        Map 4
@@ -219,19 +219,19 @@ STAGE PLANS:
                TableScan
                  alias: tab2_n5
                  filterExpr: key is not null (type: boolean)
-                 Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: _col0
-                     Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: string)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
        Reducer 2
@@ -243,15 +243,15 @@ STAGE PLANS:
                keys:
                  0 _col0 (type: string)
                  1 _col0 (type: string)
-               Statistics: Num rows: 95 Data size: 17028 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: count()
                  mode: hash
                  outputColumnNames: _col0
-                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  Reduce Output Operator
                    sort order:
-                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                    value expressions: _col0 (type: bigint)
        Reducer 3
            Execution mode: vectorized, llap
@@ -260,10 +260,10 @@ STAGE PLANS:
                aggregations: count(VALUE._col0)
                mode: mergepartial
                outputColumnNames: _col0
-               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -397,20 +397,20 @@ STAGE PLANS:
                TableScan
                  alias: a
                  filterExpr: (UDFToDouble(key) >= 0.0D) (type: boolean)
-                 Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: (UDFToDouble(key) >= 0.0D) (type: boolean)
-                   Statistics: Num rows: 37 Data size: 6562 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 167 Data size: 14529 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
-                     Statistics: Num rows: 37 Data size: 6562 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 167 Data size: 14529 Basic stats: COMPLETE Column stats: COMPLETE
                      Group By Operator
                        aggregations: count()
                        mode: hash
                        outputColumnNames: _col0
-                       Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                        Reduce Output Operator
                          sort order:
-                         Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                          value expressions: _col0 (type: bigint)
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
@@ -421,10 +421,10 @@ STAGE PLANS:
                aggregations: count(VALUE._col0)
                mode: mergepartial
                outputColumnNames: _col0
-               Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -495,19 +495,19 @@ STAGE PLANS:
                TableScan
                  alias: tab1_n6
                  filterExpr: key is not null (type: boolean)
-                 Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: _col0
-                     Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: string)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
        Map 4
@@ -515,19 +515,19 @@ STAGE PLANS:
                TableScan
                  alias: tab2_n5
                  filterExpr: key is not null (type: boolean)
-                 Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: _col0
-                     Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        key expressions: _col0 (type: string)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
        Reducer 2
@@ -539,15 +539,15 @@ STAGE PLANS:
                keys:
                  0 _col0 (type: string)
                  1 _col0 (type: string)
-               Statistics: Num rows: 116 Data size: 20681 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 792 Data size: 6336 Basic stats: COMPLETE Column stats: COMPLETE
                Group By Operator
                  aggregations: count()
                  mode: hash
                  outputColumnNames: _col0
-                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  Reduce Output Operator
                    sort order:
-                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                    value expressions: _col0 (type: bigint)
        Reducer 3
            Execution mode: vectorized, llap
@@ -556,10 +556,10 @@ STAGE PLANS:
                aggregations: count(VALUE._col0)
                mode: mergepartial
                outputColumnNames: _col0
-               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -640,19 +640,19 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: tab2_n5
-                 Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                    Group By Operator
                      aggregations: max(key)
                      mode: hash
                      outputColumnNames: _col0
-                     Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                     Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        sort order:
-                       Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                       Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col0 (type: string)
            Execution mode: vectorized, llap
            LLAP IO: may be used (ACID table)
@@ -663,10 +663,10 @@ STAGE PLANS:
                aggregations: max(VALUE._col0)
                mode: mergepartial
                outputColumnNames: _col0
-               Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+               Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format:
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -712,19 +712,19 @@ STAGE PLANS: TableScan alias: tab1_n6 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -732,19 +732,19 @@ STAGE PLANS: TableScan alias: tab2_n5 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -756,15 +756,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 116 Data size: 20681 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 794 Data size: 6352 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -773,10 +773,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: 
COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out index 19504bc845..b57730a2f3 100644 --- ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out +++ ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out @@ -56,19 +56,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tab1_n1 - Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: max(key) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -79,10 +79,10 @@ STAGE PLANS: aggregations: max(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -152,19 +152,19 @@ STAGE PLANS: TableScan alias: tab1_n1 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -172,19 +172,19 @@ STAGE PLANS: TableScan alias: tab2_n1 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 91 
Data size: 16192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -196,15 +196,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 95 Data size: 17028 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -213,10 +213,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -429,19 +429,19 @@ STAGE PLANS: TableScan alias: tab1_n1 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ 
-473,15 +473,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 550 Data size: 47850 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -490,10 +490,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -541,19 +541,19 @@ STAGE PLANS: TableScan alias: tab1_n1 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -585,15 +585,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 550 Data size: 47850 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -602,10 +602,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 
Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git ql/src/test/results/clientpositive/mm_all.q.out ql/src/test/results/clientpositive/mm_all.q.out index e7df4c0a29..e5428bbe07 100644 --- ql/src/test/results/clientpositive/mm_all.q.out +++ ql/src/test/results/clientpositive/mm_all.q.out @@ -1829,6 +1829,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}} bucketing_version 2 numFiles 1 numRows 6 @@ -1879,6 +1880,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}} bucketing_version 2 numFiles 2 numRows 12 @@ -1937,7 +1939,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 numFiles 1 numRows 500 diff --git ql/src/test/results/clientpositive/mm_default.q.out ql/src/test/results/clientpositive/mm_default.q.out index 4ba6aa5223..5a855544bb 100644 --- ql/src/test/results/clientpositive/mm_default.q.out +++ ql/src/test/results/clientpositive/mm_default.q.out @@ -180,7 +180,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 numFiles 1 numRows 1 diff --git ql/src/test/results/clientpositive/row__id.q.out ql/src/test/results/clientpositive/row__id.q.out index e83b590bf8..7d296667a3 100644 --- ql/src/test/results/clientpositive/row__id.q.out +++ ql/src/test/results/clientpositive/row__id.q.out @@ -62,24 +62,24 @@ STAGE PLANS: Map Operator Tree: TableScan alias: hello_acid - Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID.writeid (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + - Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE Execution mode: vectorized Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -125,17 +125,17 @@ STAGE PLANS: 
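Two further effects show up in the surrounding files. In mm_all.q.out and mm_default.q.out the COLUMN_STATS_ACCURATE table parameter now survives inserts instead of coming back empty: {} becomes {"BASIC_STATS":"true"}, and tables whose column stats were gathered also carry a COLUMN_STATS map such as {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true"}}. In row__id.q.out the hello_acid scan estimate drops from 78 rows (derived from raw file size) to 3, the number of rows actually in the table, and the writeid = 3 filter is estimated at 1 row; the hunk resuming below makes the same change for the filtered query. The parameter is visible in ordinary metadata output (a sketch; the table name is illustrative):

  -- "Table Parameters:" lists COLUMN_STATS_ACCURATE; {"BASIC_STATS":"true"}
  -- marks numRows/totalSize as reliable, and the optional COLUMN_STATS map
  -- flags the individual columns whose statistics are valid
  desc formatted mm_table;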
TableScan alias: hello_acid filterExpr: (ROW__ID.writeid = 3) (type: boolean) - Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (ROW__ID.writeid = 3) (type: boolean) - Statistics: Num rows: 39 Data size: 9930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 6620 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID.writeid (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 39 Data size: 9930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 6620 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 39 Data size: 9930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 6620 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git ql/src/test/results/clientpositive/stats_nonpart.q.out ql/src/test/results/clientpositive/stats_nonpart.q.out new file mode 100644 index 0000000000..7df570a121 --- /dev/null +++ ql/src/test/results/clientpositive/stats_nonpart.q.out @@ -0,0 +1,332 @@ +PREHOOK: query: drop table if exists mysource +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists mysource +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table mysource (p int,key int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mysource +POSTHOOK: query: create table mysource (p int,key int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mysource +PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +PREHOOK: query: insert into mysource values (100,30), (101,50), (102,60) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,30), (101,50), (102,60) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +PREHOOK: query: drop table if exists stats_nonpartitioned +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists stats_nonpartitioned +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats_nonpartitioned +POSTHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats_nonpartitioned +PREHOOK: query: explain select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage 
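The stage list above is the point of this new golden file: the explain of count(*) on the (still empty) transactional table consists of Stage-0 alone. As the STAGE PLANS hunk just below shows, that stage is a bare Fetch Operator ending in a ListSink, i.e. the count is answered from metastore basic stats (numRows) without launching a job; the subsequent select count(*) returns 0, and after the insert the same plan shape returns 2. For comparison, the stats-based rewrite can be switched off so the same query plans a real scan (a sketch; hive.compute.query.using.stats is an existing Hive setting):

  set hive.compute.query.using.stats=false;
  explain select count(*) from stats_nonpartitioned;  -- now includes a scan stage
  set hive.compute.query.using.stats=true;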
+ +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +0 +PREHOOK: query: desc formatted stats_nonpartitioned +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_nonpartitioned +POSTHOOK: query: desc formatted stats_nonpartitioned +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_nonpartitioned +# col_name data_type comment +key int +value int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + bucketing_version 2 + numFiles 0 + numRows 0 + rawDataSize 0 + totalSize 0 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: mysource + filterExpr: (p = 100) (type: boolean) + Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (p = 100) (type: boolean) + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 100 (type: int), key (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.stats_nonpartitioned + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: int) + outputColumnNames: key, value + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + 
outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.stats_nonpartitioned + Write Type: INSERT + + Stage: Stage-2 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, int + Table: default.stats_nonpartitioned + +PREHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_nonpartitioned +POSTHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_nonpartitioned +POSTHOOK: Lineage: stats_nonpartitioned.key SIMPLE [] +POSTHOOK: Lineage: stats_nonpartitioned.value SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: desc formatted stats_nonpartitioned +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_nonpartitioned +POSTHOOK: query: desc formatted stats_nonpartitioned +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_nonpartitioned +# col_name data_type comment +key int +value int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + bucketing_version 2 + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 719 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +2 +PREHOOK: query: explain select count(key) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor 
Tree: + ListSink + +PREHOOK: query: select count(key) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +2 +PREHOOK: query: analyze table stats_nonpartitioned compute statistics for columns key, value +PREHOOK: type: ANALYZE_TABLE +PREHOOK: Input: default@stats_nonpartitioned +PREHOOK: Output: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: analyze table stats_nonpartitioned compute statistics for columns key, value +POSTHOOK: type: ANALYZE_TABLE +POSTHOOK: Input: default@stats_nonpartitioned +POSTHOOK: Output: default@stats_nonpartitioned +#### A masked pattern was here #### +PREHOOK: query: explain select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +2 +PREHOOK: query: explain select count(key) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +2 diff --git ql/src/test/results/clientpositive/stats_part.q.out ql/src/test/results/clientpositive/stats_part.q.out new file mode 100644 index 0000000000..51bdfabacf --- /dev/null +++ ql/src/test/results/clientpositive/stats_part.q.out @@ -0,0 +1,661 @@ +PREHOOK: query: drop table if exists mysource +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists mysource +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table mysource (p int, key int, value int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mysource +POSTHOOK: query: create table mysource (p int, key int, value int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mysource +PREHOOK: query: insert into mysource values (100,20,201), (101,40,401), (102,50,501) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20,201), (101,40,401), (102,50,501) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: insert into mysource values (100,21,211), (101,41,411), (102,51,511) +PREHOOK: type: QUERY +PREHOOK: Input: 
_dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,21,211), (101,41,411), (102,51,511) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: drop table if exists stats_partitioned +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists stats_partitioned +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats_part +POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats_part +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: explain select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Filter Operator + predicate: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + aggregations: count(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 
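Worth noting in the explain just above: count(key) under the partition predicate is reported with "Column stats: PARTIAL", not COMPLETE. Reading these plans, basic (row count) statistics for the selected partitions are trusted, but column statistics are missing or not yet validated for everything the query reads, so an aggregate over a real column still compiles to a scan even while plain count(*) is served from metadata; later hunks in this file show count(key) collapsing to the metadata-only Stage-0 once per-partition column stats are in place. The desc formatted output resumes below. Column statistics for all partitions can also be gathered explicitly (a sketch; the PARTITION(p) form covers every partition):

  analyze table stats_part partition(p) compute statistics for columns;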
+#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 0 + numPartitions 0 + numRows 0 + rawDataSize 0 + totalSize 0 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=100 +POSTHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=100 +POSTHOOK: Lineage: stats_part PARTITION(p=100).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=100).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=101 +POSTHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=101).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=102 +POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=102).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 3 + numPartitions 3 + numRows 6 + rawDataSize 0 + totalSize 2244 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe 
Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table mysource values (103,20,200), (103,83,832), (103,53,530) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into table mysource values (103,20,200), (103,83,832), (103,53,530) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=102 +POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=102).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 4 + numPartitions 3 + numRows 8 + rawDataSize 0 + totalSize 2998 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: show partitions stats_part +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@stats_part +POSTHOOK: query: show partitions stats_part +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@stats_part +p=100 +p=101 +p=102 +PREHOOK: query: explain select count(*) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain 
select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: explain select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +6 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +51 +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 4 + numPartitions 3 + numRows 8 + rawDataSize 0 + totalSize 2998 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +51 +PREHOOK: query: 
select count(value) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(value) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: select count(value) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(value) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 4 + numPartitions 3 + numRows 8 + rawDataSize 0 + totalSize 2998 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select count(*) from stats_part where p = 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p = 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p = 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p = 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +2 +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +6 +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern 
was here #### +8 +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +6 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +51 +PREHOOK: query: describe extended stats_part partition (p=101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: describe extended stats_part partition (p=101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +key int +value string +p int + +# Partition Information +# col_name data_type comment +p int + +#### A masked pattern was here #### +PREHOOK: query: describe extended stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: describe extended stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +key int +value string +p int + +# Partition Information +# col_name data_type comment +p int + +#### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/stats_part2.q.out ql/src/test/results/clientpositive/stats_part2.q.out new file mode 100644 index 0000000000..9c22ce7702 --- /dev/null +++ ql/src/test/results/clientpositive/stats_part2.q.out @@ -0,0 +1,1265 @@ +PREHOOK: query: drop table if exists mysource +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists mysource +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table mysource (p int, key int, value string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mysource +POSTHOOK: query: create table mysource (p int, key int, value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mysource +PREHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: 
default@mysource +POSTHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: drop table if exists stats_partitioned +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists stats_partitioned +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats_part +POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats_part +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Filter Operator + predicate: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + 
aggregations: max(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col0 (type: int) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 0 + numPartitions 0 + numRows 0 + rawDataSize 0 + totalSize 0 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=100 +POSTHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=100 +POSTHOOK: Lineage: stats_part PARTITION(p=100).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=100).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=101 +POSTHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=101).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where 
p == 102 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=102 +POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=102).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 3 + numPartitions 3 + numRows 6 + rawDataSize 0 + totalSize 2335 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=102 +POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=102).value SIMPLE 
[(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 4 + numPartitions 3 + numRows 8 + rawDataSize 0 + totalSize 3124 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: show partitions stats_part +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@stats_part +POSTHOOK: query: show partitions stats_part +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@stats_part +p=100 +p=101 +p=102 +PREHOOK: query: explain select count(*) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: explain select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +6 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + 
Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +51 +PREHOOK: query: desc formatted stats_part partition(p = 100) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 100) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [100] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 756 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [101] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 789 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 102) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 102) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [102] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 2 + numRows 4 + rawDataSize 0 + totalSize 1579 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: update stats_part set key 
= key + 100 where key in(-50,40) and p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +PREHOOK: Output: default@stats_part@p=101 +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: update stats_part set key = key + 100 where key in(-50,40) and p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Output: default@stats_part@p=102 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + aggregations: max(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col0 (type: int) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +140 +PREHOOK: query: desc formatted stats_part partition(p = 100) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 100) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [100] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 756 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [101] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 3 + numRows 2 + rawDataSize 0 + totalSize 2235 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 102) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 102) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [102] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 2 + numRows 4 + rawDataSize 0 + totalSize 1579 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select count(value) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=100 +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select count(value) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=100 +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +8 +PREHOOK: query: update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +PREHOOK: Output: default@stats_part@p=101 +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Output: default@stats_part@p=102 +PREHOOK: 
query: desc formatted stats_part partition(p = 100) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 100) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [100] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 756 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [101] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 3 + numRows 2 + rawDataSize 0 + totalSize 2235 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 102) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 102) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [102] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 2 + numRows 4 + rawDataSize 0 + totalSize 1579 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select count(value) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=100 +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select count(value) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=100 +POSTHOOK: Input: 
default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +8 +PREHOOK: query: delete from stats_part where key in (20, 41) +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=100 +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +PREHOOK: Output: default@stats_part@p=100 +PREHOOK: Output: default@stats_part@p=101 +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: delete from stats_part where key in (20, 41) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=100 +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +POSTHOOK: Output: default@stats_part@p=100 +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Output: default@stats_part@p=102 +PREHOOK: query: desc formatted stats_part partition(p = 100) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 100) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [100] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 2 + numRows 1 + rawDataSize 0 + totalSize 1453 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [101] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 4 + numRows 1 + rawDataSize 0 + totalSize 2929 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 102) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 102) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [102] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 2 + numRows 4 + rawDataSize 0 + totalSize 1579 
+#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select count(*) from stats_part where p = 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p = 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p = 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p = 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +1 +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +5 +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=100 +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select count(key) 
from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=100 +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +6 +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +5 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + aggregations: max(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col0 (type: int) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +140 +PREHOOK: query: describe extended stats_part partition (p=101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: describe extended stats_part partition (p=101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +key int +value string +p int + +# Partition Information +# col_name data_type comment +p int + +#### A masked pattern was here #### +PREHOOK: query: describe extended stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: describe extended 
stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +key int +value string +p int + +# Partition Information +# col_name data_type comment +p int + +#### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/stats_sizebug.q.out ql/src/test/results/clientpositive/stats_sizebug.q.out new file mode 100644 index 0000000000..648a9fa562 --- /dev/null +++ ql/src/test/results/clientpositive/stats_sizebug.q.out @@ -0,0 +1,217 @@ +PREHOOK: query: drop table if exists mysource +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists mysource +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table mysource (p int,key int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mysource +POSTHOOK: query: create table mysource (p int,key int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mysource +PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +PREHOOK: query: drop table if exists stats_nonpartitioned +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists stats_nonpartitioned +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats_nonpartitioned +POSTHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats_nonpartitioned +PREHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: mysource + filterExpr: (p = 100) (type: boolean) + Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (p = 100) (type: boolean) + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 100 (type: int), key (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.stats_nonpartitioned + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: int) + outputColumnNames: key, value + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.stats_nonpartitioned + Write Type: INSERT + + Stage: Stage-2 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, int + Table: default.stats_nonpartitioned + +PREHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_nonpartitioned +POSTHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_nonpartitioned +POSTHOOK: Lineage: stats_nonpartitioned.key SIMPLE [] +POSTHOOK: Lineage: stats_nonpartitioned.value SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: desc formatted stats_nonpartitioned +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_nonpartitioned +POSTHOOK: query: desc formatted stats_nonpartitioned +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_nonpartitioned +# col_name data_type comment +key int +value int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + bucketing_version 2 + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 718 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 
1 +PREHOOK: query: analyze table mysource compute statistics for columns p, key +PREHOOK: type: ANALYZE_TABLE +PREHOOK: Input: default@mysource +PREHOOK: Output: default@mysource +#### A masked pattern was here #### +POSTHOOK: query: analyze table mysource compute statistics for columns p, key +POSTHOOK: type: ANALYZE_TABLE +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@mysource +#### A masked pattern was here #### +PREHOOK: query: desc formatted stats_nonpartitioned +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_nonpartitioned +POSTHOOK: query: desc formatted stats_nonpartitioned +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_nonpartitioned +# col_name data_type comment +key int +value int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + bucketing_version 2 + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 718 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 diff --git ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out index 88499fd5f4..b40036152d 100644 --- ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out +++ ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out @@ -680,22 +680,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over10k_orc_bucketed_n0 - Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct) outputColumnNames: ROW__ID - Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: ROW__ID (type: struct) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: _col0 (type: struct) - Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -704,13 +704,13 @@ STAGE PLANS: keys: KEY._col0 (type: struct) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col1 > 1L) (type: boolean) - Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE + 
Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out index bab0d241ec..5a50431d26 100644 --- ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out +++ ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out @@ -295,17 +295,17 @@ Stage-3 Reducer 2 File Output Operator [FS_8] table:{"name:":"default.acid_uami_n2"} - Select Operator [SEL_4] (rows=10/2 width=316) + Select Operator [SEL_4] (rows=2/2 width=302) Output:["_col0","_col1","_col2","_col3"] <-Map 1 [SIMPLE_EDGE] SHUFFLE [RS_3] PartitionCols:UDFToInteger(_col0) - Select Operator [SEL_2] (rows=10/2 width=316) + Select Operator [SEL_2] (rows=2/2 width=302) Output:["_col0","_col1","_col3"] - Filter Operator [FIL_9] (rows=10/2 width=316) + Filter Operator [FIL_9] (rows=2/2 width=226) predicate:((de = 109.23) or (de = 119.23)) - TableScan [TS_0] (rows=85/4 width=316) - default@acid_uami_n2,acid_uami_n2, ACID table,Tbl:COMPLETE,Col:NONE,Output:["i","de","vc"] + TableScan [TS_0] (rows=4/4 width=226) + default@acid_uami_n2,acid_uami_n2, ACID table,Tbl:COMPLETE,Col:COMPLETE,Output:["i","de","vc"] PREHOOK: query: select * from acid_uami_n2 order by de PREHOOK: type: QUERY diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml index 67d8fb41d1..7369ebbaa2 100644 --- standalone-metastore/pom.xml +++ standalone-metastore/pom.xml @@ -30,7 +30,7 @@ Hive Standalone Metastore - 3.1.0 + 4.0.0 UTF-8 diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index e459bc2a9c..7a81dfbcb8 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -2334,14 +2334,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1202; - ::apache::thrift::protocol::TType _etype1205; - xfer += iprot->readListBegin(_etype1205, _size1202); - this->success.resize(_size1202); - uint32_t _i1206; - for (_i1206 = 0; _i1206 < _size1202; ++_i1206) + uint32_t _size1221; + ::apache::thrift::protocol::TType _etype1224; + xfer += iprot->readListBegin(_etype1224, _size1221); + this->success.resize(_size1221); + uint32_t _i1225; + for (_i1225 = 0; _i1225 < _size1221; ++_i1225) { - xfer += iprot->readString(this->success[_i1206]); + xfer += iprot->readString(this->success[_i1225]); } xfer += iprot->readListEnd(); } @@ -2380,10 +2380,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1207; - for (_iter1207 = this->success.begin(); _iter1207 != this->success.end(); ++_iter1207) + std::vector<std::string> ::const_iterator _iter1226; + for (_iter1226 = this->success.begin(); _iter1226 != 
this->success.end(); ++_iter1226) { - xfer += oprot->writeString((*_iter1207)); + xfer += oprot->writeString((*_iter1226)); } xfer += oprot->writeListEnd(); } @@ -2428,14 +2428,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1208; - ::apache::thrift::protocol::TType _etype1211; - xfer += iprot->readListBegin(_etype1211, _size1208); - (*(this->success)).resize(_size1208); - uint32_t _i1212; - for (_i1212 = 0; _i1212 < _size1208; ++_i1212) + uint32_t _size1227; + ::apache::thrift::protocol::TType _etype1230; + xfer += iprot->readListBegin(_etype1230, _size1227); + (*(this->success)).resize(_size1227); + uint32_t _i1231; + for (_i1231 = 0; _i1231 < _size1227; ++_i1231) { - xfer += iprot->readString((*(this->success))[_i1212]); + xfer += iprot->readString((*(this->success))[_i1231]); } xfer += iprot->readListEnd(); } @@ -2552,14 +2552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1213; - ::apache::thrift::protocol::TType _etype1216; - xfer += iprot->readListBegin(_etype1216, _size1213); - this->success.resize(_size1213); - uint32_t _i1217; - for (_i1217 = 0; _i1217 < _size1213; ++_i1217) + uint32_t _size1232; + ::apache::thrift::protocol::TType _etype1235; + xfer += iprot->readListBegin(_etype1235, _size1232); + this->success.resize(_size1232); + uint32_t _i1236; + for (_i1236 = 0; _i1236 < _size1232; ++_i1236) { - xfer += iprot->readString(this->success[_i1217]); + xfer += iprot->readString(this->success[_i1236]); } xfer += iprot->readListEnd(); } @@ -2598,10 +2598,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1218; - for (_iter1218 = this->success.begin(); _iter1218 != this->success.end(); ++_iter1218) + std::vector<std::string> ::const_iterator _iter1237; - for (_iter1237 = this->success.begin(); _iter1237 != this->success.end(); ++_iter1237) { - xfer += oprot->writeString((*_iter1218)); + xfer += oprot->writeString((*_iter1237)); } xfer += oprot->writeListEnd(); } @@ -2646,14 +2646,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1219; - ::apache::thrift::protocol::TType _etype1222; - xfer += iprot->readListBegin(_etype1222, _size1219); - (*(this->success)).resize(_size1219); - uint32_t _i1223; - for (_i1223 = 0; _i1223 < _size1219; ++_i1223) + uint32_t _size1238; + ::apache::thrift::protocol::TType _etype1241; + xfer += iprot->readListBegin(_etype1241, _size1238); + (*(this->success)).resize(_size1238); + uint32_t _i1242; + for (_i1242 = 0; _i1242 < _size1238; ++_i1242) { - xfer += iprot->readString((*(this->success))[_i1223]); + xfer += iprot->readString((*(this->success))[_i1242]); } xfer += iprot->readListEnd(); } @@ -3715,17 +3715,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1224; - ::apache::thrift::protocol::TType _ktype1225; - ::apache::thrift::protocol::TType _vtype1226; - xfer += 
iprot->readMapBegin(_ktype1225, _vtype1226, _size1224); - uint32_t _i1228; - for (_i1228 = 0; _i1228 < _size1224; ++_i1228) + uint32_t _size1243; + ::apache::thrift::protocol::TType _ktype1244; + ::apache::thrift::protocol::TType _vtype1245; + xfer += iprot->readMapBegin(_ktype1244, _vtype1245, _size1243); + uint32_t _i1247; + for (_i1247 = 0; _i1247 < _size1243; ++_i1247) { - std::string _key1229; - xfer += iprot->readString(_key1229); - Type& _val1230 = this->success[_key1229]; - xfer += _val1230.read(iprot); + std::string _key1248; + xfer += iprot->readString(_key1248); + Type& _val1249 = this->success[_key1248]; + xfer += _val1249.read(iprot); } xfer += iprot->readMapEnd(); } @@ -3764,11 +3764,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::map<std::string, Type> ::const_iterator _iter1231; - for (_iter1231 = this->success.begin(); _iter1231 != this->success.end(); ++_iter1231) + std::map<std::string, Type> ::const_iterator _iter1250; + for (_iter1250 = this->success.begin(); _iter1250 != this->success.end(); ++_iter1250) { - xfer += oprot->writeString(_iter1231->first); - xfer += _iter1231->second.write(oprot); + xfer += oprot->writeString(_iter1250->first); + xfer += _iter1250->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -3813,17 +3813,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1232; - ::apache::thrift::protocol::TType _ktype1233; - ::apache::thrift::protocol::TType _vtype1234; - xfer += iprot->readMapBegin(_ktype1233, _vtype1234, _size1232); - uint32_t _i1236; - for (_i1236 = 0; _i1236 < _size1232; ++_i1236) + uint32_t _size1251; + ::apache::thrift::protocol::TType _ktype1252; + ::apache::thrift::protocol::TType _vtype1253; + xfer += iprot->readMapBegin(_ktype1252, _vtype1253, _size1251); + uint32_t _i1255; + for (_i1255 = 0; _i1255 < _size1251; ++_i1255) { - std::string _key1237; - xfer += iprot->readString(_key1237); - Type& _val1238 = (*(this->success))[_key1237]; - xfer += _val1238.read(iprot); + std::string _key1256; + xfer += iprot->readString(_key1256); + Type& _val1257 = (*(this->success))[_key1256]; + xfer += _val1257.read(iprot); } xfer += iprot->readMapEnd(); } @@ -3977,14 +3977,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1239; - ::apache::thrift::protocol::TType _etype1242; - xfer += iprot->readListBegin(_etype1242, _size1239); - this->success.resize(_size1239); - uint32_t _i1243; - for (_i1243 = 0; _i1243 < _size1239; ++_i1243) + uint32_t _size1258; + ::apache::thrift::protocol::TType _etype1261; + xfer += iprot->readListBegin(_etype1261, _size1258); + this->success.resize(_size1258); + uint32_t _i1262; + for (_i1262 = 0; _i1262 < _size1258; ++_i1262) { - xfer += this->success[_i1243].read(iprot); + xfer += this->success[_i1262].read(iprot); } xfer += iprot->readListEnd(); } @@ -4039,10 +4039,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1244; - for (_iter1244 = this->success.begin(); _iter1244 != this->success.end(); ++_iter1244) + std::vector<FieldSchema> ::const_iterator _iter1263; + for (_iter1263 = this->success.begin(); _iter1263 != this->success.end(); ++_iter1263) { - xfer += (*_iter1244).write(oprot); + xfer += (*_iter1263).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4095,14 +4095,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1245; - ::apache::thrift::protocol::TType _etype1248; - xfer += iprot->readListBegin(_etype1248, _size1245); - (*(this->success)).resize(_size1245); - uint32_t _i1249; - for (_i1249 = 0; _i1249 < _size1245; ++_i1249) + uint32_t _size1264; + ::apache::thrift::protocol::TType _etype1267; + xfer += iprot->readListBegin(_etype1267, _size1264); + (*(this->success)).resize(_size1264); + uint32_t _i1268; + for (_i1268 = 0; _i1268 < _size1264; ++_i1268) { - xfer += (*(this->success))[_i1249].read(iprot); + xfer += (*(this->success))[_i1268].read(iprot); } xfer += iprot->readListEnd(); } @@ -4288,14 +4288,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1250; - ::apache::thrift::protocol::TType _etype1253; - xfer += iprot->readListBegin(_etype1253, _size1250); - this->success.resize(_size1250); - uint32_t _i1254; - for (_i1254 = 0; _i1254 < _size1250; ++_i1254) + uint32_t _size1269; + ::apache::thrift::protocol::TType _etype1272; + xfer += iprot->readListBegin(_etype1272, _size1269); + this->success.resize(_size1269); + uint32_t _i1273; + for (_i1273 = 0; _i1273 < _size1269; ++_i1273) { - xfer += this->success[_i1254].read(iprot); + xfer += this->success[_i1273].read(iprot); } xfer += iprot->readListEnd(); } @@ -4350,10 +4350,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1255; - for (_iter1255 = this->success.begin(); _iter1255 != this->success.end(); ++_iter1255) + std::vector<FieldSchema> ::const_iterator _iter1274; + for (_iter1274 = this->success.begin(); _iter1274 != this->success.end(); ++_iter1274) { - xfer += (*_iter1255).write(oprot); + xfer += (*_iter1274).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4406,14 +4406,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1256; - ::apache::thrift::protocol::TType _etype1259; - xfer += iprot->readListBegin(_etype1259, _size1256); - (*(this->success)).resize(_size1256); - uint32_t _i1260; - for (_i1260 = 0; _i1260 < _size1256; ++_i1260) + uint32_t _size1275; + ::apache::thrift::protocol::TType _etype1278; + xfer += iprot->readListBegin(_etype1278, _size1275); + (*(this->success)).resize(_size1275); + uint32_t _i1279; + for (_i1279 = 0; _i1279 < _size1275; ++_i1279) { - xfer += (*(this->success))[_i1260].read(iprot); + xfer += (*(this->success))[_i1279].read(iprot); } xfer += iprot->readListEnd(); } @@ -4583,14 +4583,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1261; - ::apache::thrift::protocol::TType _etype1264; - xfer += iprot->readListBegin(_etype1264, _size1261); - this->success.resize(_size1261); - uint32_t _i1265; - for (_i1265 = 0; _i1265 < _size1261; ++_i1265) + uint32_t _size1280; + ::apache::thrift::protocol::TType _etype1283; + xfer += iprot->readListBegin(_etype1283, _size1280); + this->success.resize(_size1280); + uint32_t _i1284; + for (_i1284 = 0; _i1284 < _size1280; ++_i1284) { - xfer += this->success[_i1265].read(iprot); + xfer += this->success[_i1284].read(iprot); } xfer += iprot->readListEnd(); } @@ -4645,10 +4645,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1266; - for (_iter1266 = this->success.begin(); _iter1266 != this->success.end(); ++_iter1266) + std::vector<FieldSchema> ::const_iterator _iter1285; + for (_iter1285 = this->success.begin(); _iter1285 != this->success.end(); ++_iter1285) { - xfer += (*_iter1266).write(oprot); + xfer += (*_iter1285).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4701,14 +4701,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1267; - ::apache::thrift::protocol::TType _etype1270; - xfer += iprot->readListBegin(_etype1270, _size1267); - (*(this->success)).resize(_size1267); - uint32_t _i1271; - for (_i1271 = 0; _i1271 < _size1267; ++_i1271) + uint32_t _size1286; + ::apache::thrift::protocol::TType _etype1289; + xfer += iprot->readListBegin(_etype1289, _size1286); + (*(this->success)).resize(_size1286); + uint32_t _i1290; + for (_i1290 = 0; _i1290 < _size1286; ++_i1290) { - xfer += (*(this->success))[_i1271].read(iprot); + xfer += (*(this->success))[_i1290].read(iprot); } xfer += iprot->readListEnd(); } @@ -4894,14 +4894,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1272; - ::apache::thrift::protocol::TType _etype1275; - xfer += iprot->readListBegin(_etype1275, _size1272); - this->success.resize(_size1272); - uint32_t _i1276; - for (_i1276 = 0; _i1276 < _size1272; ++_i1276) + uint32_t _size1291; + ::apache::thrift::protocol::TType _etype1294; + xfer += iprot->readListBegin(_etype1294, _size1291); + this->success.resize(_size1291); + uint32_t _i1295; + for (_i1295 = 0; _i1295 < _size1291; ++_i1295) { - xfer += this->success[_i1276].read(iprot); + xfer += this->success[_i1295].read(iprot); } xfer += iprot->readListEnd(); } @@ -4956,10 +4956,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1277; - for (_iter1277 = this->success.begin(); _iter1277 != this->success.end(); ++_iter1277) + std::vector<FieldSchema> ::const_iterator _iter1296; + for (_iter1296 = this->success.begin(); _iter1296 != this->success.end(); ++_iter1296) { - xfer += (*_iter1277).write(oprot); + xfer += (*_iter1296).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5012,14 +5012,14 @@ 
uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1278; - ::apache::thrift::protocol::TType _etype1281; - xfer += iprot->readListBegin(_etype1281, _size1278); - (*(this->success)).resize(_size1278); - uint32_t _i1282; - for (_i1282 = 0; _i1282 < _size1278; ++_i1282) + uint32_t _size1297; + ::apache::thrift::protocol::TType _etype1300; + xfer += iprot->readListBegin(_etype1300, _size1297); + (*(this->success)).resize(_size1297); + uint32_t _i1301; + for (_i1301 = 0; _i1301 < _size1297; ++_i1301) { - xfer += (*(this->success))[_i1282].read(iprot); + xfer += (*(this->success))[_i1301].read(iprot); } xfer += iprot->readListEnd(); } @@ -5612,14 +5612,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size1283; - ::apache::thrift::protocol::TType _etype1286; - xfer += iprot->readListBegin(_etype1286, _size1283); - this->primaryKeys.resize(_size1283); - uint32_t _i1287; - for (_i1287 = 0; _i1287 < _size1283; ++_i1287) + uint32_t _size1302; + ::apache::thrift::protocol::TType _etype1305; + xfer += iprot->readListBegin(_etype1305, _size1302); + this->primaryKeys.resize(_size1302); + uint32_t _i1306; + for (_i1306 = 0; _i1306 < _size1302; ++_i1306) { - xfer += this->primaryKeys[_i1287].read(iprot); + xfer += this->primaryKeys[_i1306].read(iprot); } xfer += iprot->readListEnd(); } @@ -5632,14 +5632,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size1288; - ::apache::thrift::protocol::TType _etype1291; - xfer += iprot->readListBegin(_etype1291, _size1288); - this->foreignKeys.resize(_size1288); - uint32_t _i1292; - for (_i1292 = 0; _i1292 < _size1288; ++_i1292) + uint32_t _size1307; + ::apache::thrift::protocol::TType _etype1310; + xfer += iprot->readListBegin(_etype1310, _size1307); + this->foreignKeys.resize(_size1307); + uint32_t _i1311; + for (_i1311 = 0; _i1311 < _size1307; ++_i1311) { - xfer += this->foreignKeys[_i1292].read(iprot); + xfer += this->foreignKeys[_i1311].read(iprot); } xfer += iprot->readListEnd(); } @@ -5652,14 +5652,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size1293; - ::apache::thrift::protocol::TType _etype1296; - xfer += iprot->readListBegin(_etype1296, _size1293); - this->uniqueConstraints.resize(_size1293); - uint32_t _i1297; - for (_i1297 = 0; _i1297 < _size1293; ++_i1297) + uint32_t _size1312; + ::apache::thrift::protocol::TType _etype1315; + xfer += iprot->readListBegin(_etype1315, _size1312); + this->uniqueConstraints.resize(_size1312); + uint32_t _i1316; + for (_i1316 = 0; _i1316 < _size1312; ++_i1316) { - xfer += this->uniqueConstraints[_i1297].read(iprot); + xfer += this->uniqueConstraints[_i1316].read(iprot); } xfer += iprot->readListEnd(); } @@ -5672,14 +5672,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size1298; - ::apache::thrift::protocol::TType _etype1301; - xfer += iprot->readListBegin(_etype1301, _size1298); - this->notNullConstraints.resize(_size1298); - uint32_t _i1302; - for (_i1302 = 0; _i1302 < 
_size1298; ++_i1302) + uint32_t _size1317; + ::apache::thrift::protocol::TType _etype1320; + xfer += iprot->readListBegin(_etype1320, _size1317); + this->notNullConstraints.resize(_size1317); + uint32_t _i1321; + for (_i1321 = 0; _i1321 < _size1317; ++_i1321) { - xfer += this->notNullConstraints[_i1302].read(iprot); + xfer += this->notNullConstraints[_i1321].read(iprot); } xfer += iprot->readListEnd(); } @@ -5692,14 +5692,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->defaultConstraints.clear(); - uint32_t _size1303; - ::apache::thrift::protocol::TType _etype1306; - xfer += iprot->readListBegin(_etype1306, _size1303); - this->defaultConstraints.resize(_size1303); - uint32_t _i1307; - for (_i1307 = 0; _i1307 < _size1303; ++_i1307) + uint32_t _size1322; + ::apache::thrift::protocol::TType _etype1325; + xfer += iprot->readListBegin(_etype1325, _size1322); + this->defaultConstraints.resize(_size1322); + uint32_t _i1326; + for (_i1326 = 0; _i1326 < _size1322; ++_i1326) { - xfer += this->defaultConstraints[_i1307].read(iprot); + xfer += this->defaultConstraints[_i1326].read(iprot); } xfer += iprot->readListEnd(); } @@ -5712,14 +5712,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->checkConstraints.clear(); - uint32_t _size1308; - ::apache::thrift::protocol::TType _etype1311; - xfer += iprot->readListBegin(_etype1311, _size1308); - this->checkConstraints.resize(_size1308); - uint32_t _i1312; - for (_i1312 = 0; _i1312 < _size1308; ++_i1312) + uint32_t _size1327; + ::apache::thrift::protocol::TType _etype1330; + xfer += iprot->readListBegin(_etype1330, _size1327); + this->checkConstraints.resize(_size1327); + uint32_t _i1331; + for (_i1331 = 0; _i1331 < _size1327; ++_i1331) { - xfer += this->checkConstraints[_i1312].read(iprot); + xfer += this->checkConstraints[_i1331].read(iprot); } xfer += iprot->readListEnd(); } @@ -5752,10 +5752,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter1313; - for (_iter1313 = this->primaryKeys.begin(); _iter1313 != this->primaryKeys.end(); ++_iter1313) + std::vector ::const_iterator _iter1332; + for (_iter1332 = this->primaryKeys.begin(); _iter1332 != this->primaryKeys.end(); ++_iter1332) { - xfer += (*_iter1313).write(oprot); + xfer += (*_iter1332).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5764,10 +5764,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter1314; - for (_iter1314 = this->foreignKeys.begin(); _iter1314 != this->foreignKeys.end(); ++_iter1314) + std::vector ::const_iterator _iter1333; + for (_iter1333 = this->foreignKeys.begin(); _iter1333 != this->foreignKeys.end(); ++_iter1333) { - xfer += (*_iter1314).write(oprot); + xfer += (*_iter1333).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5776,10 +5776,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += 
oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter1315; - for (_iter1315 = this->uniqueConstraints.begin(); _iter1315 != this->uniqueConstraints.end(); ++_iter1315) + std::vector ::const_iterator _iter1334; + for (_iter1334 = this->uniqueConstraints.begin(); _iter1334 != this->uniqueConstraints.end(); ++_iter1334) { - xfer += (*_iter1315).write(oprot); + xfer += (*_iter1334).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5788,10 +5788,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter1316; - for (_iter1316 = this->notNullConstraints.begin(); _iter1316 != this->notNullConstraints.end(); ++_iter1316) + std::vector ::const_iterator _iter1335; + for (_iter1335 = this->notNullConstraints.begin(); _iter1335 != this->notNullConstraints.end(); ++_iter1335) { - xfer += (*_iter1316).write(oprot); + xfer += (*_iter1335).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5800,10 +5800,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraints.size())); - std::vector ::const_iterator _iter1317; - for (_iter1317 = this->defaultConstraints.begin(); _iter1317 != this->defaultConstraints.end(); ++_iter1317) + std::vector ::const_iterator _iter1336; + for (_iter1336 = this->defaultConstraints.begin(); _iter1336 != this->defaultConstraints.end(); ++_iter1336) { - xfer += (*_iter1317).write(oprot); + xfer += (*_iter1336).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5812,10 +5812,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->checkConstraints.size())); - std::vector ::const_iterator _iter1318; - for (_iter1318 = this->checkConstraints.begin(); _iter1318 != this->checkConstraints.end(); ++_iter1318) + std::vector ::const_iterator _iter1337; + for (_iter1337 = this->checkConstraints.begin(); _iter1337 != this->checkConstraints.end(); ++_iter1337) { - xfer += (*_iter1318).write(oprot); + xfer += (*_iter1337).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5843,10 +5843,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter1319; - for (_iter1319 = (*(this->primaryKeys)).begin(); _iter1319 != (*(this->primaryKeys)).end(); ++_iter1319) + std::vector ::const_iterator _iter1338; + for (_iter1338 = (*(this->primaryKeys)).begin(); _iter1338 != (*(this->primaryKeys)).end(); ++_iter1338) { - xfer += (*_iter1319).write(oprot); + xfer += (*_iter1338).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5855,10 +5855,10 @@ uint32_t 
ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter1320; - for (_iter1320 = (*(this->foreignKeys)).begin(); _iter1320 != (*(this->foreignKeys)).end(); ++_iter1320) + std::vector ::const_iterator _iter1339; + for (_iter1339 = (*(this->foreignKeys)).begin(); _iter1339 != (*(this->foreignKeys)).end(); ++_iter1339) { - xfer += (*_iter1320).write(oprot); + xfer += (*_iter1339).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5867,10 +5867,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->uniqueConstraints)).size())); - std::vector ::const_iterator _iter1321; - for (_iter1321 = (*(this->uniqueConstraints)).begin(); _iter1321 != (*(this->uniqueConstraints)).end(); ++_iter1321) + std::vector ::const_iterator _iter1340; + for (_iter1340 = (*(this->uniqueConstraints)).begin(); _iter1340 != (*(this->uniqueConstraints)).end(); ++_iter1340) { - xfer += (*_iter1321).write(oprot); + xfer += (*_iter1340).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5879,10 +5879,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->notNullConstraints)).size())); - std::vector ::const_iterator _iter1322; - for (_iter1322 = (*(this->notNullConstraints)).begin(); _iter1322 != (*(this->notNullConstraints)).end(); ++_iter1322) + std::vector ::const_iterator _iter1341; + for (_iter1341 = (*(this->notNullConstraints)).begin(); _iter1341 != (*(this->notNullConstraints)).end(); ++_iter1341) { - xfer += (*_iter1322).write(oprot); + xfer += (*_iter1341).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5891,10 +5891,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->defaultConstraints)).size())); - std::vector ::const_iterator _iter1323; - for (_iter1323 = (*(this->defaultConstraints)).begin(); _iter1323 != (*(this->defaultConstraints)).end(); ++_iter1323) + std::vector ::const_iterator _iter1342; + for (_iter1342 = (*(this->defaultConstraints)).begin(); _iter1342 != (*(this->defaultConstraints)).end(); ++_iter1342) { - xfer += (*_iter1323).write(oprot); + xfer += (*_iter1342).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5903,10 +5903,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->checkConstraints)).size())); - std::vector ::const_iterator _iter1324; - for (_iter1324 = (*(this->checkConstraints)).begin(); _iter1324 != (*(this->checkConstraints)).end(); ++_iter1324) + std::vector ::const_iterator _iter1343; + for (_iter1343 = (*(this->checkConstraints)).begin(); _iter1343 != 
(*(this->checkConstraints)).end(); ++_iter1343) { - xfer += (*_iter1324).write(oprot); + xfer += (*_iter1343).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8074,14 +8074,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size1325; - ::apache::thrift::protocol::TType _etype1328; - xfer += iprot->readListBegin(_etype1328, _size1325); - this->partNames.resize(_size1325); - uint32_t _i1329; - for (_i1329 = 0; _i1329 < _size1325; ++_i1329) + uint32_t _size1344; + ::apache::thrift::protocol::TType _etype1347; + xfer += iprot->readListBegin(_etype1347, _size1344); + this->partNames.resize(_size1344); + uint32_t _i1348; + for (_i1348 = 0; _i1348 < _size1344; ++_i1348) { - xfer += iprot->readString(this->partNames[_i1329]); + xfer += iprot->readString(this->partNames[_i1348]); } xfer += iprot->readListEnd(); } @@ -8118,10 +8118,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter1330; - for (_iter1330 = this->partNames.begin(); _iter1330 != this->partNames.end(); ++_iter1330) + std::vector ::const_iterator _iter1349; + for (_iter1349 = this->partNames.begin(); _iter1349 != this->partNames.end(); ++_iter1349) { - xfer += oprot->writeString((*_iter1330)); + xfer += oprot->writeString((*_iter1349)); } xfer += oprot->writeListEnd(); } @@ -8153,10 +8153,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->partNames)).size())); - std::vector ::const_iterator _iter1331; - for (_iter1331 = (*(this->partNames)).begin(); _iter1331 != (*(this->partNames)).end(); ++_iter1331) + std::vector ::const_iterator _iter1350; + for (_iter1350 = (*(this->partNames)).begin(); _iter1350 != (*(this->partNames)).end(); ++_iter1350) { - xfer += oprot->writeString((*_iter1331)); + xfer += oprot->writeString((*_iter1350)); } xfer += oprot->writeListEnd(); } @@ -8400,14 +8400,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1332; - ::apache::thrift::protocol::TType _etype1335; - xfer += iprot->readListBegin(_etype1335, _size1332); - this->success.resize(_size1332); - uint32_t _i1336; - for (_i1336 = 0; _i1336 < _size1332; ++_i1336) + uint32_t _size1351; + ::apache::thrift::protocol::TType _etype1354; + xfer += iprot->readListBegin(_etype1354, _size1351); + this->success.resize(_size1351); + uint32_t _i1355; + for (_i1355 = 0; _i1355 < _size1351; ++_i1355) { - xfer += iprot->readString(this->success[_i1336]); + xfer += iprot->readString(this->success[_i1355]); } xfer += iprot->readListEnd(); } @@ -8446,10 +8446,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1337; - for (_iter1337 = this->success.begin(); _iter1337 != this->success.end(); 
++_iter1337) + std::vector ::const_iterator _iter1356; + for (_iter1356 = this->success.begin(); _iter1356 != this->success.end(); ++_iter1356) { - xfer += oprot->writeString((*_iter1337)); + xfer += oprot->writeString((*_iter1356)); } xfer += oprot->writeListEnd(); } @@ -8494,14 +8494,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1338; - ::apache::thrift::protocol::TType _etype1341; - xfer += iprot->readListBegin(_etype1341, _size1338); - (*(this->success)).resize(_size1338); - uint32_t _i1342; - for (_i1342 = 0; _i1342 < _size1338; ++_i1342) + uint32_t _size1357; + ::apache::thrift::protocol::TType _etype1360; + xfer += iprot->readListBegin(_etype1360, _size1357); + (*(this->success)).resize(_size1357); + uint32_t _i1361; + for (_i1361 = 0; _i1361 < _size1357; ++_i1361) { - xfer += iprot->readString((*(this->success))[_i1342]); + xfer += iprot->readString((*(this->success))[_i1361]); } xfer += iprot->readListEnd(); } @@ -8671,14 +8671,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1343; - ::apache::thrift::protocol::TType _etype1346; - xfer += iprot->readListBegin(_etype1346, _size1343); - this->success.resize(_size1343); - uint32_t _i1347; - for (_i1347 = 0; _i1347 < _size1343; ++_i1347) + uint32_t _size1362; + ::apache::thrift::protocol::TType _etype1365; + xfer += iprot->readListBegin(_etype1365, _size1362); + this->success.resize(_size1362); + uint32_t _i1366; + for (_i1366 = 0; _i1366 < _size1362; ++_i1366) { - xfer += iprot->readString(this->success[_i1347]); + xfer += iprot->readString(this->success[_i1366]); } xfer += iprot->readListEnd(); } @@ -8717,10 +8717,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1348; - for (_iter1348 = this->success.begin(); _iter1348 != this->success.end(); ++_iter1348) + std::vector ::const_iterator _iter1367; + for (_iter1367 = this->success.begin(); _iter1367 != this->success.end(); ++_iter1367) { - xfer += oprot->writeString((*_iter1348)); + xfer += oprot->writeString((*_iter1367)); } xfer += oprot->writeListEnd(); } @@ -8765,14 +8765,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1349; - ::apache::thrift::protocol::TType _etype1352; - xfer += iprot->readListBegin(_etype1352, _size1349); - (*(this->success)).resize(_size1349); - uint32_t _i1353; - for (_i1353 = 0; _i1353 < _size1349; ++_i1353) + uint32_t _size1368; + ::apache::thrift::protocol::TType _etype1371; + xfer += iprot->readListBegin(_etype1371, _size1368); + (*(this->success)).resize(_size1368); + uint32_t _i1372; + for (_i1372 = 0; _i1372 < _size1368; ++_i1372) { - xfer += iprot->readString((*(this->success))[_i1353]); + xfer += iprot->readString((*(this->success))[_i1372]); } xfer += iprot->readListEnd(); } @@ -8910,14 +8910,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1354; - 
::apache::thrift::protocol::TType _etype1357; - xfer += iprot->readListBegin(_etype1357, _size1354); - this->success.resize(_size1354); - uint32_t _i1358; - for (_i1358 = 0; _i1358 < _size1354; ++_i1358) + uint32_t _size1373; + ::apache::thrift::protocol::TType _etype1376; + xfer += iprot->readListBegin(_etype1376, _size1373); + this->success.resize(_size1373); + uint32_t _i1377; + for (_i1377 = 0; _i1377 < _size1373; ++_i1377) { - xfer += iprot->readString(this->success[_i1358]); + xfer += iprot->readString(this->success[_i1377]); } xfer += iprot->readListEnd(); } @@ -8956,10 +8956,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write( xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1359; - for (_iter1359 = this->success.begin(); _iter1359 != this->success.end(); ++_iter1359) + std::vector ::const_iterator _iter1378; + for (_iter1378 = this->success.begin(); _iter1378 != this->success.end(); ++_iter1378) { - xfer += oprot->writeString((*_iter1359)); + xfer += oprot->writeString((*_iter1378)); } xfer += oprot->writeListEnd(); } @@ -9004,14 +9004,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1360; - ::apache::thrift::protocol::TType _etype1363; - xfer += iprot->readListBegin(_etype1363, _size1360); - (*(this->success)).resize(_size1360); - uint32_t _i1364; - for (_i1364 = 0; _i1364 < _size1360; ++_i1364) + uint32_t _size1379; + ::apache::thrift::protocol::TType _etype1382; + xfer += iprot->readListBegin(_etype1382, _size1379); + (*(this->success)).resize(_size1379); + uint32_t _i1383; + for (_i1383 = 0; _i1383 < _size1379; ++_i1383) { - xfer += iprot->readString((*(this->success))[_i1364]); + xfer += iprot->readString((*(this->success))[_i1383]); } xfer += iprot->readListEnd(); } @@ -9086,14 +9086,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size1365; - ::apache::thrift::protocol::TType _etype1368; - xfer += iprot->readListBegin(_etype1368, _size1365); - this->tbl_types.resize(_size1365); - uint32_t _i1369; - for (_i1369 = 0; _i1369 < _size1365; ++_i1369) + uint32_t _size1384; + ::apache::thrift::protocol::TType _etype1387; + xfer += iprot->readListBegin(_etype1387, _size1384); + this->tbl_types.resize(_size1384); + uint32_t _i1388; + for (_i1388 = 0; _i1388 < _size1384; ++_i1388) { - xfer += iprot->readString(this->tbl_types[_i1369]); + xfer += iprot->readString(this->tbl_types[_i1388]); } xfer += iprot->readListEnd(); } @@ -9130,10 +9130,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter1370; - for (_iter1370 = this->tbl_types.begin(); _iter1370 != this->tbl_types.end(); ++_iter1370) + std::vector ::const_iterator _iter1389; + for (_iter1389 = this->tbl_types.begin(); _iter1389 != this->tbl_types.end(); ++_iter1389) { - xfer += oprot->writeString((*_iter1370)); + xfer += oprot->writeString((*_iter1389)); } xfer += oprot->writeListEnd(); } @@ -9165,10 +9165,10 
@@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter1371; - for (_iter1371 = (*(this->tbl_types)).begin(); _iter1371 != (*(this->tbl_types)).end(); ++_iter1371) + std::vector ::const_iterator _iter1390; + for (_iter1390 = (*(this->tbl_types)).begin(); _iter1390 != (*(this->tbl_types)).end(); ++_iter1390) { - xfer += oprot->writeString((*_iter1371)); + xfer += oprot->writeString((*_iter1390)); } xfer += oprot->writeListEnd(); } @@ -9209,14 +9209,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1372; - ::apache::thrift::protocol::TType _etype1375; - xfer += iprot->readListBegin(_etype1375, _size1372); - this->success.resize(_size1372); - uint32_t _i1376; - for (_i1376 = 0; _i1376 < _size1372; ++_i1376) + uint32_t _size1391; + ::apache::thrift::protocol::TType _etype1394; + xfer += iprot->readListBegin(_etype1394, _size1391); + this->success.resize(_size1391); + uint32_t _i1395; + for (_i1395 = 0; _i1395 < _size1391; ++_i1395) { - xfer += this->success[_i1376].read(iprot); + xfer += this->success[_i1395].read(iprot); } xfer += iprot->readListEnd(); } @@ -9255,10 +9255,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1377; - for (_iter1377 = this->success.begin(); _iter1377 != this->success.end(); ++_iter1377) + std::vector ::const_iterator _iter1396; + for (_iter1396 = this->success.begin(); _iter1396 != this->success.end(); ++_iter1396) { - xfer += (*_iter1377).write(oprot); + xfer += (*_iter1396).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9303,14 +9303,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1378; - ::apache::thrift::protocol::TType _etype1381; - xfer += iprot->readListBegin(_etype1381, _size1378); - (*(this->success)).resize(_size1378); - uint32_t _i1382; - for (_i1382 = 0; _i1382 < _size1378; ++_i1382) + uint32_t _size1397; + ::apache::thrift::protocol::TType _etype1400; + xfer += iprot->readListBegin(_etype1400, _size1397); + (*(this->success)).resize(_size1397); + uint32_t _i1401; + for (_i1401 = 0; _i1401 < _size1397; ++_i1401) { - xfer += (*(this->success))[_i1382].read(iprot); + xfer += (*(this->success))[_i1401].read(iprot); } xfer += iprot->readListEnd(); } @@ -9448,14 +9448,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1383; - ::apache::thrift::protocol::TType _etype1386; - xfer += iprot->readListBegin(_etype1386, _size1383); - this->success.resize(_size1383); - uint32_t _i1387; - for (_i1387 = 0; _i1387 < _size1383; ++_i1387) + uint32_t _size1402; + ::apache::thrift::protocol::TType _etype1405; + xfer += iprot->readListBegin(_etype1405, _size1402); + this->success.resize(_size1402); + uint32_t _i1406; + for (_i1406 = 0; _i1406 < _size1402; ++_i1406) { - 
xfer += iprot->readString(this->success[_i1387]); + xfer += iprot->readString(this->success[_i1406]); } xfer += iprot->readListEnd(); } @@ -9494,10 +9494,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1388; - for (_iter1388 = this->success.begin(); _iter1388 != this->success.end(); ++_iter1388) + std::vector ::const_iterator _iter1407; + for (_iter1407 = this->success.begin(); _iter1407 != this->success.end(); ++_iter1407) { - xfer += oprot->writeString((*_iter1388)); + xfer += oprot->writeString((*_iter1407)); } xfer += oprot->writeListEnd(); } @@ -9542,14 +9542,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1389; - ::apache::thrift::protocol::TType _etype1392; - xfer += iprot->readListBegin(_etype1392, _size1389); - (*(this->success)).resize(_size1389); - uint32_t _i1393; - for (_i1393 = 0; _i1393 < _size1389; ++_i1393) + uint32_t _size1408; + ::apache::thrift::protocol::TType _etype1411; + xfer += iprot->readListBegin(_etype1411, _size1408); + (*(this->success)).resize(_size1408); + uint32_t _i1412; + for (_i1412 = 0; _i1412 < _size1408; ++_i1412) { - xfer += iprot->readString((*(this->success))[_i1393]); + xfer += iprot->readString((*(this->success))[_i1412]); } xfer += iprot->readListEnd(); } @@ -9859,14 +9859,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1394; - ::apache::thrift::protocol::TType _etype1397; - xfer += iprot->readListBegin(_etype1397, _size1394); - this->tbl_names.resize(_size1394); - uint32_t _i1398; - for (_i1398 = 0; _i1398 < _size1394; ++_i1398) + uint32_t _size1413; + ::apache::thrift::protocol::TType _etype1416; + xfer += iprot->readListBegin(_etype1416, _size1413); + this->tbl_names.resize(_size1413); + uint32_t _i1417; + for (_i1417 = 0; _i1417 < _size1413; ++_i1417) { - xfer += iprot->readString(this->tbl_names[_i1398]); + xfer += iprot->readString(this->tbl_names[_i1417]); } xfer += iprot->readListEnd(); } @@ -9899,10 +9899,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1399; - for (_iter1399 = this->tbl_names.begin(); _iter1399 != this->tbl_names.end(); ++_iter1399) + std::vector ::const_iterator _iter1418; + for (_iter1418 = this->tbl_names.begin(); _iter1418 != this->tbl_names.end(); ++_iter1418) { - xfer += oprot->writeString((*_iter1399)); + xfer += oprot->writeString((*_iter1418)); } xfer += oprot->writeListEnd(); } @@ -9930,10 +9930,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1400; - for (_iter1400 = (*(this->tbl_names)).begin(); _iter1400 != (*(this->tbl_names)).end(); ++_iter1400) + 
std::vector ::const_iterator _iter1419; + for (_iter1419 = (*(this->tbl_names)).begin(); _iter1419 != (*(this->tbl_names)).end(); ++_iter1419) { - xfer += oprot->writeString((*_iter1400)); + xfer += oprot->writeString((*_iter1419)); } xfer += oprot->writeListEnd(); } @@ -9974,14 +9974,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1401; - ::apache::thrift::protocol::TType _etype1404; - xfer += iprot->readListBegin(_etype1404, _size1401); - this->success.resize(_size1401); - uint32_t _i1405; - for (_i1405 = 0; _i1405 < _size1401; ++_i1405) + uint32_t _size1420; + ::apache::thrift::protocol::TType _etype1423; + xfer += iprot->readListBegin(_etype1423, _size1420); + this->success.resize(_size1420); + uint32_t _i1424; + for (_i1424 = 0; _i1424 < _size1420; ++_i1424) { - xfer += this->success[_i1405].read(iprot); + xfer += this->success[_i1424].read(iprot); } xfer += iprot->readListEnd(); } @@ -10012,10 +10012,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1406; - for (_iter1406 = this->success.begin(); _iter1406 != this->success.end(); ++_iter1406) + std::vector
::const_iterator _iter1425; + for (_iter1425 = this->success.begin(); _iter1425 != this->success.end(); ++_iter1425) { - xfer += (*_iter1406).write(oprot); + xfer += (*_iter1425).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10056,14 +10056,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1407; - ::apache::thrift::protocol::TType _etype1410; - xfer += iprot->readListBegin(_etype1410, _size1407); - (*(this->success)).resize(_size1407); - uint32_t _i1411; - for (_i1411 = 0; _i1411 < _size1407; ++_i1411) + uint32_t _size1426; + ::apache::thrift::protocol::TType _etype1429; + xfer += iprot->readListBegin(_etype1429, _size1426); + (*(this->success)).resize(_size1426); + uint32_t _i1430; + for (_i1430 = 0; _i1430 < _size1426; ++_i1430) { - xfer += (*(this->success))[_i1411].read(iprot); + xfer += (*(this->success))[_i1430].read(iprot); } xfer += iprot->readListEnd(); } @@ -10596,14 +10596,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1412; - ::apache::thrift::protocol::TType _etype1415; - xfer += iprot->readListBegin(_etype1415, _size1412); - this->tbl_names.resize(_size1412); - uint32_t _i1416; - for (_i1416 = 0; _i1416 < _size1412; ++_i1416) + uint32_t _size1431; + ::apache::thrift::protocol::TType _etype1434; + xfer += iprot->readListBegin(_etype1434, _size1431); + this->tbl_names.resize(_size1431); + uint32_t _i1435; + for (_i1435 = 0; _i1435 < _size1431; ++_i1435) { - xfer += iprot->readString(this->tbl_names[_i1416]); + xfer += iprot->readString(this->tbl_names[_i1435]); } xfer += iprot->readListEnd(); } @@ -10636,10 +10636,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(: xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1417; - for (_iter1417 = this->tbl_names.begin(); _iter1417 != this->tbl_names.end(); ++_iter1417) + std::vector ::const_iterator _iter1436; + for (_iter1436 = this->tbl_names.begin(); _iter1436 != this->tbl_names.end(); ++_iter1436) { - xfer += oprot->writeString((*_iter1417)); + xfer += oprot->writeString((*_iter1436)); } xfer += oprot->writeListEnd(); } @@ -10667,10 +10667,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write( xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1418; - for (_iter1418 = (*(this->tbl_names)).begin(); _iter1418 != (*(this->tbl_names)).end(); ++_iter1418) + std::vector ::const_iterator _iter1437; + for (_iter1437 = (*(this->tbl_names)).begin(); _iter1437 != (*(this->tbl_names)).end(); ++_iter1437) { - xfer += oprot->writeString((*_iter1418)); + xfer += oprot->writeString((*_iter1437)); } xfer += oprot->writeListEnd(); } @@ -10711,17 +10711,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read( if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1419; - ::apache::thrift::protocol::TType _ktype1420; - ::apache::thrift::protocol::TType _vtype1421; - xfer += 
iprot->readMapBegin(_ktype1420, _vtype1421, _size1419); - uint32_t _i1423; - for (_i1423 = 0; _i1423 < _size1419; ++_i1423) + uint32_t _size1438; + ::apache::thrift::protocol::TType _ktype1439; + ::apache::thrift::protocol::TType _vtype1440; + xfer += iprot->readMapBegin(_ktype1439, _vtype1440, _size1438); + uint32_t _i1442; + for (_i1442 = 0; _i1442 < _size1438; ++_i1442) { - std::string _key1424; - xfer += iprot->readString(_key1424); - Materialization& _val1425 = this->success[_key1424]; - xfer += _val1425.read(iprot); + std::string _key1443; + xfer += iprot->readString(_key1443); + Materialization& _val1444 = this->success[_key1443]; + xfer += _val1444.read(iprot); } xfer += iprot->readMapEnd(); } @@ -10776,11 +10776,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter1426; - for (_iter1426 = this->success.begin(); _iter1426 != this->success.end(); ++_iter1426) + std::map ::const_iterator _iter1445; + for (_iter1445 = this->success.begin(); _iter1445 != this->success.end(); ++_iter1445) { - xfer += oprot->writeString(_iter1426->first); - xfer += _iter1426->second.write(oprot); + xfer += oprot->writeString(_iter1445->first); + xfer += _iter1445->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -10833,17 +10833,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1427; - ::apache::thrift::protocol::TType _ktype1428; - ::apache::thrift::protocol::TType _vtype1429; - xfer += iprot->readMapBegin(_ktype1428, _vtype1429, _size1427); - uint32_t _i1431; - for (_i1431 = 0; _i1431 < _size1427; ++_i1431) + uint32_t _size1446; + ::apache::thrift::protocol::TType _ktype1447; + ::apache::thrift::protocol::TType _vtype1448; + xfer += iprot->readMapBegin(_ktype1447, _vtype1448, _size1446); + uint32_t _i1450; + for (_i1450 = 0; _i1450 < _size1446; ++_i1450) { - std::string _key1432; - xfer += iprot->readString(_key1432); - Materialization& _val1433 = (*(this->success))[_key1432]; - xfer += _val1433.read(iprot); + std::string _key1451; + xfer += iprot->readString(_key1451); + Materialization& _val1452 = (*(this->success))[_key1451]; + xfer += _val1452.read(iprot); } xfer += iprot->readMapEnd(); } @@ -11304,14 +11304,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1434; - ::apache::thrift::protocol::TType _etype1437; - xfer += iprot->readListBegin(_etype1437, _size1434); - this->success.resize(_size1434); - uint32_t _i1438; - for (_i1438 = 0; _i1438 < _size1434; ++_i1438) + uint32_t _size1453; + ::apache::thrift::protocol::TType _etype1456; + xfer += iprot->readListBegin(_etype1456, _size1453); + this->success.resize(_size1453); + uint32_t _i1457; + for (_i1457 = 0; _i1457 < _size1453; ++_i1457) { - xfer += iprot->readString(this->success[_i1438]); + xfer += iprot->readString(this->success[_i1457]); } xfer += iprot->readListEnd(); } @@ -11366,10 +11366,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1439; - for (_iter1439 = this->success.begin(); _iter1439 != this->success.end(); ++_iter1439) + std::vector ::const_iterator _iter1458; + for (_iter1458 = this->success.begin(); _iter1458 != this->success.end(); ++_iter1458) { - xfer += oprot->writeString((*_iter1439)); + xfer += oprot->writeString((*_iter1458)); } xfer += oprot->writeListEnd(); } @@ -11422,14 +11422,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1440; - ::apache::thrift::protocol::TType _etype1443; - xfer += iprot->readListBegin(_etype1443, _size1440); - (*(this->success)).resize(_size1440); - uint32_t _i1444; - for (_i1444 = 0; _i1444 < _size1440; ++_i1444) + uint32_t _size1459; + ::apache::thrift::protocol::TType _etype1462; + xfer += iprot->readListBegin(_etype1462, _size1459); + (*(this->success)).resize(_size1459); + uint32_t _i1463; + for (_i1463 = 0; _i1463 < _size1459; ++_i1463) { - xfer += iprot->readString((*(this->success))[_i1444]); + xfer += iprot->readString((*(this->success))[_i1463]); } xfer += iprot->readListEnd(); } @@ -12763,14 +12763,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1445; - ::apache::thrift::protocol::TType _etype1448; - xfer += iprot->readListBegin(_etype1448, _size1445); - this->new_parts.resize(_size1445); - uint32_t _i1449; - for (_i1449 = 0; _i1449 < _size1445; ++_i1449) + uint32_t _size1464; + ::apache::thrift::protocol::TType _etype1467; + xfer += iprot->readListBegin(_etype1467, _size1464); + this->new_parts.resize(_size1464); + uint32_t _i1468; + for (_i1468 = 0; _i1468 < _size1464; ++_i1468) { - xfer += this->new_parts[_i1449].read(iprot); + xfer += this->new_parts[_i1468].read(iprot); } xfer += iprot->readListEnd(); } @@ -12799,10 +12799,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1450; - for (_iter1450 = this->new_parts.begin(); _iter1450 != this->new_parts.end(); ++_iter1450) + std::vector ::const_iterator _iter1469; + for (_iter1469 = this->new_parts.begin(); _iter1469 != this->new_parts.end(); ++_iter1469) { - xfer += (*_iter1450).write(oprot); + xfer += (*_iter1469).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12826,10 +12826,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1451; - for (_iter1451 = (*(this->new_parts)).begin(); _iter1451 != (*(this->new_parts)).end(); ++_iter1451) + std::vector ::const_iterator _iter1470; + for (_iter1470 = (*(this->new_parts)).begin(); _iter1470 != (*(this->new_parts)).end(); ++_iter1470) { - xfer += (*_iter1451).write(oprot); + xfer += (*_iter1470).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13038,14 +13038,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p 
if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1452; - ::apache::thrift::protocol::TType _etype1455; - xfer += iprot->readListBegin(_etype1455, _size1452); - this->new_parts.resize(_size1452); - uint32_t _i1456; - for (_i1456 = 0; _i1456 < _size1452; ++_i1456) + uint32_t _size1471; + ::apache::thrift::protocol::TType _etype1474; + xfer += iprot->readListBegin(_etype1474, _size1471); + this->new_parts.resize(_size1471); + uint32_t _i1475; + for (_i1475 = 0; _i1475 < _size1471; ++_i1475) { - xfer += this->new_parts[_i1456].read(iprot); + xfer += this->new_parts[_i1475].read(iprot); } xfer += iprot->readListEnd(); } @@ -13074,10 +13074,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1457; - for (_iter1457 = this->new_parts.begin(); _iter1457 != this->new_parts.end(); ++_iter1457) + std::vector ::const_iterator _iter1476; + for (_iter1476 = this->new_parts.begin(); _iter1476 != this->new_parts.end(); ++_iter1476) { - xfer += (*_iter1457).write(oprot); + xfer += (*_iter1476).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13101,10 +13101,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1458; - for (_iter1458 = (*(this->new_parts)).begin(); _iter1458 != (*(this->new_parts)).end(); ++_iter1458) + std::vector ::const_iterator _iter1477; + for (_iter1477 = (*(this->new_parts)).begin(); _iter1477 != (*(this->new_parts)).end(); ++_iter1477) { - xfer += (*_iter1458).write(oprot); + xfer += (*_iter1477).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13329,14 +13329,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1459; - ::apache::thrift::protocol::TType _etype1462; - xfer += iprot->readListBegin(_etype1462, _size1459); - this->part_vals.resize(_size1459); - uint32_t _i1463; - for (_i1463 = 0; _i1463 < _size1459; ++_i1463) + uint32_t _size1478; + ::apache::thrift::protocol::TType _etype1481; + xfer += iprot->readListBegin(_etype1481, _size1478); + this->part_vals.resize(_size1478); + uint32_t _i1482; + for (_i1482 = 0; _i1482 < _size1478; ++_i1482) { - xfer += iprot->readString(this->part_vals[_i1463]); + xfer += iprot->readString(this->part_vals[_i1482]); } xfer += iprot->readListEnd(); } @@ -13373,10 +13373,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1464; - for (_iter1464 = this->part_vals.begin(); _iter1464 != this->part_vals.end(); ++_iter1464) + std::vector ::const_iterator _iter1483; + for (_iter1483 = this->part_vals.begin(); _iter1483 != this->part_vals.end(); ++_iter1483) { - xfer += oprot->writeString((*_iter1464)); + xfer += oprot->writeString((*_iter1483)); } xfer += oprot->writeListEnd(); } @@ -13408,10 
+13408,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1465; - for (_iter1465 = (*(this->part_vals)).begin(); _iter1465 != (*(this->part_vals)).end(); ++_iter1465) + std::vector ::const_iterator _iter1484; + for (_iter1484 = (*(this->part_vals)).begin(); _iter1484 != (*(this->part_vals)).end(); ++_iter1484) { - xfer += oprot->writeString((*_iter1465)); + xfer += oprot->writeString((*_iter1484)); } xfer += oprot->writeListEnd(); } @@ -13883,14 +13883,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1466; - ::apache::thrift::protocol::TType _etype1469; - xfer += iprot->readListBegin(_etype1469, _size1466); - this->part_vals.resize(_size1466); - uint32_t _i1470; - for (_i1470 = 0; _i1470 < _size1466; ++_i1470) + uint32_t _size1485; + ::apache::thrift::protocol::TType _etype1488; + xfer += iprot->readListBegin(_etype1488, _size1485); + this->part_vals.resize(_size1485); + uint32_t _i1489; + for (_i1489 = 0; _i1489 < _size1485; ++_i1489) { - xfer += iprot->readString(this->part_vals[_i1470]); + xfer += iprot->readString(this->part_vals[_i1489]); } xfer += iprot->readListEnd(); } @@ -13935,10 +13935,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1471; - for (_iter1471 = this->part_vals.begin(); _iter1471 != this->part_vals.end(); ++_iter1471) + std::vector ::const_iterator _iter1490; + for (_iter1490 = this->part_vals.begin(); _iter1490 != this->part_vals.end(); ++_iter1490) { - xfer += oprot->writeString((*_iter1471)); + xfer += oprot->writeString((*_iter1490)); } xfer += oprot->writeListEnd(); } @@ -13974,10 +13974,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1472; - for (_iter1472 = (*(this->part_vals)).begin(); _iter1472 != (*(this->part_vals)).end(); ++_iter1472) + std::vector ::const_iterator _iter1491; + for (_iter1491 = (*(this->part_vals)).begin(); _iter1491 != (*(this->part_vals)).end(); ++_iter1491) { - xfer += oprot->writeString((*_iter1472)); + xfer += oprot->writeString((*_iter1491)); } xfer += oprot->writeListEnd(); } @@ -14780,14 +14780,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1473; - ::apache::thrift::protocol::TType _etype1476; - xfer += iprot->readListBegin(_etype1476, _size1473); - this->part_vals.resize(_size1473); - uint32_t _i1477; - for (_i1477 = 0; _i1477 < _size1473; ++_i1477) + uint32_t _size1492; + ::apache::thrift::protocol::TType _etype1495; + xfer += iprot->readListBegin(_etype1495, _size1492); + this->part_vals.resize(_size1492); + uint32_t _i1496; + for (_i1496 = 0; _i1496 < _size1492; ++_i1496) { 
- xfer += iprot->readString(this->part_vals[_i1477]); + xfer += iprot->readString(this->part_vals[_i1496]); } xfer += iprot->readListEnd(); } @@ -14832,10 +14832,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1478; - for (_iter1478 = this->part_vals.begin(); _iter1478 != this->part_vals.end(); ++_iter1478) + std::vector ::const_iterator _iter1497; + for (_iter1497 = this->part_vals.begin(); _iter1497 != this->part_vals.end(); ++_iter1497) { - xfer += oprot->writeString((*_iter1478)); + xfer += oprot->writeString((*_iter1497)); } xfer += oprot->writeListEnd(); } @@ -14871,10 +14871,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1479; - for (_iter1479 = (*(this->part_vals)).begin(); _iter1479 != (*(this->part_vals)).end(); ++_iter1479) + std::vector ::const_iterator _iter1498; + for (_iter1498 = (*(this->part_vals)).begin(); _iter1498 != (*(this->part_vals)).end(); ++_iter1498) { - xfer += oprot->writeString((*_iter1479)); + xfer += oprot->writeString((*_iter1498)); } xfer += oprot->writeListEnd(); } @@ -15083,14 +15083,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1480; - ::apache::thrift::protocol::TType _etype1483; - xfer += iprot->readListBegin(_etype1483, _size1480); - this->part_vals.resize(_size1480); - uint32_t _i1484; - for (_i1484 = 0; _i1484 < _size1480; ++_i1484) + uint32_t _size1499; + ::apache::thrift::protocol::TType _etype1502; + xfer += iprot->readListBegin(_etype1502, _size1499); + this->part_vals.resize(_size1499); + uint32_t _i1503; + for (_i1503 = 0; _i1503 < _size1499; ++_i1503) { - xfer += iprot->readString(this->part_vals[_i1484]); + xfer += iprot->readString(this->part_vals[_i1503]); } xfer += iprot->readListEnd(); } @@ -15143,10 +15143,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1485; - for (_iter1485 = this->part_vals.begin(); _iter1485 != this->part_vals.end(); ++_iter1485) + std::vector ::const_iterator _iter1504; + for (_iter1504 = this->part_vals.begin(); _iter1504 != this->part_vals.end(); ++_iter1504) { - xfer += oprot->writeString((*_iter1485)); + xfer += oprot->writeString((*_iter1504)); } xfer += oprot->writeListEnd(); } @@ -15186,10 +15186,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1486; - for (_iter1486 = (*(this->part_vals)).begin(); _iter1486 != (*(this->part_vals)).end(); ++_iter1486) + std::vector ::const_iterator _iter1505; + for (_iter1505 = 
(*(this->part_vals)).begin(); _iter1505 != (*(this->part_vals)).end(); ++_iter1505) { - xfer += oprot->writeString((*_iter1486)); + xfer += oprot->writeString((*_iter1505)); } xfer += oprot->writeListEnd(); } @@ -16195,14 +16195,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1487; - ::apache::thrift::protocol::TType _etype1490; - xfer += iprot->readListBegin(_etype1490, _size1487); - this->part_vals.resize(_size1487); - uint32_t _i1491; - for (_i1491 = 0; _i1491 < _size1487; ++_i1491) + uint32_t _size1506; + ::apache::thrift::protocol::TType _etype1509; + xfer += iprot->readListBegin(_etype1509, _size1506); + this->part_vals.resize(_size1506); + uint32_t _i1510; + for (_i1510 = 0; _i1510 < _size1506; ++_i1510) { - xfer += iprot->readString(this->part_vals[_i1491]); + xfer += iprot->readString(this->part_vals[_i1510]); } xfer += iprot->readListEnd(); } @@ -16239,10 +16239,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1492; - for (_iter1492 = this->part_vals.begin(); _iter1492 != this->part_vals.end(); ++_iter1492) + std::vector ::const_iterator _iter1511; + for (_iter1511 = this->part_vals.begin(); _iter1511 != this->part_vals.end(); ++_iter1511) { - xfer += oprot->writeString((*_iter1492)); + xfer += oprot->writeString((*_iter1511)); } xfer += oprot->writeListEnd(); } @@ -16274,10 +16274,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1493; - for (_iter1493 = (*(this->part_vals)).begin(); _iter1493 != (*(this->part_vals)).end(); ++_iter1493) + std::vector ::const_iterator _iter1512; + for (_iter1512 = (*(this->part_vals)).begin(); _iter1512 != (*(this->part_vals)).end(); ++_iter1512) { - xfer += oprot->writeString((*_iter1493)); + xfer += oprot->writeString((*_iter1512)); } xfer += oprot->writeListEnd(); } @@ -16466,17 +16466,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1494; - ::apache::thrift::protocol::TType _ktype1495; - ::apache::thrift::protocol::TType _vtype1496; - xfer += iprot->readMapBegin(_ktype1495, _vtype1496, _size1494); - uint32_t _i1498; - for (_i1498 = 0; _i1498 < _size1494; ++_i1498) + uint32_t _size1513; + ::apache::thrift::protocol::TType _ktype1514; + ::apache::thrift::protocol::TType _vtype1515; + xfer += iprot->readMapBegin(_ktype1514, _vtype1515, _size1513); + uint32_t _i1517; + for (_i1517 = 0; _i1517 < _size1513; ++_i1517) { - std::string _key1499; - xfer += iprot->readString(_key1499); - std::string& _val1500 = this->partitionSpecs[_key1499]; - xfer += iprot->readString(_val1500); + std::string _key1518; + xfer += iprot->readString(_key1518); + std::string& _val1519 = this->partitionSpecs[_key1518]; + xfer += iprot->readString(_val1519); } xfer += iprot->readMapEnd(); } @@ -16537,11 +16537,11 @@ uint32_t 
ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1501; - for (_iter1501 = this->partitionSpecs.begin(); _iter1501 != this->partitionSpecs.end(); ++_iter1501) + std::map ::const_iterator _iter1520; + for (_iter1520 = this->partitionSpecs.begin(); _iter1520 != this->partitionSpecs.end(); ++_iter1520) { - xfer += oprot->writeString(_iter1501->first); - xfer += oprot->writeString(_iter1501->second); + xfer += oprot->writeString(_iter1520->first); + xfer += oprot->writeString(_iter1520->second); } xfer += oprot->writeMapEnd(); } @@ -16581,11 +16581,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1502; - for (_iter1502 = (*(this->partitionSpecs)).begin(); _iter1502 != (*(this->partitionSpecs)).end(); ++_iter1502) + std::map ::const_iterator _iter1521; + for (_iter1521 = (*(this->partitionSpecs)).begin(); _iter1521 != (*(this->partitionSpecs)).end(); ++_iter1521) { - xfer += oprot->writeString(_iter1502->first); - xfer += oprot->writeString(_iter1502->second); + xfer += oprot->writeString(_iter1521->first); + xfer += oprot->writeString(_iter1521->second); } xfer += oprot->writeMapEnd(); } @@ -16830,17 +16830,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1503; - ::apache::thrift::protocol::TType _ktype1504; - ::apache::thrift::protocol::TType _vtype1505; - xfer += iprot->readMapBegin(_ktype1504, _vtype1505, _size1503); - uint32_t _i1507; - for (_i1507 = 0; _i1507 < _size1503; ++_i1507) + uint32_t _size1522; + ::apache::thrift::protocol::TType _ktype1523; + ::apache::thrift::protocol::TType _vtype1524; + xfer += iprot->readMapBegin(_ktype1523, _vtype1524, _size1522); + uint32_t _i1526; + for (_i1526 = 0; _i1526 < _size1522; ++_i1526) { - std::string _key1508; - xfer += iprot->readString(_key1508); - std::string& _val1509 = this->partitionSpecs[_key1508]; - xfer += iprot->readString(_val1509); + std::string _key1527; + xfer += iprot->readString(_key1527); + std::string& _val1528 = this->partitionSpecs[_key1527]; + xfer += iprot->readString(_val1528); } xfer += iprot->readMapEnd(); } @@ -16901,11 +16901,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1510; - for (_iter1510 = this->partitionSpecs.begin(); _iter1510 != this->partitionSpecs.end(); ++_iter1510) + std::map ::const_iterator _iter1529; + for (_iter1529 = this->partitionSpecs.begin(); _iter1529 != this->partitionSpecs.end(); ++_iter1529) { - xfer += oprot->writeString(_iter1510->first); - xfer += oprot->writeString(_iter1510->second); + xfer += oprot->writeString(_iter1529->first); + xfer += 
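/*
 * The exchange_partition / exchange_partitions hunks renumber the same way;
 * the underlying shape is Thrift's generated map deserialization: read the
 * declared size, then alternate key/value reads, inserting each value in
 * place. A sketch under the same assumptions as above (illustrative helper
 * only, mirroring the _key/_val temporaries in the generated code):
 */
#include <cstdint>
#include <map>
#include <string>
#include <thrift/protocol/TProtocol.h>

uint32_t readPartitionSpecsField(::apache::thrift::protocol::TProtocol* iprot,
                                 std::map<std::string, std::string>& specs) {
  uint32_t xfer = 0;
  specs.clear();
  uint32_t size = 0;
  ::apache::thrift::protocol::TType ktype, vtype;
  xfer += iprot->readMapBegin(ktype, vtype, size);
  for (uint32_t i = 0; i < size; ++i) {
    std::string key;
    xfer += iprot->readString(key);
    // operator[] default-constructs the slot, then the value is read into
    // it directly, exactly as the generated _val reference does
    xfer += iprot->readString(specs[key]);
  }
  xfer += iprot->readMapEnd();
  return xfer;
}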
oprot->writeString(_iter1529->second); } xfer += oprot->writeMapEnd(); } @@ -16945,11 +16945,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1511; - for (_iter1511 = (*(this->partitionSpecs)).begin(); _iter1511 != (*(this->partitionSpecs)).end(); ++_iter1511) + std::map ::const_iterator _iter1530; + for (_iter1530 = (*(this->partitionSpecs)).begin(); _iter1530 != (*(this->partitionSpecs)).end(); ++_iter1530) { - xfer += oprot->writeString(_iter1511->first); - xfer += oprot->writeString(_iter1511->second); + xfer += oprot->writeString(_iter1530->first); + xfer += oprot->writeString(_iter1530->second); } xfer += oprot->writeMapEnd(); } @@ -17006,14 +17006,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1512; - ::apache::thrift::protocol::TType _etype1515; - xfer += iprot->readListBegin(_etype1515, _size1512); - this->success.resize(_size1512); - uint32_t _i1516; - for (_i1516 = 0; _i1516 < _size1512; ++_i1516) + uint32_t _size1531; + ::apache::thrift::protocol::TType _etype1534; + xfer += iprot->readListBegin(_etype1534, _size1531); + this->success.resize(_size1531); + uint32_t _i1535; + for (_i1535 = 0; _i1535 < _size1531; ++_i1535) { - xfer += this->success[_i1516].read(iprot); + xfer += this->success[_i1535].read(iprot); } xfer += iprot->readListEnd(); } @@ -17076,10 +17076,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1517; - for (_iter1517 = this->success.begin(); _iter1517 != this->success.end(); ++_iter1517) + std::vector ::const_iterator _iter1536; + for (_iter1536 = this->success.begin(); _iter1536 != this->success.end(); ++_iter1536) { - xfer += (*_iter1517).write(oprot); + xfer += (*_iter1536).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17136,14 +17136,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1518; - ::apache::thrift::protocol::TType _etype1521; - xfer += iprot->readListBegin(_etype1521, _size1518); - (*(this->success)).resize(_size1518); - uint32_t _i1522; - for (_i1522 = 0; _i1522 < _size1518; ++_i1522) + uint32_t _size1537; + ::apache::thrift::protocol::TType _etype1540; + xfer += iprot->readListBegin(_etype1540, _size1537); + (*(this->success)).resize(_size1537); + uint32_t _i1541; + for (_i1541 = 0; _i1541 < _size1537; ++_i1541) { - xfer += (*(this->success))[_i1522].read(iprot); + xfer += (*(this->success))[_i1541].read(iprot); } xfer += iprot->readListEnd(); } @@ -17242,14 +17242,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1523; - ::apache::thrift::protocol::TType _etype1526; - xfer += iprot->readListBegin(_etype1526, _size1523); - this->part_vals.resize(_size1523); - uint32_t _i1527; - for 
(_i1527 = 0; _i1527 < _size1523; ++_i1527) + uint32_t _size1542; + ::apache::thrift::protocol::TType _etype1545; + xfer += iprot->readListBegin(_etype1545, _size1542); + this->part_vals.resize(_size1542); + uint32_t _i1546; + for (_i1546 = 0; _i1546 < _size1542; ++_i1546) { - xfer += iprot->readString(this->part_vals[_i1527]); + xfer += iprot->readString(this->part_vals[_i1546]); } xfer += iprot->readListEnd(); } @@ -17270,14 +17270,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1528; - ::apache::thrift::protocol::TType _etype1531; - xfer += iprot->readListBegin(_etype1531, _size1528); - this->group_names.resize(_size1528); - uint32_t _i1532; - for (_i1532 = 0; _i1532 < _size1528; ++_i1532) + uint32_t _size1547; + ::apache::thrift::protocol::TType _etype1550; + xfer += iprot->readListBegin(_etype1550, _size1547); + this->group_names.resize(_size1547); + uint32_t _i1551; + for (_i1551 = 0; _i1551 < _size1547; ++_i1551) { - xfer += iprot->readString(this->group_names[_i1532]); + xfer += iprot->readString(this->group_names[_i1551]); } xfer += iprot->readListEnd(); } @@ -17314,10 +17314,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1533; - for (_iter1533 = this->part_vals.begin(); _iter1533 != this->part_vals.end(); ++_iter1533) + std::vector ::const_iterator _iter1552; + for (_iter1552 = this->part_vals.begin(); _iter1552 != this->part_vals.end(); ++_iter1552) { - xfer += oprot->writeString((*_iter1533)); + xfer += oprot->writeString((*_iter1552)); } xfer += oprot->writeListEnd(); } @@ -17330,10 +17330,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1534; - for (_iter1534 = this->group_names.begin(); _iter1534 != this->group_names.end(); ++_iter1534) + std::vector ::const_iterator _iter1553; + for (_iter1553 = this->group_names.begin(); _iter1553 != this->group_names.end(); ++_iter1553) { - xfer += oprot->writeString((*_iter1534)); + xfer += oprot->writeString((*_iter1553)); } xfer += oprot->writeListEnd(); } @@ -17365,10 +17365,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1535; - for (_iter1535 = (*(this->part_vals)).begin(); _iter1535 != (*(this->part_vals)).end(); ++_iter1535) + std::vector ::const_iterator _iter1554; + for (_iter1554 = (*(this->part_vals)).begin(); _iter1554 != (*(this->part_vals)).end(); ++_iter1554) { - xfer += oprot->writeString((*_iter1535)); + xfer += oprot->writeString((*_iter1554)); } xfer += oprot->writeListEnd(); } @@ -17381,10 +17381,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1536; - for (_iter1536 = (*(this->group_names)).begin(); _iter1536 != (*(this->group_names)).end(); ++_iter1536) + std::vector ::const_iterator _iter1555; + for (_iter1555 = (*(this->group_names)).begin(); _iter1555 != (*(this->group_names)).end(); ++_iter1555) { - xfer += oprot->writeString((*_iter1536)); + xfer += oprot->writeString((*_iter1555)); } xfer += oprot->writeListEnd(); } @@ -17943,14 +17943,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1537; - ::apache::thrift::protocol::TType _etype1540; - xfer += iprot->readListBegin(_etype1540, _size1537); - this->success.resize(_size1537); - uint32_t _i1541; - for (_i1541 = 0; _i1541 < _size1537; ++_i1541) + uint32_t _size1556; + ::apache::thrift::protocol::TType _etype1559; + xfer += iprot->readListBegin(_etype1559, _size1556); + this->success.resize(_size1556); + uint32_t _i1560; + for (_i1560 = 0; _i1560 < _size1556; ++_i1560) { - xfer += this->success[_i1541].read(iprot); + xfer += this->success[_i1560].read(iprot); } xfer += iprot->readListEnd(); } @@ -17997,10 +17997,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1542; - for (_iter1542 = this->success.begin(); _iter1542 != this->success.end(); ++_iter1542) + std::vector ::const_iterator _iter1561; + for (_iter1561 = this->success.begin(); _iter1561 != this->success.end(); ++_iter1561) { - xfer += (*_iter1542).write(oprot); + xfer += (*_iter1561).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18049,14 +18049,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1543; - ::apache::thrift::protocol::TType _etype1546; - xfer += iprot->readListBegin(_etype1546, _size1543); - (*(this->success)).resize(_size1543); - uint32_t _i1547; - for (_i1547 = 0; _i1547 < _size1543; ++_i1547) + uint32_t _size1562; + ::apache::thrift::protocol::TType _etype1565; + xfer += iprot->readListBegin(_etype1565, _size1562); + (*(this->success)).resize(_size1562); + uint32_t _i1566; + for (_i1566 = 0; _i1566 < _size1562; ++_i1566) { - xfer += (*(this->success))[_i1547].read(iprot); + xfer += (*(this->success))[_i1566].read(iprot); } xfer += iprot->readListEnd(); } @@ -18155,14 +18155,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1548; - ::apache::thrift::protocol::TType _etype1551; - xfer += iprot->readListBegin(_etype1551, _size1548); - this->group_names.resize(_size1548); - uint32_t _i1552; - for (_i1552 = 0; _i1552 < _size1548; ++_i1552) + uint32_t _size1567; + ::apache::thrift::protocol::TType _etype1570; + xfer += iprot->readListBegin(_etype1570, _size1567); + this->group_names.resize(_size1567); + uint32_t _i1571; + for (_i1571 = 0; _i1571 < _size1567; ++_i1571) { - xfer += iprot->readString(this->group_names[_i1552]); + xfer += iprot->readString(this->group_names[_i1571]); } xfer += iprot->readListEnd(); } @@ 
-18207,10 +18207,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1553; - for (_iter1553 = this->group_names.begin(); _iter1553 != this->group_names.end(); ++_iter1553) + std::vector ::const_iterator _iter1572; + for (_iter1572 = this->group_names.begin(); _iter1572 != this->group_names.end(); ++_iter1572) { - xfer += oprot->writeString((*_iter1553)); + xfer += oprot->writeString((*_iter1572)); } xfer += oprot->writeListEnd(); } @@ -18250,10 +18250,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1554; - for (_iter1554 = (*(this->group_names)).begin(); _iter1554 != (*(this->group_names)).end(); ++_iter1554) + std::vector ::const_iterator _iter1573; + for (_iter1573 = (*(this->group_names)).begin(); _iter1573 != (*(this->group_names)).end(); ++_iter1573) { - xfer += oprot->writeString((*_iter1554)); + xfer += oprot->writeString((*_iter1573)); } xfer += oprot->writeListEnd(); } @@ -18294,14 +18294,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1555; - ::apache::thrift::protocol::TType _etype1558; - xfer += iprot->readListBegin(_etype1558, _size1555); - this->success.resize(_size1555); - uint32_t _i1559; - for (_i1559 = 0; _i1559 < _size1555; ++_i1559) + uint32_t _size1574; + ::apache::thrift::protocol::TType _etype1577; + xfer += iprot->readListBegin(_etype1577, _size1574); + this->success.resize(_size1574); + uint32_t _i1578; + for (_i1578 = 0; _i1578 < _size1574; ++_i1578) { - xfer += this->success[_i1559].read(iprot); + xfer += this->success[_i1578].read(iprot); } xfer += iprot->readListEnd(); } @@ -18348,10 +18348,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1560; - for (_iter1560 = this->success.begin(); _iter1560 != this->success.end(); ++_iter1560) + std::vector ::const_iterator _iter1579; + for (_iter1579 = this->success.begin(); _iter1579 != this->success.end(); ++_iter1579) { - xfer += (*_iter1560).write(oprot); + xfer += (*_iter1579).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18400,14 +18400,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1561; - ::apache::thrift::protocol::TType _etype1564; - xfer += iprot->readListBegin(_etype1564, _size1561); - (*(this->success)).resize(_size1561); - uint32_t _i1565; - for (_i1565 = 0; _i1565 < _size1561; ++_i1565) + uint32_t _size1580; + ::apache::thrift::protocol::TType _etype1583; + xfer += iprot->readListBegin(_etype1583, _size1580); + (*(this->success)).resize(_size1580); + uint32_t _i1584; + for (_i1584 = 0; _i1584 < _size1580; ++_i1584) { - xfer += 
(*(this->success))[_i1565].read(iprot); + xfer += (*(this->success))[_i1584].read(iprot); } xfer += iprot->readListEnd(); } @@ -18585,14 +18585,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1566; - ::apache::thrift::protocol::TType _etype1569; - xfer += iprot->readListBegin(_etype1569, _size1566); - this->success.resize(_size1566); - uint32_t _i1570; - for (_i1570 = 0; _i1570 < _size1566; ++_i1570) + uint32_t _size1585; + ::apache::thrift::protocol::TType _etype1588; + xfer += iprot->readListBegin(_etype1588, _size1585); + this->success.resize(_size1585); + uint32_t _i1589; + for (_i1589 = 0; _i1589 < _size1585; ++_i1589) { - xfer += this->success[_i1570].read(iprot); + xfer += this->success[_i1589].read(iprot); } xfer += iprot->readListEnd(); } @@ -18639,10 +18639,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1571; - for (_iter1571 = this->success.begin(); _iter1571 != this->success.end(); ++_iter1571) + std::vector ::const_iterator _iter1590; + for (_iter1590 = this->success.begin(); _iter1590 != this->success.end(); ++_iter1590) { - xfer += (*_iter1571).write(oprot); + xfer += (*_iter1590).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18691,14 +18691,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1572; - ::apache::thrift::protocol::TType _etype1575; - xfer += iprot->readListBegin(_etype1575, _size1572); - (*(this->success)).resize(_size1572); - uint32_t _i1576; - for (_i1576 = 0; _i1576 < _size1572; ++_i1576) + uint32_t _size1591; + ::apache::thrift::protocol::TType _etype1594; + xfer += iprot->readListBegin(_etype1594, _size1591); + (*(this->success)).resize(_size1591); + uint32_t _i1595; + for (_i1595 = 0; _i1595 < _size1591; ++_i1595) { - xfer += (*(this->success))[_i1576].read(iprot); + xfer += (*(this->success))[_i1595].read(iprot); } xfer += iprot->readListEnd(); } @@ -18876,14 +18876,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1577; - ::apache::thrift::protocol::TType _etype1580; - xfer += iprot->readListBegin(_etype1580, _size1577); - this->success.resize(_size1577); - uint32_t _i1581; - for (_i1581 = 0; _i1581 < _size1577; ++_i1581) + uint32_t _size1596; + ::apache::thrift::protocol::TType _etype1599; + xfer += iprot->readListBegin(_etype1599, _size1596); + this->success.resize(_size1596); + uint32_t _i1600; + for (_i1600 = 0; _i1600 < _size1596; ++_i1600) { - xfer += iprot->readString(this->success[_i1581]); + xfer += iprot->readString(this->success[_i1600]); } xfer += iprot->readListEnd(); } @@ -18930,10 +18930,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1582; - for (_iter1582 = this->success.begin(); _iter1582 != this->success.end(); ++_iter1582) + 
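/*
 * For list<struct> results (the success vectors of the get_partitions*
 * methods above), the generated reader resizes the vector up front and then
 * deserializes each element in place with element.read(iprot); elements
 * must exist before .read() can be called on them, which is why the code
 * indexes rather than push_backs. Generic sketch (the element type stands
 * in for generated structs such as Partition; the helper is illustrative):
 */
#include <cstdint>
#include <vector>
#include <thrift/protocol/TProtocol.h>

template <typename TElem>
uint32_t readStructList(::apache::thrift::protocol::TProtocol* iprot,
                        std::vector<TElem>& out) {
  uint32_t xfer = 0;
  out.clear();
  uint32_t size = 0;
  ::apache::thrift::protocol::TType etype;
  xfer += iprot->readListBegin(etype, size);
  out.resize(size);                 // allocate all elements first
  for (uint32_t i = 0; i < size; ++i) {
    xfer += out[i].read(iprot);     // deserialize each element in place
  }
  xfer += iprot->readListEnd();
  return xfer;
}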
std::vector ::const_iterator _iter1601; + for (_iter1601 = this->success.begin(); _iter1601 != this->success.end(); ++_iter1601) { - xfer += oprot->writeString((*_iter1582)); + xfer += oprot->writeString((*_iter1601)); } xfer += oprot->writeListEnd(); } @@ -18982,14 +18982,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1583; - ::apache::thrift::protocol::TType _etype1586; - xfer += iprot->readListBegin(_etype1586, _size1583); - (*(this->success)).resize(_size1583); - uint32_t _i1587; - for (_i1587 = 0; _i1587 < _size1583; ++_i1587) + uint32_t _size1602; + ::apache::thrift::protocol::TType _etype1605; + xfer += iprot->readListBegin(_etype1605, _size1602); + (*(this->success)).resize(_size1602); + uint32_t _i1606; + for (_i1606 = 0; _i1606 < _size1602; ++_i1606) { - xfer += iprot->readString((*(this->success))[_i1587]); + xfer += iprot->readString((*(this->success))[_i1606]); } xfer += iprot->readListEnd(); } @@ -19299,14 +19299,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1588; - ::apache::thrift::protocol::TType _etype1591; - xfer += iprot->readListBegin(_etype1591, _size1588); - this->part_vals.resize(_size1588); - uint32_t _i1592; - for (_i1592 = 0; _i1592 < _size1588; ++_i1592) + uint32_t _size1607; + ::apache::thrift::protocol::TType _etype1610; + xfer += iprot->readListBegin(_etype1610, _size1607); + this->part_vals.resize(_size1607); + uint32_t _i1611; + for (_i1611 = 0; _i1611 < _size1607; ++_i1611) { - xfer += iprot->readString(this->part_vals[_i1592]); + xfer += iprot->readString(this->part_vals[_i1611]); } xfer += iprot->readListEnd(); } @@ -19351,10 +19351,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1593; - for (_iter1593 = this->part_vals.begin(); _iter1593 != this->part_vals.end(); ++_iter1593) + std::vector ::const_iterator _iter1612; + for (_iter1612 = this->part_vals.begin(); _iter1612 != this->part_vals.end(); ++_iter1612) { - xfer += oprot->writeString((*_iter1593)); + xfer += oprot->writeString((*_iter1612)); } xfer += oprot->writeListEnd(); } @@ -19390,10 +19390,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1594; - for (_iter1594 = (*(this->part_vals)).begin(); _iter1594 != (*(this->part_vals)).end(); ++_iter1594) + std::vector ::const_iterator _iter1613; + for (_iter1613 = (*(this->part_vals)).begin(); _iter1613 != (*(this->part_vals)).end(); ++_iter1613) { - xfer += oprot->writeString((*_iter1594)); + xfer += oprot->writeString((*_iter1613)); } xfer += oprot->writeListEnd(); } @@ -19438,14 +19438,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1595; - ::apache::thrift::protocol::TType _etype1598; - xfer += 
iprot->readListBegin(_etype1598, _size1595); - this->success.resize(_size1595); - uint32_t _i1599; - for (_i1599 = 0; _i1599 < _size1595; ++_i1599) + uint32_t _size1614; + ::apache::thrift::protocol::TType _etype1617; + xfer += iprot->readListBegin(_etype1617, _size1614); + this->success.resize(_size1614); + uint32_t _i1618; + for (_i1618 = 0; _i1618 < _size1614; ++_i1618) { - xfer += this->success[_i1599].read(iprot); + xfer += this->success[_i1618].read(iprot); } xfer += iprot->readListEnd(); } @@ -19492,10 +19492,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1600; - for (_iter1600 = this->success.begin(); _iter1600 != this->success.end(); ++_iter1600) + std::vector ::const_iterator _iter1619; + for (_iter1619 = this->success.begin(); _iter1619 != this->success.end(); ++_iter1619) { - xfer += (*_iter1600).write(oprot); + xfer += (*_iter1619).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19544,14 +19544,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1601; - ::apache::thrift::protocol::TType _etype1604; - xfer += iprot->readListBegin(_etype1604, _size1601); - (*(this->success)).resize(_size1601); - uint32_t _i1605; - for (_i1605 = 0; _i1605 < _size1601; ++_i1605) + uint32_t _size1620; + ::apache::thrift::protocol::TType _etype1623; + xfer += iprot->readListBegin(_etype1623, _size1620); + (*(this->success)).resize(_size1620); + uint32_t _i1624; + for (_i1624 = 0; _i1624 < _size1620; ++_i1624) { - xfer += (*(this->success))[_i1605].read(iprot); + xfer += (*(this->success))[_i1624].read(iprot); } xfer += iprot->readListEnd(); } @@ -19634,14 +19634,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1606; - ::apache::thrift::protocol::TType _etype1609; - xfer += iprot->readListBegin(_etype1609, _size1606); - this->part_vals.resize(_size1606); - uint32_t _i1610; - for (_i1610 = 0; _i1610 < _size1606; ++_i1610) + uint32_t _size1625; + ::apache::thrift::protocol::TType _etype1628; + xfer += iprot->readListBegin(_etype1628, _size1625); + this->part_vals.resize(_size1625); + uint32_t _i1629; + for (_i1629 = 0; _i1629 < _size1625; ++_i1629) { - xfer += iprot->readString(this->part_vals[_i1610]); + xfer += iprot->readString(this->part_vals[_i1629]); } xfer += iprot->readListEnd(); } @@ -19670,14 +19670,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1611; - ::apache::thrift::protocol::TType _etype1614; - xfer += iprot->readListBegin(_etype1614, _size1611); - this->group_names.resize(_size1611); - uint32_t _i1615; - for (_i1615 = 0; _i1615 < _size1611; ++_i1615) + uint32_t _size1630; + ::apache::thrift::protocol::TType _etype1633; + xfer += iprot->readListBegin(_etype1633, _size1630); + this->group_names.resize(_size1630); + uint32_t _i1634; + for (_i1634 = 0; _i1634 < _size1630; ++_i1634) { - xfer += iprot->readString(this->group_names[_i1615]); + xfer += iprot->readString(this->group_names[_i1634]); } xfer += iprot->readListEnd(); } @@ 
-19714,10 +19714,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1616; - for (_iter1616 = this->part_vals.begin(); _iter1616 != this->part_vals.end(); ++_iter1616) + std::vector ::const_iterator _iter1635; + for (_iter1635 = this->part_vals.begin(); _iter1635 != this->part_vals.end(); ++_iter1635) { - xfer += oprot->writeString((*_iter1616)); + xfer += oprot->writeString((*_iter1635)); } xfer += oprot->writeListEnd(); } @@ -19734,10 +19734,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1617; - for (_iter1617 = this->group_names.begin(); _iter1617 != this->group_names.end(); ++_iter1617) + std::vector ::const_iterator _iter1636; + for (_iter1636 = this->group_names.begin(); _iter1636 != this->group_names.end(); ++_iter1636) { - xfer += oprot->writeString((*_iter1617)); + xfer += oprot->writeString((*_iter1636)); } xfer += oprot->writeListEnd(); } @@ -19769,10 +19769,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1618; - for (_iter1618 = (*(this->part_vals)).begin(); _iter1618 != (*(this->part_vals)).end(); ++_iter1618) + std::vector ::const_iterator _iter1637; + for (_iter1637 = (*(this->part_vals)).begin(); _iter1637 != (*(this->part_vals)).end(); ++_iter1637) { - xfer += oprot->writeString((*_iter1618)); + xfer += oprot->writeString((*_iter1637)); } xfer += oprot->writeListEnd(); } @@ -19789,10 +19789,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1619; - for (_iter1619 = (*(this->group_names)).begin(); _iter1619 != (*(this->group_names)).end(); ++_iter1619) + std::vector ::const_iterator _iter1638; + for (_iter1638 = (*(this->group_names)).begin(); _iter1638 != (*(this->group_names)).end(); ++_iter1638) { - xfer += oprot->writeString((*_iter1619)); + xfer += oprot->writeString((*_iter1638)); } xfer += oprot->writeListEnd(); } @@ -19833,14 +19833,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1620; - ::apache::thrift::protocol::TType _etype1623; - xfer += iprot->readListBegin(_etype1623, _size1620); - this->success.resize(_size1620); - uint32_t _i1624; - for (_i1624 = 0; _i1624 < _size1620; ++_i1624) + uint32_t _size1639; + ::apache::thrift::protocol::TType _etype1642; + xfer += iprot->readListBegin(_etype1642, _size1639); + this->success.resize(_size1639); + uint32_t _i1643; + for (_i1643 = 0; _i1643 < _size1639; ++_i1643) { - xfer += this->success[_i1624].read(iprot); + xfer += 
this->success[_i1643].read(iprot); } xfer += iprot->readListEnd(); } @@ -19887,10 +19887,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1625; - for (_iter1625 = this->success.begin(); _iter1625 != this->success.end(); ++_iter1625) + std::vector ::const_iterator _iter1644; + for (_iter1644 = this->success.begin(); _iter1644 != this->success.end(); ++_iter1644) { - xfer += (*_iter1625).write(oprot); + xfer += (*_iter1644).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19939,14 +19939,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1626; - ::apache::thrift::protocol::TType _etype1629; - xfer += iprot->readListBegin(_etype1629, _size1626); - (*(this->success)).resize(_size1626); - uint32_t _i1630; - for (_i1630 = 0; _i1630 < _size1626; ++_i1630) + uint32_t _size1645; + ::apache::thrift::protocol::TType _etype1648; + xfer += iprot->readListBegin(_etype1648, _size1645); + (*(this->success)).resize(_size1645); + uint32_t _i1649; + for (_i1649 = 0; _i1649 < _size1645; ++_i1649) { - xfer += (*(this->success))[_i1630].read(iprot); + xfer += (*(this->success))[_i1649].read(iprot); } xfer += iprot->readListEnd(); } @@ -20029,14 +20029,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1631; - ::apache::thrift::protocol::TType _etype1634; - xfer += iprot->readListBegin(_etype1634, _size1631); - this->part_vals.resize(_size1631); - uint32_t _i1635; - for (_i1635 = 0; _i1635 < _size1631; ++_i1635) + uint32_t _size1650; + ::apache::thrift::protocol::TType _etype1653; + xfer += iprot->readListBegin(_etype1653, _size1650); + this->part_vals.resize(_size1650); + uint32_t _i1654; + for (_i1654 = 0; _i1654 < _size1650; ++_i1654) { - xfer += iprot->readString(this->part_vals[_i1635]); + xfer += iprot->readString(this->part_vals[_i1654]); } xfer += iprot->readListEnd(); } @@ -20081,10 +20081,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1636; - for (_iter1636 = this->part_vals.begin(); _iter1636 != this->part_vals.end(); ++_iter1636) + std::vector ::const_iterator _iter1655; + for (_iter1655 = this->part_vals.begin(); _iter1655 != this->part_vals.end(); ++_iter1655) { - xfer += oprot->writeString((*_iter1636)); + xfer += oprot->writeString((*_iter1655)); } xfer += oprot->writeListEnd(); } @@ -20120,10 +20120,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1637; - for (_iter1637 = (*(this->part_vals)).begin(); _iter1637 != (*(this->part_vals)).end(); ++_iter1637) + std::vector ::const_iterator _iter1656; + for (_iter1656 = (*(this->part_vals)).begin(); 
_iter1656 != (*(this->part_vals)).end(); ++_iter1656) { - xfer += oprot->writeString((*_iter1637)); + xfer += oprot->writeString((*_iter1656)); } xfer += oprot->writeListEnd(); } @@ -20168,14 +20168,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1638; - ::apache::thrift::protocol::TType _etype1641; - xfer += iprot->readListBegin(_etype1641, _size1638); - this->success.resize(_size1638); - uint32_t _i1642; - for (_i1642 = 0; _i1642 < _size1638; ++_i1642) + uint32_t _size1657; + ::apache::thrift::protocol::TType _etype1660; + xfer += iprot->readListBegin(_etype1660, _size1657); + this->success.resize(_size1657); + uint32_t _i1661; + for (_i1661 = 0; _i1661 < _size1657; ++_i1661) { - xfer += iprot->readString(this->success[_i1642]); + xfer += iprot->readString(this->success[_i1661]); } xfer += iprot->readListEnd(); } @@ -20222,10 +20222,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1643; - for (_iter1643 = this->success.begin(); _iter1643 != this->success.end(); ++_iter1643) + std::vector ::const_iterator _iter1662; + for (_iter1662 = this->success.begin(); _iter1662 != this->success.end(); ++_iter1662) { - xfer += oprot->writeString((*_iter1643)); + xfer += oprot->writeString((*_iter1662)); } xfer += oprot->writeListEnd(); } @@ -20274,14 +20274,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1644; - ::apache::thrift::protocol::TType _etype1647; - xfer += iprot->readListBegin(_etype1647, _size1644); - (*(this->success)).resize(_size1644); - uint32_t _i1648; - for (_i1648 = 0; _i1648 < _size1644; ++_i1648) + uint32_t _size1663; + ::apache::thrift::protocol::TType _etype1666; + xfer += iprot->readListBegin(_etype1666, _size1663); + (*(this->success)).resize(_size1663); + uint32_t _i1667; + for (_i1667 = 0; _i1667 < _size1663; ++_i1667) { - xfer += iprot->readString((*(this->success))[_i1648]); + xfer += iprot->readString((*(this->success))[_i1667]); } xfer += iprot->readListEnd(); } @@ -20475,14 +20475,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1649; - ::apache::thrift::protocol::TType _etype1652; - xfer += iprot->readListBegin(_etype1652, _size1649); - this->success.resize(_size1649); - uint32_t _i1653; - for (_i1653 = 0; _i1653 < _size1649; ++_i1653) + uint32_t _size1668; + ::apache::thrift::protocol::TType _etype1671; + xfer += iprot->readListBegin(_etype1671, _size1668); + this->success.resize(_size1668); + uint32_t _i1672; + for (_i1672 = 0; _i1672 < _size1668; ++_i1672) { - xfer += this->success[_i1653].read(iprot); + xfer += this->success[_i1672].read(iprot); } xfer += iprot->readListEnd(); } @@ -20529,10 +20529,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1654; - for 
(_iter1654 = this->success.begin(); _iter1654 != this->success.end(); ++_iter1654) + std::vector ::const_iterator _iter1673; + for (_iter1673 = this->success.begin(); _iter1673 != this->success.end(); ++_iter1673) { - xfer += (*_iter1654).write(oprot); + xfer += (*_iter1673).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20581,14 +20581,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1655; - ::apache::thrift::protocol::TType _etype1658; - xfer += iprot->readListBegin(_etype1658, _size1655); - (*(this->success)).resize(_size1655); - uint32_t _i1659; - for (_i1659 = 0; _i1659 < _size1655; ++_i1659) + uint32_t _size1674; + ::apache::thrift::protocol::TType _etype1677; + xfer += iprot->readListBegin(_etype1677, _size1674); + (*(this->success)).resize(_size1674); + uint32_t _i1678; + for (_i1678 = 0; _i1678 < _size1674; ++_i1678) { - xfer += (*(this->success))[_i1659].read(iprot); + xfer += (*(this->success))[_i1678].read(iprot); } xfer += iprot->readListEnd(); } @@ -20782,14 +20782,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1660; - ::apache::thrift::protocol::TType _etype1663; - xfer += iprot->readListBegin(_etype1663, _size1660); - this->success.resize(_size1660); - uint32_t _i1664; - for (_i1664 = 0; _i1664 < _size1660; ++_i1664) + uint32_t _size1679; + ::apache::thrift::protocol::TType _etype1682; + xfer += iprot->readListBegin(_etype1682, _size1679); + this->success.resize(_size1679); + uint32_t _i1683; + for (_i1683 = 0; _i1683 < _size1679; ++_i1683) { - xfer += this->success[_i1664].read(iprot); + xfer += this->success[_i1683].read(iprot); } xfer += iprot->readListEnd(); } @@ -20836,10 +20836,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1665; - for (_iter1665 = this->success.begin(); _iter1665 != this->success.end(); ++_iter1665) + std::vector ::const_iterator _iter1684; + for (_iter1684 = this->success.begin(); _iter1684 != this->success.end(); ++_iter1684) { - xfer += (*_iter1665).write(oprot); + xfer += (*_iter1684).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20888,14 +20888,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1666; - ::apache::thrift::protocol::TType _etype1669; - xfer += iprot->readListBegin(_etype1669, _size1666); - (*(this->success)).resize(_size1666); - uint32_t _i1670; - for (_i1670 = 0; _i1670 < _size1666; ++_i1670) + uint32_t _size1685; + ::apache::thrift::protocol::TType _etype1688; + xfer += iprot->readListBegin(_etype1688, _size1685); + (*(this->success)).resize(_size1685); + uint32_t _i1689; + for (_i1689 = 0; _i1689 < _size1685; ++_i1689) { - xfer += (*(this->success))[_i1670].read(iprot); + xfer += (*(this->success))[_i1689].read(iprot); } xfer += iprot->readListEnd(); } @@ -21464,14 +21464,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1671; - 
::apache::thrift::protocol::TType _etype1674; - xfer += iprot->readListBegin(_etype1674, _size1671); - this->names.resize(_size1671); - uint32_t _i1675; - for (_i1675 = 0; _i1675 < _size1671; ++_i1675) + uint32_t _size1690; + ::apache::thrift::protocol::TType _etype1693; + xfer += iprot->readListBegin(_etype1693, _size1690); + this->names.resize(_size1690); + uint32_t _i1694; + for (_i1694 = 0; _i1694 < _size1690; ++_i1694) { - xfer += iprot->readString(this->names[_i1675]); + xfer += iprot->readString(this->names[_i1694]); } xfer += iprot->readListEnd(); } @@ -21508,10 +21508,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1676; - for (_iter1676 = this->names.begin(); _iter1676 != this->names.end(); ++_iter1676) + std::vector ::const_iterator _iter1695; + for (_iter1695 = this->names.begin(); _iter1695 != this->names.end(); ++_iter1695) { - xfer += oprot->writeString((*_iter1676)); + xfer += oprot->writeString((*_iter1695)); } xfer += oprot->writeListEnd(); } @@ -21543,10 +21543,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1677; - for (_iter1677 = (*(this->names)).begin(); _iter1677 != (*(this->names)).end(); ++_iter1677) + std::vector ::const_iterator _iter1696; + for (_iter1696 = (*(this->names)).begin(); _iter1696 != (*(this->names)).end(); ++_iter1696) { - xfer += oprot->writeString((*_iter1677)); + xfer += oprot->writeString((*_iter1696)); } xfer += oprot->writeListEnd(); } @@ -21587,14 +21587,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1678; - ::apache::thrift::protocol::TType _etype1681; - xfer += iprot->readListBegin(_etype1681, _size1678); - this->success.resize(_size1678); - uint32_t _i1682; - for (_i1682 = 0; _i1682 < _size1678; ++_i1682) + uint32_t _size1697; + ::apache::thrift::protocol::TType _etype1700; + xfer += iprot->readListBegin(_etype1700, _size1697); + this->success.resize(_size1697); + uint32_t _i1701; + for (_i1701 = 0; _i1701 < _size1697; ++_i1701) { - xfer += this->success[_i1682].read(iprot); + xfer += this->success[_i1701].read(iprot); } xfer += iprot->readListEnd(); } @@ -21641,10 +21641,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1683; - for (_iter1683 = this->success.begin(); _iter1683 != this->success.end(); ++_iter1683) + std::vector ::const_iterator _iter1702; + for (_iter1702 = this->success.begin(); _iter1702 != this->success.end(); ++_iter1702) { - xfer += (*_iter1683).write(oprot); + xfer += (*_iter1702).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21693,14 +21693,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - 
uint32_t _size1684; - ::apache::thrift::protocol::TType _etype1687; - xfer += iprot->readListBegin(_etype1687, _size1684); - (*(this->success)).resize(_size1684); - uint32_t _i1688; - for (_i1688 = 0; _i1688 < _size1684; ++_i1688) + uint32_t _size1703; + ::apache::thrift::protocol::TType _etype1706; + xfer += iprot->readListBegin(_etype1706, _size1703); + (*(this->success)).resize(_size1703); + uint32_t _i1707; + for (_i1707 = 0; _i1707 < _size1703; ++_i1707) { - xfer += (*(this->success))[_i1688].read(iprot); + xfer += (*(this->success))[_i1707].read(iprot); } xfer += iprot->readListEnd(); } @@ -22022,14 +22022,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1689; - ::apache::thrift::protocol::TType _etype1692; - xfer += iprot->readListBegin(_etype1692, _size1689); - this->new_parts.resize(_size1689); - uint32_t _i1693; - for (_i1693 = 0; _i1693 < _size1689; ++_i1693) + uint32_t _size1708; + ::apache::thrift::protocol::TType _etype1711; + xfer += iprot->readListBegin(_etype1711, _size1708); + this->new_parts.resize(_size1708); + uint32_t _i1712; + for (_i1712 = 0; _i1712 < _size1708; ++_i1712) { - xfer += this->new_parts[_i1693].read(iprot); + xfer += this->new_parts[_i1712].read(iprot); } xfer += iprot->readListEnd(); } @@ -22066,10 +22066,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1694; - for (_iter1694 = this->new_parts.begin(); _iter1694 != this->new_parts.end(); ++_iter1694) + std::vector ::const_iterator _iter1713; + for (_iter1713 = this->new_parts.begin(); _iter1713 != this->new_parts.end(); ++_iter1713) { - xfer += (*_iter1694).write(oprot); + xfer += (*_iter1713).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22101,10 +22101,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1695; - for (_iter1695 = (*(this->new_parts)).begin(); _iter1695 != (*(this->new_parts)).end(); ++_iter1695) + std::vector ::const_iterator _iter1714; + for (_iter1714 = (*(this->new_parts)).begin(); _iter1714 != (*(this->new_parts)).end(); ++_iter1714) { - xfer += (*_iter1695).write(oprot); + xfer += (*_iter1714).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22270,45 +22270,9 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tbl_name); - this->__isset.tbl_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->new_parts.clear(); - uint32_t _size1696; - ::apache::thrift::protocol::TType _etype1699; - xfer += iprot->readListBegin(_etype1699, _size1696); - this->new_parts.resize(_size1696); - uint32_t _i1700; - for 
(_i1700 = 0; _i1700 < _size1696; ++_i1700) - { - xfer += this->new_parts[_i1700].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.new_parts = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->environment_context.read(iprot); - this->__isset.environment_context = true; + xfer += this->req.read(iprot); + this->__isset.req = true; } else { xfer += iprot->skip(ftype); } @@ -22330,28 +22294,8 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_with_environment_context_args"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tbl_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1701; - for (_iter1701 = this->new_parts.begin(); _iter1701 != this->new_parts.end(); ++_iter1701) - { - xfer += (*_iter1701).write(oprot); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += this->environment_context.write(oprot); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -22369,28 +22313,8 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_with_environment_context_pargs"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->tbl_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1702; - for (_iter1702 = (*(this->new_parts)).begin(); _iter1702 != (*(this->new_parts)).end(); ++_iter1702) - { - xfer += (*_iter1702).write(oprot); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += (*(this->environment_context)).write(oprot); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -22424,6 +22348,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_result::r } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; 
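/*
 * This is the one non-mechanical change in the section:
 * alter_partitions_with_environment_context no longer marshals four separate
 * fields (db_name at id 1, tbl_name at id 2, new_parts at id 3,
 * environment_context at id 4); it now carries a single request struct in
 * field 1 ("req"). Bundling arguments into a request struct is the usual
 * Thrift evolution move: new members, such as the transactional write-id
 * metadata this patch threads through the metastore, can be added later
 * without changing the method signature again. The request struct's name is
 * not visible in these hunks, so the sketch below uses a placeholder type:
 */
#include <cstdint>
#include <thrift/protocol/TProtocol.h>

struct ReqStub {  // stand-in for the generated request struct
  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const {
    uint32_t xfer = 0;
    xfer += oprot->writeStructBegin("ReqStub");
    xfer += oprot->writeFieldStop();
    xfer += oprot->writeStructEnd();
    return xfer;
  }
};

uint32_t writeAlterPartitionsArgs(::apache::thrift::protocol::TProtocol* oprot,
                                  const ReqStub& req) {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("alter_partitions_with_environment_context_args");
  xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1);
  xfer += req.write(oprot);        // the whole payload is one struct now
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop(); // end of fields, as in the rewritten hunk
  xfer += oprot->writeStructEnd();
  return xfer;
}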
+ } else { + xfer += iprot->skip(ftype); + } + break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -22458,7 +22390,11 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_result::w xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_with_environment_context_result"); - if (this->__isset.o1) { + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -22498,6 +22434,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_presult:: } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -22827,14 +22771,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1703; - ::apache::thrift::protocol::TType _etype1706; - xfer += iprot->readListBegin(_etype1706, _size1703); - this->part_vals.resize(_size1703); - uint32_t _i1707; - for (_i1707 = 0; _i1707 < _size1703; ++_i1707) + uint32_t _size1715; + ::apache::thrift::protocol::TType _etype1718; + xfer += iprot->readListBegin(_etype1718, _size1715); + this->part_vals.resize(_size1715); + uint32_t _i1719; + for (_i1719 = 0; _i1719 < _size1715; ++_i1719) { - xfer += iprot->readString(this->part_vals[_i1707]); + xfer += iprot->readString(this->part_vals[_i1719]); } xfer += iprot->readListEnd(); } @@ -22879,10 +22823,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1708; - for (_iter1708 = this->part_vals.begin(); _iter1708 != this->part_vals.end(); ++_iter1708) + std::vector ::const_iterator _iter1720; + for (_iter1720 = this->part_vals.begin(); _iter1720 != this->part_vals.end(); ++_iter1720) { - xfer += oprot->writeString((*_iter1708)); + xfer += oprot->writeString((*_iter1720)); } xfer += oprot->writeListEnd(); } @@ -22918,10 +22862,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1709; - for (_iter1709 = (*(this->part_vals)).begin(); _iter1709 != (*(this->part_vals)).end(); ++_iter1709) + std::vector ::const_iterator _iter1721; + for (_iter1721 = (*(this->part_vals)).begin(); _iter1721 != (*(this->part_vals)).end(); ++_iter1721) { - xfer += oprot->writeString((*_iter1709)); + xfer += oprot->writeString((*_iter1721)); } xfer += oprot->writeListEnd(); } @@ -23094,14 +23038,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t 
_size1710; - ::apache::thrift::protocol::TType _etype1713; - xfer += iprot->readListBegin(_etype1713, _size1710); - this->part_vals.resize(_size1710); - uint32_t _i1714; - for (_i1714 = 0; _i1714 < _size1710; ++_i1714) + uint32_t _size1722; + ::apache::thrift::protocol::TType _etype1725; + xfer += iprot->readListBegin(_etype1725, _size1722); + this->part_vals.resize(_size1722); + uint32_t _i1726; + for (_i1726 = 0; _i1726 < _size1722; ++_i1726) { - xfer += iprot->readString(this->part_vals[_i1714]); + xfer += iprot->readString(this->part_vals[_i1726]); } xfer += iprot->readListEnd(); } @@ -23138,10 +23082,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1715; - for (_iter1715 = this->part_vals.begin(); _iter1715 != this->part_vals.end(); ++_iter1715) + std::vector ::const_iterator _iter1727; + for (_iter1727 = this->part_vals.begin(); _iter1727 != this->part_vals.end(); ++_iter1727) { - xfer += oprot->writeString((*_iter1715)); + xfer += oprot->writeString((*_iter1727)); } xfer += oprot->writeListEnd(); } @@ -23169,10 +23113,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1716; - for (_iter1716 = (*(this->part_vals)).begin(); _iter1716 != (*(this->part_vals)).end(); ++_iter1716) + std::vector ::const_iterator _iter1728; + for (_iter1728 = (*(this->part_vals)).begin(); _iter1728 != (*(this->part_vals)).end(); ++_iter1728) { - xfer += oprot->writeString((*_iter1716)); + xfer += oprot->writeString((*_iter1728)); } xfer += oprot->writeListEnd(); } @@ -23647,14 +23591,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1717; - ::apache::thrift::protocol::TType _etype1720; - xfer += iprot->readListBegin(_etype1720, _size1717); - this->success.resize(_size1717); - uint32_t _i1721; - for (_i1721 = 0; _i1721 < _size1717; ++_i1721) + uint32_t _size1729; + ::apache::thrift::protocol::TType _etype1732; + xfer += iprot->readListBegin(_etype1732, _size1729); + this->success.resize(_size1729); + uint32_t _i1733; + for (_i1733 = 0; _i1733 < _size1729; ++_i1733) { - xfer += iprot->readString(this->success[_i1721]); + xfer += iprot->readString(this->success[_i1733]); } xfer += iprot->readListEnd(); } @@ -23693,10 +23637,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1722; - for (_iter1722 = this->success.begin(); _iter1722 != this->success.end(); ++_iter1722) + std::vector ::const_iterator _iter1734; + for (_iter1734 = this->success.begin(); _iter1734 != this->success.end(); ++_iter1734) { - xfer += oprot->writeString((*_iter1722)); + xfer += oprot->writeString((*_iter1734)); } xfer += oprot->writeListEnd(); } @@ -23741,14 +23685,14 @@ uint32_t 
ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1723; - ::apache::thrift::protocol::TType _etype1726; - xfer += iprot->readListBegin(_etype1726, _size1723); - (*(this->success)).resize(_size1723); - uint32_t _i1727; - for (_i1727 = 0; _i1727 < _size1723; ++_i1727) + uint32_t _size1735; + ::apache::thrift::protocol::TType _etype1738; + xfer += iprot->readListBegin(_etype1738, _size1735); + (*(this->success)).resize(_size1735); + uint32_t _i1739; + for (_i1739 = 0; _i1739 < _size1735; ++_i1739) { - xfer += iprot->readString((*(this->success))[_i1727]); + xfer += iprot->readString((*(this->success))[_i1739]); } xfer += iprot->readListEnd(); } @@ -23886,17 +23830,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1728; - ::apache::thrift::protocol::TType _ktype1729; - ::apache::thrift::protocol::TType _vtype1730; - xfer += iprot->readMapBegin(_ktype1729, _vtype1730, _size1728); - uint32_t _i1732; - for (_i1732 = 0; _i1732 < _size1728; ++_i1732) + uint32_t _size1740; + ::apache::thrift::protocol::TType _ktype1741; + ::apache::thrift::protocol::TType _vtype1742; + xfer += iprot->readMapBegin(_ktype1741, _vtype1742, _size1740); + uint32_t _i1744; + for (_i1744 = 0; _i1744 < _size1740; ++_i1744) { - std::string _key1733; - xfer += iprot->readString(_key1733); - std::string& _val1734 = this->success[_key1733]; - xfer += iprot->readString(_val1734); + std::string _key1745; + xfer += iprot->readString(_key1745); + std::string& _val1746 = this->success[_key1745]; + xfer += iprot->readString(_val1746); } xfer += iprot->readMapEnd(); } @@ -23935,11 +23879,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1735; - for (_iter1735 = this->success.begin(); _iter1735 != this->success.end(); ++_iter1735) + std::map ::const_iterator _iter1747; + for (_iter1747 = this->success.begin(); _iter1747 != this->success.end(); ++_iter1747) { - xfer += oprot->writeString(_iter1735->first); - xfer += oprot->writeString(_iter1735->second); + xfer += oprot->writeString(_iter1747->first); + xfer += oprot->writeString(_iter1747->second); } xfer += oprot->writeMapEnd(); } @@ -23984,17 +23928,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1736; - ::apache::thrift::protocol::TType _ktype1737; - ::apache::thrift::protocol::TType _vtype1738; - xfer += iprot->readMapBegin(_ktype1737, _vtype1738, _size1736); - uint32_t _i1740; - for (_i1740 = 0; _i1740 < _size1736; ++_i1740) + uint32_t _size1748; + ::apache::thrift::protocol::TType _ktype1749; + ::apache::thrift::protocol::TType _vtype1750; + xfer += iprot->readMapBegin(_ktype1749, _vtype1750, _size1748); + uint32_t _i1752; + for (_i1752 = 0; _i1752 < _size1748; ++_i1752) { - std::string _key1741; - xfer += iprot->readString(_key1741); - std::string& _val1742 = (*(this->success))[_key1741]; - xfer += iprot->readString(_val1742); + std::string _key1753; + xfer += iprot->readString(_key1753); + std::string& _val1754 = 
(*(this->success))[_key1753]; + xfer += iprot->readString(_val1754); } xfer += iprot->readMapEnd(); } @@ -24069,17 +24013,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1743; - ::apache::thrift::protocol::TType _ktype1744; - ::apache::thrift::protocol::TType _vtype1745; - xfer += iprot->readMapBegin(_ktype1744, _vtype1745, _size1743); - uint32_t _i1747; - for (_i1747 = 0; _i1747 < _size1743; ++_i1747) + uint32_t _size1755; + ::apache::thrift::protocol::TType _ktype1756; + ::apache::thrift::protocol::TType _vtype1757; + xfer += iprot->readMapBegin(_ktype1756, _vtype1757, _size1755); + uint32_t _i1759; + for (_i1759 = 0; _i1759 < _size1755; ++_i1759) { - std::string _key1748; - xfer += iprot->readString(_key1748); - std::string& _val1749 = this->part_vals[_key1748]; - xfer += iprot->readString(_val1749); + std::string _key1760; + xfer += iprot->readString(_key1760); + std::string& _val1761 = this->part_vals[_key1760]; + xfer += iprot->readString(_val1761); } xfer += iprot->readMapEnd(); } @@ -24090,9 +24034,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1750; - xfer += iprot->readI32(ecast1750); - this->eventType = (PartitionEventType::type)ecast1750; + int32_t ecast1762; + xfer += iprot->readI32(ecast1762); + this->eventType = (PartitionEventType::type)ecast1762; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -24126,11 +24070,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1751; - for (_iter1751 = this->part_vals.begin(); _iter1751 != this->part_vals.end(); ++_iter1751) + std::map ::const_iterator _iter1763; + for (_iter1763 = this->part_vals.begin(); _iter1763 != this->part_vals.end(); ++_iter1763) { - xfer += oprot->writeString(_iter1751->first); - xfer += oprot->writeString(_iter1751->second); + xfer += oprot->writeString(_iter1763->first); + xfer += oprot->writeString(_iter1763->second); } xfer += oprot->writeMapEnd(); } @@ -24166,11 +24110,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1752; - for (_iter1752 = (*(this->part_vals)).begin(); _iter1752 != (*(this->part_vals)).end(); ++_iter1752) + std::map ::const_iterator _iter1764; + for (_iter1764 = (*(this->part_vals)).begin(); _iter1764 != (*(this->part_vals)).end(); ++_iter1764) { - xfer += oprot->writeString(_iter1752->first); - xfer += oprot->writeString(_iter1752->second); + xfer += oprot->writeString(_iter1764->first); + xfer += oprot->writeString(_iter1764->second); } xfer += oprot->writeMapEnd(); } @@ -24439,17 +24383,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1753; - ::apache::thrift::protocol::TType _ktype1754; - 
::apache::thrift::protocol::TType _vtype1755; - xfer += iprot->readMapBegin(_ktype1754, _vtype1755, _size1753); - uint32_t _i1757; - for (_i1757 = 0; _i1757 < _size1753; ++_i1757) + uint32_t _size1765; + ::apache::thrift::protocol::TType _ktype1766; + ::apache::thrift::protocol::TType _vtype1767; + xfer += iprot->readMapBegin(_ktype1766, _vtype1767, _size1765); + uint32_t _i1769; + for (_i1769 = 0; _i1769 < _size1765; ++_i1769) { - std::string _key1758; - xfer += iprot->readString(_key1758); - std::string& _val1759 = this->part_vals[_key1758]; - xfer += iprot->readString(_val1759); + std::string _key1770; + xfer += iprot->readString(_key1770); + std::string& _val1771 = this->part_vals[_key1770]; + xfer += iprot->readString(_val1771); } xfer += iprot->readMapEnd(); } @@ -24460,9 +24404,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1760; - xfer += iprot->readI32(ecast1760); - this->eventType = (PartitionEventType::type)ecast1760; + int32_t ecast1772; + xfer += iprot->readI32(ecast1772); + this->eventType = (PartitionEventType::type)ecast1772; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -24496,11 +24440,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1761; - for (_iter1761 = this->part_vals.begin(); _iter1761 != this->part_vals.end(); ++_iter1761) + std::map ::const_iterator _iter1773; + for (_iter1773 = this->part_vals.begin(); _iter1773 != this->part_vals.end(); ++_iter1773) { - xfer += oprot->writeString(_iter1761->first); - xfer += oprot->writeString(_iter1761->second); + xfer += oprot->writeString(_iter1773->first); + xfer += oprot->writeString(_iter1773->second); } xfer += oprot->writeMapEnd(); } @@ -24536,11 +24480,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1762; - for (_iter1762 = (*(this->part_vals)).begin(); _iter1762 != (*(this->part_vals)).end(); ++_iter1762) + std::map ::const_iterator _iter1774; + for (_iter1774 = (*(this->part_vals)).begin(); _iter1774 != (*(this->part_vals)).end(); ++_iter1774) { - xfer += oprot->writeString(_iter1762->first); - xfer += oprot->writeString(_iter1762->second); + xfer += oprot->writeString(_iter1774->first); + xfer += oprot->writeString(_iter1774->second); } xfer += oprot->writeMapEnd(); } @@ -29689,14 +29633,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1763; - ::apache::thrift::protocol::TType _etype1766; - xfer += iprot->readListBegin(_etype1766, _size1763); - this->success.resize(_size1763); - uint32_t _i1767; - for (_i1767 = 0; _i1767 < _size1763; ++_i1767) + uint32_t _size1775; + ::apache::thrift::protocol::TType _etype1778; + xfer += iprot->readListBegin(_etype1778, _size1775); + this->success.resize(_size1775); + uint32_t _i1779; + for (_i1779 = 0; _i1779 < 
_size1775; ++_i1779) { - xfer += iprot->readString(this->success[_i1767]); + xfer += iprot->readString(this->success[_i1779]); } xfer += iprot->readListEnd(); } @@ -29735,10 +29679,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1768; - for (_iter1768 = this->success.begin(); _iter1768 != this->success.end(); ++_iter1768) + std::vector ::const_iterator _iter1780; + for (_iter1780 = this->success.begin(); _iter1780 != this->success.end(); ++_iter1780) { - xfer += oprot->writeString((*_iter1768)); + xfer += oprot->writeString((*_iter1780)); } xfer += oprot->writeListEnd(); } @@ -29783,14 +29727,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1769; - ::apache::thrift::protocol::TType _etype1772; - xfer += iprot->readListBegin(_etype1772, _size1769); - (*(this->success)).resize(_size1769); - uint32_t _i1773; - for (_i1773 = 0; _i1773 < _size1769; ++_i1773) + uint32_t _size1781; + ::apache::thrift::protocol::TType _etype1784; + xfer += iprot->readListBegin(_etype1784, _size1781); + (*(this->success)).resize(_size1781); + uint32_t _i1785; + for (_i1785 = 0; _i1785 < _size1781; ++_i1785) { - xfer += iprot->readString((*(this->success))[_i1773]); + xfer += iprot->readString((*(this->success))[_i1785]); } xfer += iprot->readListEnd(); } @@ -30750,14 +30694,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1774; - ::apache::thrift::protocol::TType _etype1777; - xfer += iprot->readListBegin(_etype1777, _size1774); - this->success.resize(_size1774); - uint32_t _i1778; - for (_i1778 = 0; _i1778 < _size1774; ++_i1778) + uint32_t _size1786; + ::apache::thrift::protocol::TType _etype1789; + xfer += iprot->readListBegin(_etype1789, _size1786); + this->success.resize(_size1786); + uint32_t _i1790; + for (_i1790 = 0; _i1790 < _size1786; ++_i1790) { - xfer += iprot->readString(this->success[_i1778]); + xfer += iprot->readString(this->success[_i1790]); } xfer += iprot->readListEnd(); } @@ -30796,10 +30740,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1779; - for (_iter1779 = this->success.begin(); _iter1779 != this->success.end(); ++_iter1779) + std::vector ::const_iterator _iter1791; + for (_iter1791 = this->success.begin(); _iter1791 != this->success.end(); ++_iter1791) { - xfer += oprot->writeString((*_iter1779)); + xfer += oprot->writeString((*_iter1791)); } xfer += oprot->writeListEnd(); } @@ -30844,14 +30788,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1780; - ::apache::thrift::protocol::TType _etype1783; - xfer += iprot->readListBegin(_etype1783, _size1780); - (*(this->success)).resize(_size1780); - uint32_t _i1784; - for (_i1784 = 0; _i1784 < _size1780; ++_i1784) + uint32_t _size1792; + 
::apache::thrift::protocol::TType _etype1795; + xfer += iprot->readListBegin(_etype1795, _size1792); + (*(this->success)).resize(_size1792); + uint32_t _i1796; + for (_i1796 = 0; _i1796 < _size1792; ++_i1796) { - xfer += iprot->readString((*(this->success))[_i1784]); + xfer += iprot->readString((*(this->success))[_i1796]); } xfer += iprot->readListEnd(); } @@ -30924,9 +30868,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1785; - xfer += iprot->readI32(ecast1785); - this->principal_type = (PrincipalType::type)ecast1785; + int32_t ecast1797; + xfer += iprot->readI32(ecast1797); + this->principal_type = (PrincipalType::type)ecast1797; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30942,9 +30886,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1786; - xfer += iprot->readI32(ecast1786); - this->grantorType = (PrincipalType::type)ecast1786; + int32_t ecast1798; + xfer += iprot->readI32(ecast1798); + this->grantorType = (PrincipalType::type)ecast1798; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -31215,9 +31159,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1787; - xfer += iprot->readI32(ecast1787); - this->principal_type = (PrincipalType::type)ecast1787; + int32_t ecast1799; + xfer += iprot->readI32(ecast1799); + this->principal_type = (PrincipalType::type)ecast1799; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31448,9 +31392,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1788; - xfer += iprot->readI32(ecast1788); - this->principal_type = (PrincipalType::type)ecast1788; + int32_t ecast1800; + xfer += iprot->readI32(ecast1800); + this->principal_type = (PrincipalType::type)ecast1800; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31539,14 +31483,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1789; - ::apache::thrift::protocol::TType _etype1792; - xfer += iprot->readListBegin(_etype1792, _size1789); - this->success.resize(_size1789); - uint32_t _i1793; - for (_i1793 = 0; _i1793 < _size1789; ++_i1793) + uint32_t _size1801; + ::apache::thrift::protocol::TType _etype1804; + xfer += iprot->readListBegin(_etype1804, _size1801); + this->success.resize(_size1801); + uint32_t _i1805; + for (_i1805 = 0; _i1805 < _size1801; ++_i1805) { - xfer += this->success[_i1793].read(iprot); + xfer += this->success[_i1805].read(iprot); } xfer += iprot->readListEnd(); } @@ -31585,10 +31529,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1794; - for (_iter1794 = this->success.begin(); _iter1794 != this->success.end(); ++_iter1794) + std::vector ::const_iterator _iter1806; + for (_iter1806 = this->success.begin(); _iter1806 != this->success.end(); 
++_iter1806) { - xfer += (*_iter1794).write(oprot); + xfer += (*_iter1806).write(oprot); } xfer += oprot->writeListEnd(); } @@ -31633,14 +31577,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1795; - ::apache::thrift::protocol::TType _etype1798; - xfer += iprot->readListBegin(_etype1798, _size1795); - (*(this->success)).resize(_size1795); - uint32_t _i1799; - for (_i1799 = 0; _i1799 < _size1795; ++_i1799) + uint32_t _size1807; + ::apache::thrift::protocol::TType _etype1810; + xfer += iprot->readListBegin(_etype1810, _size1807); + (*(this->success)).resize(_size1807); + uint32_t _i1811; + for (_i1811 = 0; _i1811 < _size1807; ++_i1811) { - xfer += (*(this->success))[_i1799].read(iprot); + xfer += (*(this->success))[_i1811].read(iprot); } xfer += iprot->readListEnd(); } @@ -32336,14 +32280,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1800; - ::apache::thrift::protocol::TType _etype1803; - xfer += iprot->readListBegin(_etype1803, _size1800); - this->group_names.resize(_size1800); - uint32_t _i1804; - for (_i1804 = 0; _i1804 < _size1800; ++_i1804) + uint32_t _size1812; + ::apache::thrift::protocol::TType _etype1815; + xfer += iprot->readListBegin(_etype1815, _size1812); + this->group_names.resize(_size1812); + uint32_t _i1816; + for (_i1816 = 0; _i1816 < _size1812; ++_i1816) { - xfer += iprot->readString(this->group_names[_i1804]); + xfer += iprot->readString(this->group_names[_i1816]); } xfer += iprot->readListEnd(); } @@ -32380,10 +32324,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1805; - for (_iter1805 = this->group_names.begin(); _iter1805 != this->group_names.end(); ++_iter1805) + std::vector ::const_iterator _iter1817; + for (_iter1817 = this->group_names.begin(); _iter1817 != this->group_names.end(); ++_iter1817) { - xfer += oprot->writeString((*_iter1805)); + xfer += oprot->writeString((*_iter1817)); } xfer += oprot->writeListEnd(); } @@ -32415,10 +32359,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1806; - for (_iter1806 = (*(this->group_names)).begin(); _iter1806 != (*(this->group_names)).end(); ++_iter1806) + std::vector ::const_iterator _iter1818; + for (_iter1818 = (*(this->group_names)).begin(); _iter1818 != (*(this->group_names)).end(); ++_iter1818) { - xfer += oprot->writeString((*_iter1806)); + xfer += oprot->writeString((*_iter1818)); } xfer += oprot->writeListEnd(); } @@ -32593,9 +32537,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1807; - xfer += iprot->readI32(ecast1807); - this->principal_type = (PrincipalType::type)ecast1807; + int32_t ecast1819; + xfer += iprot->readI32(ecast1819); + this->principal_type = (PrincipalType::type)ecast1819; 
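Two things worth noting in this long run of hunks. First, every generated temporary moves up by exactly 12 (_size1703 to _size1715, ecast1750 to ecast1762, ecast1807 to ecast1819): the C++ generator numbers temporaries with one file-global counter, so the new AlterPartitions serialization code earlier in the file shifts every later ID by a fixed offset; the renumbering carries no behavioral change. Second, the ecast hunks show the enum wire convention: enums travel as i32 and are cast back without range checks. A self-contained sketch of that pattern (PrincipalType comes from hive_metastore_types.h):

#include <thrift/protocol/TProtocol.h>

// Sketch of the ecast pattern above: the unchecked cast means an unknown
// ordinal from a newer peer is preserved as an out-of-range enum value
// rather than failing the read.
PrincipalType::type readPrincipalType(
    ::apache::thrift::protocol::TProtocol* iprot) {
  int32_t raw = 0;
  iprot->readI32(raw);
  return static_cast<PrincipalType::type>(raw);
}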
this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -32700,14 +32644,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1808; - ::apache::thrift::protocol::TType _etype1811; - xfer += iprot->readListBegin(_etype1811, _size1808); - this->success.resize(_size1808); - uint32_t _i1812; - for (_i1812 = 0; _i1812 < _size1808; ++_i1812) + uint32_t _size1820; + ::apache::thrift::protocol::TType _etype1823; + xfer += iprot->readListBegin(_etype1823, _size1820); + this->success.resize(_size1820); + uint32_t _i1824; + for (_i1824 = 0; _i1824 < _size1820; ++_i1824) { - xfer += this->success[_i1812].read(iprot); + xfer += this->success[_i1824].read(iprot); } xfer += iprot->readListEnd(); } @@ -32746,10 +32690,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1813; - for (_iter1813 = this->success.begin(); _iter1813 != this->success.end(); ++_iter1813) + std::vector ::const_iterator _iter1825; + for (_iter1825 = this->success.begin(); _iter1825 != this->success.end(); ++_iter1825) { - xfer += (*_iter1813).write(oprot); + xfer += (*_iter1825).write(oprot); } xfer += oprot->writeListEnd(); } @@ -32794,14 +32738,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1814; - ::apache::thrift::protocol::TType _etype1817; - xfer += iprot->readListBegin(_etype1817, _size1814); - (*(this->success)).resize(_size1814); - uint32_t _i1818; - for (_i1818 = 0; _i1818 < _size1814; ++_i1818) + uint32_t _size1826; + ::apache::thrift::protocol::TType _etype1829; + xfer += iprot->readListBegin(_etype1829, _size1826); + (*(this->success)).resize(_size1826); + uint32_t _i1830; + for (_i1830 = 0; _i1830 < _size1826; ++_i1830) { - xfer += (*(this->success))[_i1818].read(iprot); + xfer += (*(this->success))[_i1830].read(iprot); } xfer += iprot->readListEnd(); } @@ -33728,14 +33672,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1819; - ::apache::thrift::protocol::TType _etype1822; - xfer += iprot->readListBegin(_etype1822, _size1819); - this->group_names.resize(_size1819); - uint32_t _i1823; - for (_i1823 = 0; _i1823 < _size1819; ++_i1823) + uint32_t _size1831; + ::apache::thrift::protocol::TType _etype1834; + xfer += iprot->readListBegin(_etype1834, _size1831); + this->group_names.resize(_size1831); + uint32_t _i1835; + for (_i1835 = 0; _i1835 < _size1831; ++_i1835) { - xfer += iprot->readString(this->group_names[_i1823]); + xfer += iprot->readString(this->group_names[_i1835]); } xfer += iprot->readListEnd(); } @@ -33768,10 +33712,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1824; - for (_iter1824 = this->group_names.begin(); _iter1824 != this->group_names.end(); ++_iter1824) + std::vector 
::const_iterator _iter1836; + for (_iter1836 = this->group_names.begin(); _iter1836 != this->group_names.end(); ++_iter1836) { - xfer += oprot->writeString((*_iter1824)); + xfer += oprot->writeString((*_iter1836)); } xfer += oprot->writeListEnd(); } @@ -33799,10 +33743,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1825; - for (_iter1825 = (*(this->group_names)).begin(); _iter1825 != (*(this->group_names)).end(); ++_iter1825) + std::vector ::const_iterator _iter1837; + for (_iter1837 = (*(this->group_names)).begin(); _iter1837 != (*(this->group_names)).end(); ++_iter1837) { - xfer += oprot->writeString((*_iter1825)); + xfer += oprot->writeString((*_iter1837)); } xfer += oprot->writeListEnd(); } @@ -33843,14 +33787,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1826; - ::apache::thrift::protocol::TType _etype1829; - xfer += iprot->readListBegin(_etype1829, _size1826); - this->success.resize(_size1826); - uint32_t _i1830; - for (_i1830 = 0; _i1830 < _size1826; ++_i1830) + uint32_t _size1838; + ::apache::thrift::protocol::TType _etype1841; + xfer += iprot->readListBegin(_etype1841, _size1838); + this->success.resize(_size1838); + uint32_t _i1842; + for (_i1842 = 0; _i1842 < _size1838; ++_i1842) { - xfer += iprot->readString(this->success[_i1830]); + xfer += iprot->readString(this->success[_i1842]); } xfer += iprot->readListEnd(); } @@ -33889,10 +33833,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1831; - for (_iter1831 = this->success.begin(); _iter1831 != this->success.end(); ++_iter1831) + std::vector ::const_iterator _iter1843; + for (_iter1843 = this->success.begin(); _iter1843 != this->success.end(); ++_iter1843) { - xfer += oprot->writeString((*_iter1831)); + xfer += oprot->writeString((*_iter1843)); } xfer += oprot->writeListEnd(); } @@ -33937,14 +33881,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1832; - ::apache::thrift::protocol::TType _etype1835; - xfer += iprot->readListBegin(_etype1835, _size1832); - (*(this->success)).resize(_size1832); - uint32_t _i1836; - for (_i1836 = 0; _i1836 < _size1832; ++_i1836) + uint32_t _size1844; + ::apache::thrift::protocol::TType _etype1847; + xfer += iprot->readListBegin(_etype1847, _size1844); + (*(this->success)).resize(_size1844); + uint32_t _i1848; + for (_i1848 = 0; _i1848 < _size1844; ++_i1848) { - xfer += iprot->readString((*(this->success))[_i1836]); + xfer += iprot->readString((*(this->success))[_i1848]); } xfer += iprot->readListEnd(); } @@ -35255,14 +35199,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1837; - ::apache::thrift::protocol::TType _etype1840; - xfer += iprot->readListBegin(_etype1840, _size1837); 
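All of the list hunks here (set_ugi, get_all_token_identifiers, and the rest) instantiate one generated template: read the size prefix, resize the vector once, then read each element in place. A self-contained sketch of the string case:

#include <thrift/protocol/TProtocol.h>
#include <string>
#include <vector>

// Sketch of the generated list<string> read pattern: size-prefixed, a
// single resize, then per-element reads straight into the vector storage.
uint32_t readStringList(::apache::thrift::protocol::TProtocol* iprot,
                        std::vector<std::string>& out) {
  uint32_t xfer = 0;
  uint32_t size = 0;
  ::apache::thrift::protocol::TType etype;  // element type tag from the wire
  xfer += iprot->readListBegin(etype, size);
  out.clear();
  out.resize(size);
  for (uint32_t i = 0; i < size; ++i) {
    xfer += iprot->readString(out[i]);
  }
  xfer += iprot->readListEnd();
  return xfer;  // byte count, matching the generator's xfer accounting
}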
- this->success.resize(_size1837); - uint32_t _i1841; - for (_i1841 = 0; _i1841 < _size1837; ++_i1841) + uint32_t _size1849; + ::apache::thrift::protocol::TType _etype1852; + xfer += iprot->readListBegin(_etype1852, _size1849); + this->success.resize(_size1849); + uint32_t _i1853; + for (_i1853 = 0; _i1853 < _size1849; ++_i1853) { - xfer += iprot->readString(this->success[_i1841]); + xfer += iprot->readString(this->success[_i1853]); } xfer += iprot->readListEnd(); } @@ -35293,10 +35237,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1842; - for (_iter1842 = this->success.begin(); _iter1842 != this->success.end(); ++_iter1842) + std::vector ::const_iterator _iter1854; + for (_iter1854 = this->success.begin(); _iter1854 != this->success.end(); ++_iter1854) { - xfer += oprot->writeString((*_iter1842)); + xfer += oprot->writeString((*_iter1854)); } xfer += oprot->writeListEnd(); } @@ -35337,14 +35281,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1843; - ::apache::thrift::protocol::TType _etype1846; - xfer += iprot->readListBegin(_etype1846, _size1843); - (*(this->success)).resize(_size1843); - uint32_t _i1847; - for (_i1847 = 0; _i1847 < _size1843; ++_i1847) + uint32_t _size1855; + ::apache::thrift::protocol::TType _etype1858; + xfer += iprot->readListBegin(_etype1858, _size1855); + (*(this->success)).resize(_size1855); + uint32_t _i1859; + for (_i1859 = 0; _i1859 < _size1855; ++_i1859) { - xfer += iprot->readString((*(this->success))[_i1847]); + xfer += iprot->readString((*(this->success))[_i1859]); } xfer += iprot->readListEnd(); } @@ -36070,14 +36014,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1848; - ::apache::thrift::protocol::TType _etype1851; - xfer += iprot->readListBegin(_etype1851, _size1848); - this->success.resize(_size1848); - uint32_t _i1852; - for (_i1852 = 0; _i1852 < _size1848; ++_i1852) + uint32_t _size1860; + ::apache::thrift::protocol::TType _etype1863; + xfer += iprot->readListBegin(_etype1863, _size1860); + this->success.resize(_size1860); + uint32_t _i1864; + for (_i1864 = 0; _i1864 < _size1860; ++_i1864) { - xfer += iprot->readString(this->success[_i1852]); + xfer += iprot->readString(this->success[_i1864]); } xfer += iprot->readListEnd(); } @@ -36108,10 +36052,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1853; - for (_iter1853 = this->success.begin(); _iter1853 != this->success.end(); ++_iter1853) + std::vector ::const_iterator _iter1865; + for (_iter1865 = this->success.begin(); _iter1865 != this->success.end(); ++_iter1865) { - xfer += oprot->writeString((*_iter1853)); + xfer += oprot->writeString((*_iter1865)); } xfer += oprot->writeListEnd(); } @@ -36152,14 +36096,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == 
::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1854; - ::apache::thrift::protocol::TType _etype1857; - xfer += iprot->readListBegin(_etype1857, _size1854); - (*(this->success)).resize(_size1854); - uint32_t _i1858; - for (_i1858 = 0; _i1858 < _size1854; ++_i1858) + uint32_t _size1866; + ::apache::thrift::protocol::TType _etype1869; + xfer += iprot->readListBegin(_etype1869, _size1866); + (*(this->success)).resize(_size1866); + uint32_t _i1870; + for (_i1870 = 0; _i1870 < _size1866; ++_i1870) { - xfer += iprot->readString((*(this->success))[_i1858]); + xfer += iprot->readString((*(this->success))[_i1870]); } xfer += iprot->readListEnd(); } @@ -47956,14 +47900,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1859; - ::apache::thrift::protocol::TType _etype1862; - xfer += iprot->readListBegin(_etype1862, _size1859); - this->success.resize(_size1859); - uint32_t _i1863; - for (_i1863 = 0; _i1863 < _size1859; ++_i1863) + uint32_t _size1871; + ::apache::thrift::protocol::TType _etype1874; + xfer += iprot->readListBegin(_etype1874, _size1871); + this->success.resize(_size1871); + uint32_t _i1875; + for (_i1875 = 0; _i1875 < _size1871; ++_i1875) { - xfer += this->success[_i1863].read(iprot); + xfer += this->success[_i1875].read(iprot); } xfer += iprot->readListEnd(); } @@ -48010,10 +47954,10 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1864; - for (_iter1864 = this->success.begin(); _iter1864 != this->success.end(); ++_iter1864) + std::vector ::const_iterator _iter1876; + for (_iter1876 = this->success.begin(); _iter1876 != this->success.end(); ++_iter1876) { - xfer += (*_iter1864).write(oprot); + xfer += (*_iter1876).write(oprot); } xfer += oprot->writeListEnd(); } @@ -48062,14 +48006,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1865; - ::apache::thrift::protocol::TType _etype1868; - xfer += iprot->readListBegin(_etype1868, _size1865); - (*(this->success)).resize(_size1865); - uint32_t _i1869; - for (_i1869 = 0; _i1869 < _size1865; ++_i1869) + uint32_t _size1877; + ::apache::thrift::protocol::TType _etype1880; + xfer += iprot->readListBegin(_etype1880, _size1877); + (*(this->success)).resize(_size1877); + uint32_t _i1881; + for (_i1881 = 0; _i1881 < _size1877; ++_i1881) { - xfer += (*(this->success))[_i1869].read(iprot); + xfer += (*(this->success))[_i1881].read(iprot); } xfer += iprot->readListEnd(); } @@ -50122,14 +50066,14 @@ uint32_t ThriftHiveMetastore_get_runtime_stats_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1870; - ::apache::thrift::protocol::TType _etype1873; - xfer += iprot->readListBegin(_etype1873, _size1870); - this->success.resize(_size1870); - uint32_t _i1874; - for (_i1874 = 0; _i1874 < _size1870; ++_i1874) + uint32_t _size1882; + ::apache::thrift::protocol::TType _etype1885; + xfer += iprot->readListBegin(_etype1885, _size1882); + this->success.resize(_size1882); + uint32_t _i1886; + for (_i1886 = 0; _i1886 < _size1882; ++_i1886) { - xfer 
+= this->success[_i1874].read(iprot); + xfer += this->success[_i1886].read(iprot); } xfer += iprot->readListEnd(); } @@ -50168,10 +50112,10 @@ uint32_t ThriftHiveMetastore_get_runtime_stats_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1875; - for (_iter1875 = this->success.begin(); _iter1875 != this->success.end(); ++_iter1875) + std::vector ::const_iterator _iter1887; + for (_iter1887 = this->success.begin(); _iter1887 != this->success.end(); ++_iter1887) { - xfer += (*_iter1875).write(oprot); + xfer += (*_iter1887).write(oprot); } xfer += oprot->writeListEnd(); } @@ -50216,14 +50160,14 @@ uint32_t ThriftHiveMetastore_get_runtime_stats_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1876; - ::apache::thrift::protocol::TType _etype1879; - xfer += iprot->readListBegin(_etype1879, _size1876); - (*(this->success)).resize(_size1876); - uint32_t _i1880; - for (_i1880 = 0; _i1880 < _size1876; ++_i1880) + uint32_t _size1888; + ::apache::thrift::protocol::TType _etype1891; + xfer += iprot->readListBegin(_etype1891, _size1888); + (*(this->success)).resize(_size1888); + uint32_t _i1892; + for (_i1892 = 0; _i1892 < _size1888; ++_i1892) { - xfer += (*(this->success))[_i1880].read(iprot); + xfer += (*(this->success))[_i1892].read(iprot); } xfer += iprot->readListEnd(); } @@ -55610,22 +55554,19 @@ void ThriftHiveMetastoreClient::recv_alter_partitions() return; } -void ThriftHiveMetastoreClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) { - send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); - recv_alter_partitions_with_environment_context(); + send_alter_partitions_with_environment_context(req); + recv_alter_partitions_with_environment_context(_return); } -void ThriftHiveMetastoreClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_alter_partitions_with_environment_context(const AlterPartitionsRequest& req) { int32_t cseqid = 0; oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.new_parts = &new_parts; - args.environment_context = &environment_context; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -55633,7 +55574,7 @@ void ThriftHiveMetastoreClient::send_alter_partitions_with_environment_context(c oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_partitions_with_environment_context() +void ThriftHiveMetastoreClient::recv_alter_partitions_with_environment_context(AlterPartitionsResponse& _return) { int32_t rseqid = 0; @@ -55659,17 +55600,22 @@ void ThriftHiveMetastoreClient::recv_alter_partitions_with_environment_context() iprot_->getTransport()->readEnd(); } 
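The recv_ method below gains the out-parameter and wires result.success to the caller's _return before result.read(), so field 0 deserializes in place with no copy. It also tightens the failure contract: a reply carrying neither success nor a declared exception used to fall through to a silent void return and now raises TApplicationException::MISSING_RESULT. Continuing the earlier caller sketch, the failure surface now has three distinct shapes:

// Continuing the caller sketch above (resp and req as before).
try {
  client.alter_partitions_with_environment_context(resp, req);
} catch (const InvalidOperationException& e) {
  // o1: the metastore rejected the alter
} catch (const MetaException& e) {
  // o2: metastore-side failure
} catch (const ::apache::thrift::TApplicationException& e) {
  // MISSING_RESULT: the reply carried neither success nor o1/o2
}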
ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_partitions_with_environment_context failed: unknown result"); } void ThriftHiveMetastoreClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) @@ -68377,7 +68323,8 @@ void ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_con ThriftHiveMetastore_alter_partitions_with_environment_context_result result; try { - iface_->alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context); + iface_->alter_partitions_with_environment_context(result.success, args.req); + result.__isset.success = true; } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; @@ -83326,23 +83273,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) { - int32_t seqid = send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); - recv_alter_partitions_with_environment_context(seqid); + int32_t seqid = send_alter_partitions_with_environment_context(req); + recv_alter_partitions_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context(const AlterPartitionsRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.new_parts = &new_parts; - args.environment_context = &environment_context; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -83353,7 +83297,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environm return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -83392,10 +83336,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment throw TProtocolException(TProtocolException::INVALID_DATA); } 
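On the server side, the processor hunk above shows that the handler is now called with result.success as the out-parameter and that the processor flips __isset.success itself after the call, so an implementation only has to populate _return. A hedged server sketch built on the generated no-op base class (ThriftHiveMetastoreNull, declared in the header changes below, stubs every method, which keeps the override small); what belongs in _return depends on AlterPartitionsResponse's fields, which this patch does not show:

// Hedged sketch: override just this call on top of the generated no-op base.
class AlterPartitionsOnlyHandler : public ThriftHiveMetastoreNull {
 public:
  void alter_partitions_with_environment_context(
      AlterPartitionsResponse& _return,
      const AlterPartitionsRequest& req) override {
    // apply req, then populate _return; the processor sets
    // __isset.success and serializes _return as result field 0
  }
};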
ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -83404,8 +83354,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment sentry.commit(); throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_partitions_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 1d57aee9c0..d3449a7c74 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -105,7 +105,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) = 0; virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0; virtual void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) = 0; - virtual void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) = 0; + virtual void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) = 0; virtual void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) = 0; virtual void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) = 0; virtual bool partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) = 0; @@ -516,7 +516,7 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void alter_partitions(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* new_parts */) { return; } - void alter_partitions_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* new_parts */, const EnvironmentContext& /* environment_context */) { + void alter_partitions_with_environment_context(AlterPartitionsResponse& /* _return */, const AlterPartitionsRequest& /* req */) { return; } void alter_partition_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */, const EnvironmentContext& /* environment_context */) { @@ -11637,11 +11637,8 @@ class ThriftHiveMetastore_alter_partitions_presult { }; typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset { - _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset() : db_name(false), tbl_name(false), new_parts(false), environment_context(false) {} - bool db_name :1; - bool 
tbl_name :1;
-  bool new_parts :1;
-  bool environment_context :1;
+  _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset() : req(false) {}
+  bool req :1;
 } _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset;

 class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
@@ -11649,34 +11646,19 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
   ThriftHiveMetastore_alter_partitions_with_environment_context_args(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&);
   ThriftHiveMetastore_alter_partitions_with_environment_context_args& operator=(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&);
-  ThriftHiveMetastore_alter_partitions_with_environment_context_args() : db_name(), tbl_name() {
+  ThriftHiveMetastore_alter_partitions_with_environment_context_args() {
   }

   virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_args() throw();
-  std::string db_name;
-  std::string tbl_name;
-  std::vector<Partition>  new_parts;
-  EnvironmentContext environment_context;
+  AlterPartitionsRequest req;

   _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset __isset;

-  void __set_db_name(const std::string& val);
-
-  void __set_tbl_name(const std::string& val);
-
-  void __set_new_parts(const std::vector<Partition> & val);
-
-  void __set_environment_context(const EnvironmentContext& val);
+  void __set_req(const AlterPartitionsRequest& val);

   bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_args & rhs) const
   {
-    if (!(db_name == rhs.db_name))
-      return false;
-    if (!(tbl_name == rhs.tbl_name))
-      return false;
-    if (!(new_parts == rhs.new_parts))
-      return false;
-    if (!(environment_context == rhs.environment_context))
+    if (!(req == rhs.req))
       return false;
     return true;
   }
@@ -11697,17 +11679,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_pargs {

   virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_pargs() throw();

-  const std::string* db_name;
-  const std::string* tbl_name;
-  const std::vector<Partition> * new_parts;
-  const EnvironmentContext* environment_context;
+  const AlterPartitionsRequest* req;

   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

 };

 typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset {
-  _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset() : o1(false), o2(false) {}
+  _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
   bool o1 :1;
   bool o2 :1;
 } _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset;
@@ -11721,17 +11701,22 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result {
   }

   virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_result() throw();
+  AlterPartitionsResponse success;
   InvalidOperationException o1;
   MetaException o2;

   _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset __isset;

+  void __set_success(const AlterPartitionsResponse& val);
+
   void __set_o1(const InvalidOperationException& val);

   void __set_o2(const MetaException& val);

   bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_result & rhs) const
   {
+    if (!(success == rhs.success))
+      return false;
     if (!(o1 == rhs.o1))
       return false;
     if (!(o2 == rhs.o2))
@@ -11750,7 +11735,8 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result {
 };

 typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset {
-  _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset() : o1(false), o2(false) {}
+  _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
   bool o1 :1;
   bool o2 :1;
 } _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset;
@@ -11760,6 +11746,7 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_presult {

   virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_presult() throw();

+  AlterPartitionsResponse* success;
   InvalidOperationException o1;
   MetaException o2;

@@ -26472,9 +26459,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
   void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
   void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
   void recv_alter_partitions();
-  void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
-  void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
-  void recv_alter_partitions_with_environment_context();
+  void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req);
+  void send_alter_partitions_with_environment_context(const AlterPartitionsRequest& req);
+  void recv_alter_partitions_with_environment_context(AlterPartitionsResponse& _return);
   void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
   void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
   void recv_alter_partition_with_environment_context();
@@ -28100,13 +28087,14 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     ifaces_[i]->alter_partitions(db_name, tbl_name, new_parts);
   }

-  void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) {
+  void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) {
     size_t sz = ifaces_.size();
     size_t i = 0;
     for (; i < (sz - 1); ++i) {
-      ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
+      ifaces_[i]->alter_partitions_with_environment_context(_return, req);
     }
-    ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
+    ifaces_[i]->alter_partitions_with_environment_context(_return, req);
+    return;
   }

   void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) {
@@ -29559,9 +29547,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
   int32_t send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
   void recv_alter_partitions(const int32_t seqid);
-  void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
-  int32_t send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
-  void recv_alter_partitions_with_environment_context(const int32_t seqid);
+  void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req);
+  int32_t send_alter_partitions_with_environment_context(const AlterPartitionsRequest& req);
+  void recv_alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const int32_t seqid);
   void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
   int32_t send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
   void recv_alter_partition_with_environment_context(const int32_t seqid);
diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index d45ec8103f..c6b820406d 100644
--- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -437,7 +437,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("alter_partitions\n");
   }

-  void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) {
+  void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) {
     // Your implementation goes here
     printf("alter_partitions_with_environment_context\n");
   }
diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index bc4d168a74..c2d6a5694e 100644
--- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -215,6 +215,18 @@ const char* _kSchemaVersionStateNames[] = {
 };
 const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(8, _kSchemaVersionStateValues, _kSchemaVersionStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));

+int _kIsolationLevelComplianceValues[] = {
+  IsolationLevelCompliance::YES,
+  IsolationLevelCompliance::NO,
+  IsolationLevelCompliance::UNKNOWN
+};
+const char* _kIsolationLevelComplianceNames[] = {
+  "YES",
+  "NO",
+  "UNKNOWN"
+};
+const std::map<int, const char*> _IsolationLevelCompliance_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kIsolationLevelComplianceValues, _kIsolationLevelComplianceNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
 int _kFunctionTypeValues[] = {
   FunctionType::JAVA
 };
@@ -6435,6 +6447,21 @@ void Table::__set_ownerType(const PrincipalType::type val) {
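// --- Editorial note (annotation, not part of the generated patch) ----------
// The hunk below adds three optional fields to Table: txnId (i64, field 19),
// validWriteIdList (string, field 20) and isStatsCompliant
// (IsolationLevelCompliance, field 21). Each __set_* setter flips the
// matching __isset bit, Table::write() only serializes a field whose bit is
// set, and Table::read() skips unknown field ids, so pre-upgrade peers keep
// interoperating. A minimal caller-side sketch -- the values are
// hypothetical, and it assumes the generated hive_metastore_types.h is on
// the include path:
//
//   Table t;
//   t.__set_txnId(42);                                // open transaction id
//   t.__set_validWriteIdList("default.tbl:5:5::");    // ValidWriteIdList string
//   t.__set_isStatsCompliant(IsolationLevelCompliance::YES);
// ----------------------------------------------------------------------------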
__isset.ownerType = true; } +void Table::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void Table::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + +void Table::__set_isStatsCompliant(const IsolationLevelCompliance::type val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -6629,6 +6656,32 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 19: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 20: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 21: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast249; + xfer += iprot->readI32(ecast249); + this->isStatsCompliant = (IsolationLevelCompliance::type)ecast249; + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -6677,10 +6730,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionKeys.size())); - std::vector ::const_iterator _iter249; - for (_iter249 = this->partitionKeys.begin(); _iter249 != this->partitionKeys.end(); ++_iter249) + std::vector ::const_iterator _iter250; + for (_iter250 = this->partitionKeys.begin(); _iter250 != this->partitionKeys.end(); ++_iter250) { - xfer += (*_iter249).write(oprot); + xfer += (*_iter250).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6689,11 +6742,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter250; - for (_iter250 = this->parameters.begin(); _iter250 != this->parameters.end(); ++_iter250) + std::map ::const_iterator _iter251; + for (_iter251 = this->parameters.begin(); _iter251 != this->parameters.end(); ++_iter251) { - xfer += oprot->writeString(_iter250->first); - xfer += oprot->writeString(_iter250->second); + xfer += oprot->writeString(_iter251->first); + xfer += oprot->writeString(_iter251->second); } xfer += oprot->writeMapEnd(); } @@ -6741,6 +6794,21 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeI32((int32_t)this->ownerType); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 19); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 20); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.isStatsCompliant) { + xfer += 
oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 21); + xfer += oprot->writeI32((int32_t)this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -6766,31 +6834,13 @@ void swap(Table &a, Table &b) { swap(a.creationMetadata, b.creationMetadata); swap(a.catName, b.catName); swap(a.ownerType, b.ownerType); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); + swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); } -Table::Table(const Table& other251) { - tableName = other251.tableName; - dbName = other251.dbName; - owner = other251.owner; - createTime = other251.createTime; - lastAccessTime = other251.lastAccessTime; - retention = other251.retention; - sd = other251.sd; - partitionKeys = other251.partitionKeys; - parameters = other251.parameters; - viewOriginalText = other251.viewOriginalText; - viewExpandedText = other251.viewExpandedText; - tableType = other251.tableType; - privileges = other251.privileges; - temporary = other251.temporary; - rewriteEnabled = other251.rewriteEnabled; - creationMetadata = other251.creationMetadata; - catName = other251.catName; - ownerType = other251.ownerType; - __isset = other251.__isset; -} -Table& Table::operator=(const Table& other252) { +Table::Table(const Table& other252) { tableName = other252.tableName; dbName = other252.dbName; owner = other252.owner; @@ -6809,7 +6859,34 @@ Table& Table::operator=(const Table& other252) { creationMetadata = other252.creationMetadata; catName = other252.catName; ownerType = other252.ownerType; + txnId = other252.txnId; + validWriteIdList = other252.validWriteIdList; + isStatsCompliant = other252.isStatsCompliant; __isset = other252.__isset; +} +Table& Table::operator=(const Table& other253) { + tableName = other253.tableName; + dbName = other253.dbName; + owner = other253.owner; + createTime = other253.createTime; + lastAccessTime = other253.lastAccessTime; + retention = other253.retention; + sd = other253.sd; + partitionKeys = other253.partitionKeys; + parameters = other253.parameters; + viewOriginalText = other253.viewOriginalText; + viewExpandedText = other253.viewExpandedText; + tableType = other253.tableType; + privileges = other253.privileges; + temporary = other253.temporary; + rewriteEnabled = other253.rewriteEnabled; + creationMetadata = other253.creationMetadata; + catName = other253.catName; + ownerType = other253.ownerType; + txnId = other253.txnId; + validWriteIdList = other253.validWriteIdList; + isStatsCompliant = other253.isStatsCompliant; + __isset = other253.__isset; return *this; } void Table::printTo(std::ostream& out) const { @@ -6833,6 +6910,9 @@ void Table::printTo(std::ostream& out) const { out << ", " << "creationMetadata="; (__isset.creationMetadata ? (out << to_string(creationMetadata)) : (out << "")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ", " << "ownerType="; (__isset.ownerType ? (out << to_string(ownerType)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? 
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -6879,6 +6959,21 @@ void Partition::__set_catName(const std::string& val) { __isset.catName = true; } +void Partition::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void Partition::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + +void Partition::__set_isStatsCompliant(const IsolationLevelCompliance::type val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -6904,14 +6999,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size253; - ::apache::thrift::protocol::TType _etype256; - xfer += iprot->readListBegin(_etype256, _size253); - this->values.resize(_size253); - uint32_t _i257; - for (_i257 = 0; _i257 < _size253; ++_i257) + uint32_t _size254; + ::apache::thrift::protocol::TType _etype257; + xfer += iprot->readListBegin(_etype257, _size254); + this->values.resize(_size254); + uint32_t _i258; + for (_i258 = 0; _i258 < _size254; ++_i258) { - xfer += iprot->readString(this->values[_i257]); + xfer += iprot->readString(this->values[_i258]); } xfer += iprot->readListEnd(); } @@ -6964,17 +7059,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size258; - ::apache::thrift::protocol::TType _ktype259; - ::apache::thrift::protocol::TType _vtype260; - xfer += iprot->readMapBegin(_ktype259, _vtype260, _size258); - uint32_t _i262; - for (_i262 = 0; _i262 < _size258; ++_i262) + uint32_t _size259; + ::apache::thrift::protocol::TType _ktype260; + ::apache::thrift::protocol::TType _vtype261; + xfer += iprot->readMapBegin(_ktype260, _vtype261, _size259); + uint32_t _i263; + for (_i263 = 0; _i263 < _size259; ++_i263) { - std::string _key263; - xfer += iprot->readString(_key263); - std::string& _val264 = this->parameters[_key263]; - xfer += iprot->readString(_val264); + std::string _key264; + xfer += iprot->readString(_key264); + std::string& _val265 = this->parameters[_key264]; + xfer += iprot->readString(_val265); } xfer += iprot->readMapEnd(); } @@ -6999,6 +7094,32 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 10: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 11: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 12: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast266; + xfer += iprot->readI32(ecast266); + this->isStatsCompliant = (IsolationLevelCompliance::type)ecast266; + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -7019,10 +7140,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->values.size())); - std::vector ::const_iterator _iter265; - for (_iter265 = this->values.begin(); _iter265 != this->values.end(); ++_iter265) + std::vector ::const_iterator _iter267; + for (_iter267 = this->values.begin(); _iter267 != this->values.end(); ++_iter267) { - xfer += oprot->writeString((*_iter265)); + xfer += oprot->writeString((*_iter267)); } xfer += oprot->writeListEnd(); } @@ -7051,11 +7172,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter266; - for (_iter266 = this->parameters.begin(); _iter266 != this->parameters.end(); ++_iter266) + std::map ::const_iterator _iter268; + for (_iter268 = this->parameters.begin(); _iter268 != this->parameters.end(); ++_iter268) { - xfer += oprot->writeString(_iter266->first); - xfer += oprot->writeString(_iter266->second); + xfer += oprot->writeString(_iter268->first); + xfer += oprot->writeString(_iter268->second); } xfer += oprot->writeMapEnd(); } @@ -7071,6 +7192,21 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 10); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 11); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 12); + xfer += oprot->writeI32((int32_t)this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -7087,32 +7223,41 @@ void swap(Partition &a, Partition &b) { swap(a.parameters, b.parameters); swap(a.privileges, b.privileges); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); + swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); } -Partition::Partition(const Partition& other267) { - values = other267.values; - dbName = other267.dbName; - tableName = other267.tableName; - createTime = other267.createTime; - lastAccessTime = other267.lastAccessTime; - sd = other267.sd; - parameters = other267.parameters; - privileges = other267.privileges; - catName = other267.catName; - __isset = other267.__isset; -} -Partition& Partition::operator=(const Partition& other268) { - values = other268.values; - dbName = other268.dbName; - tableName = other268.tableName; - createTime = other268.createTime; - lastAccessTime = other268.lastAccessTime; - sd = other268.sd; - parameters = other268.parameters; - privileges = other268.privileges; - catName = other268.catName; - __isset = other268.__isset; +Partition::Partition(const Partition& other269) { + values = other269.values; + dbName = other269.dbName; + tableName = other269.tableName; + createTime = other269.createTime; + lastAccessTime = other269.lastAccessTime; + sd = other269.sd; + parameters = other269.parameters; + privileges = other269.privileges; + catName = other269.catName; + 
txnId = other269.txnId; + validWriteIdList = other269.validWriteIdList; + isStatsCompliant = other269.isStatsCompliant; + __isset = other269.__isset; +} +Partition& Partition::operator=(const Partition& other270) { + values = other270.values; + dbName = other270.dbName; + tableName = other270.tableName; + createTime = other270.createTime; + lastAccessTime = other270.lastAccessTime; + sd = other270.sd; + parameters = other270.parameters; + privileges = other270.privileges; + catName = other270.catName; + txnId = other270.txnId; + validWriteIdList = other270.validWriteIdList; + isStatsCompliant = other270.isStatsCompliant; + __isset = other270.__isset; return *this; } void Partition::printTo(std::ostream& out) const { @@ -7127,6 +7272,9 @@ void Partition::printTo(std::ostream& out) const { out << ", " << "parameters=" << to_string(parameters); out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -7185,14 +7333,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size269; - ::apache::thrift::protocol::TType _etype272; - xfer += iprot->readListBegin(_etype272, _size269); - this->values.resize(_size269); - uint32_t _i273; - for (_i273 = 0; _i273 < _size269; ++_i273) + uint32_t _size271; + ::apache::thrift::protocol::TType _etype274; + xfer += iprot->readListBegin(_etype274, _size271); + this->values.resize(_size271); + uint32_t _i275; + for (_i275 = 0; _i275 < _size271; ++_i275) { - xfer += iprot->readString(this->values[_i273]); + xfer += iprot->readString(this->values[_i275]); } xfer += iprot->readListEnd(); } @@ -7229,17 +7377,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size274; - ::apache::thrift::protocol::TType _ktype275; - ::apache::thrift::protocol::TType _vtype276; - xfer += iprot->readMapBegin(_ktype275, _vtype276, _size274); - uint32_t _i278; - for (_i278 = 0; _i278 < _size274; ++_i278) + uint32_t _size276; + ::apache::thrift::protocol::TType _ktype277; + ::apache::thrift::protocol::TType _vtype278; + xfer += iprot->readMapBegin(_ktype277, _vtype278, _size276); + uint32_t _i280; + for (_i280 = 0; _i280 < _size276; ++_i280) { - std::string _key279; - xfer += iprot->readString(_key279); - std::string& _val280 = this->parameters[_key279]; - xfer += iprot->readString(_val280); + std::string _key281; + xfer += iprot->readString(_key281); + std::string& _val282 = this->parameters[_key281]; + xfer += iprot->readString(_val282); } xfer += iprot->readMapEnd(); } @@ -7276,10 +7424,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->values.size())); - std::vector ::const_iterator _iter281; - for (_iter281 = this->values.begin(); _iter281 != this->values.end(); ++_iter281) + 
std::vector ::const_iterator _iter283; + for (_iter283 = this->values.begin(); _iter283 != this->values.end(); ++_iter283) { - xfer += oprot->writeString((*_iter281)); + xfer += oprot->writeString((*_iter283)); } xfer += oprot->writeListEnd(); } @@ -7300,11 +7448,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter282; - for (_iter282 = this->parameters.begin(); _iter282 != this->parameters.end(); ++_iter282) + std::map ::const_iterator _iter284; + for (_iter284 = this->parameters.begin(); _iter284 != this->parameters.end(); ++_iter284) { - xfer += oprot->writeString(_iter282->first); - xfer += oprot->writeString(_iter282->second); + xfer += oprot->writeString(_iter284->first); + xfer += oprot->writeString(_iter284->second); } xfer += oprot->writeMapEnd(); } @@ -7331,23 +7479,23 @@ void swap(PartitionWithoutSD &a, PartitionWithoutSD &b) { swap(a.__isset, b.__isset); } -PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other283) { - values = other283.values; - createTime = other283.createTime; - lastAccessTime = other283.lastAccessTime; - relativePath = other283.relativePath; - parameters = other283.parameters; - privileges = other283.privileges; - __isset = other283.__isset; -} -PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other284) { - values = other284.values; - createTime = other284.createTime; - lastAccessTime = other284.lastAccessTime; - relativePath = other284.relativePath; - parameters = other284.parameters; - privileges = other284.privileges; - __isset = other284.__isset; +PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other285) { + values = other285.values; + createTime = other285.createTime; + lastAccessTime = other285.lastAccessTime; + relativePath = other285.relativePath; + parameters = other285.parameters; + privileges = other285.privileges; + __isset = other285.__isset; +} +PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other286) { + values = other286.values; + createTime = other286.createTime; + lastAccessTime = other286.lastAccessTime; + relativePath = other286.relativePath; + parameters = other286.parameters; + privileges = other286.privileges; + __isset = other286.__isset; return *this; } void PartitionWithoutSD::printTo(std::ostream& out) const { @@ -7400,14 +7548,14 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size285; - ::apache::thrift::protocol::TType _etype288; - xfer += iprot->readListBegin(_etype288, _size285); - this->partitions.resize(_size285); - uint32_t _i289; - for (_i289 = 0; _i289 < _size285; ++_i289) + uint32_t _size287; + ::apache::thrift::protocol::TType _etype290; + xfer += iprot->readListBegin(_etype290, _size287); + this->partitions.resize(_size287); + uint32_t _i291; + for (_i291 = 0; _i291 < _size287; ++_i291) { - xfer += this->partitions[_i289].read(iprot); + xfer += this->partitions[_i291].read(iprot); } xfer += iprot->readListEnd(); } @@ -7444,10 +7592,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter290; - for (_iter290 = this->partitions.begin(); _iter290 != this->partitions.end(); ++_iter290) + std::vector ::const_iterator _iter292; + for (_iter292 = this->partitions.begin(); _iter292 != this->partitions.end(); ++_iter292) { - xfer += (*_iter290).write(oprot); + xfer += (*_iter292).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7469,15 +7617,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) { swap(a.__isset, b.__isset); } -PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other291) { - partitions = other291.partitions; - sd = other291.sd; - __isset = other291.__isset; +PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other293) { + partitions = other293.partitions; + sd = other293.sd; + __isset = other293.__isset; } -PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other292) { - partitions = other292.partitions; - sd = other292.sd; - __isset = other292.__isset; +PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other294) { + partitions = other294.partitions; + sd = other294.sd; + __isset = other294.__isset; return *this; } void PartitionSpecWithSharedSD::printTo(std::ostream& out) const { @@ -7522,14 +7670,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size293; - ::apache::thrift::protocol::TType _etype296; - xfer += iprot->readListBegin(_etype296, _size293); - this->partitions.resize(_size293); - uint32_t _i297; - for (_i297 = 0; _i297 < _size293; ++_i297) + uint32_t _size295; + ::apache::thrift::protocol::TType _etype298; + xfer += iprot->readListBegin(_etype298, _size295); + this->partitions.resize(_size295); + uint32_t _i299; + for (_i299 = 0; _i299 < _size295; ++_i299) { - xfer += this->partitions[_i297].read(iprot); + xfer += this->partitions[_i299].read(iprot); } xfer += iprot->readListEnd(); } @@ -7558,10 +7706,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter298; - for (_iter298 = this->partitions.begin(); _iter298 != this->partitions.end(); ++_iter298) + std::vector ::const_iterator _iter300; + for (_iter300 = this->partitions.begin(); _iter300 != this->partitions.end(); ++_iter300) { - xfer += (*_iter298).write(oprot); + xfer += (*_iter300).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7578,13 +7726,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) { swap(a.__isset, b.__isset); } -PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other299) { - partitions = other299.partitions; - __isset = other299.__isset; +PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other301) { + partitions = other301.partitions; + __isset = other301.__isset; } -PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other300) { - partitions = other300.partitions; - __isset = other300.__isset; +PartitionListComposingSpec& 
PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other302) { + partitions = other302.partitions; + __isset = other302.__isset; return *this; } void PartitionListComposingSpec::printTo(std::ostream& out) const { @@ -7626,6 +7774,21 @@ void PartitionSpec::__set_catName(const std::string& val) { __isset.catName = true; } +void PartitionSpec::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void PartitionSpec::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + +void PartitionSpec::__set_isStatsCompliant(const IsolationLevelCompliance::type val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -7695,6 +7858,32 @@ uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 8: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 9: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast303; + xfer += iprot->readI32(ecast303); + this->isStatsCompliant = (IsolationLevelCompliance::type)ecast303; + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -7739,6 +7928,21 @@ uint32_t PartitionSpec::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 7); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 9); + xfer += oprot->writeI32((int32_t)this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -7752,26 +7956,35 @@ void swap(PartitionSpec &a, PartitionSpec &b) { swap(a.sharedSDPartitionSpec, b.sharedSDPartitionSpec); swap(a.partitionList, b.partitionList); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); + swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); } -PartitionSpec::PartitionSpec(const PartitionSpec& other301) { - dbName = other301.dbName; - tableName = other301.tableName; - rootPath = other301.rootPath; - sharedSDPartitionSpec = other301.sharedSDPartitionSpec; - partitionList = other301.partitionList; - catName = other301.catName; - __isset = other301.__isset; +PartitionSpec::PartitionSpec(const PartitionSpec& other304) { + dbName = other304.dbName; + tableName = other304.tableName; + rootPath = other304.rootPath; + sharedSDPartitionSpec = other304.sharedSDPartitionSpec; + partitionList = 
other304.partitionList; + catName = other304.catName; + txnId = other304.txnId; + validWriteIdList = other304.validWriteIdList; + isStatsCompliant = other304.isStatsCompliant; + __isset = other304.__isset; } -PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other302) { - dbName = other302.dbName; - tableName = other302.tableName; - rootPath = other302.rootPath; - sharedSDPartitionSpec = other302.sharedSDPartitionSpec; - partitionList = other302.partitionList; - catName = other302.catName; - __isset = other302.__isset; +PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other305) { + dbName = other305.dbName; + tableName = other305.tableName; + rootPath = other305.rootPath; + sharedSDPartitionSpec = other305.sharedSDPartitionSpec; + partitionList = other305.partitionList; + catName = other305.catName; + txnId = other305.txnId; + validWriteIdList = other305.validWriteIdList; + isStatsCompliant = other305.isStatsCompliant; + __isset = other305.__isset; return *this; } void PartitionSpec::printTo(std::ostream& out) const { @@ -7783,6 +7996,9 @@ void PartitionSpec::printTo(std::ostream& out) const { out << ", " << "sharedSDPartitionSpec="; (__isset.sharedSDPartitionSpec ? (out << to_string(sharedSDPartitionSpec)) : (out << "")); out << ", " << "partitionList="; (__isset.partitionList ? (out << to_string(partitionList)) : (out << "")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -7918,19 +8134,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) { swap(a.__isset, b.__isset); } -BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other303) { - numTrues = other303.numTrues; - numFalses = other303.numFalses; - numNulls = other303.numNulls; - bitVectors = other303.bitVectors; - __isset = other303.__isset; +BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other306) { + numTrues = other306.numTrues; + numFalses = other306.numFalses; + numNulls = other306.numNulls; + bitVectors = other306.bitVectors; + __isset = other306.__isset; } -BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other304) { - numTrues = other304.numTrues; - numFalses = other304.numFalses; - numNulls = other304.numNulls; - bitVectors = other304.bitVectors; - __isset = other304.__isset; +BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other307) { + numTrues = other307.numTrues; + numFalses = other307.numFalses; + numNulls = other307.numNulls; + bitVectors = other307.bitVectors; + __isset = other307.__isset; return *this; } void BooleanColumnStatsData::printTo(std::ostream& out) const { @@ -8093,21 +8309,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) { swap(a.__isset, b.__isset); } -DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other305) { - lowValue = other305.lowValue; - highValue = other305.highValue; - numNulls = other305.numNulls; - numDVs = other305.numDVs; - bitVectors = other305.bitVectors; - __isset = other305.__isset; +DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other308) { + lowValue = 
other308.lowValue; + highValue = other308.highValue; + numNulls = other308.numNulls; + numDVs = other308.numDVs; + bitVectors = other308.bitVectors; + __isset = other308.__isset; } -DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other306) { - lowValue = other306.lowValue; - highValue = other306.highValue; - numNulls = other306.numNulls; - numDVs = other306.numDVs; - bitVectors = other306.bitVectors; - __isset = other306.__isset; +DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other309) { + lowValue = other309.lowValue; + highValue = other309.highValue; + numNulls = other309.numNulls; + numDVs = other309.numDVs; + bitVectors = other309.bitVectors; + __isset = other309.__isset; return *this; } void DoubleColumnStatsData::printTo(std::ostream& out) const { @@ -8271,21 +8487,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) { swap(a.__isset, b.__isset); } -LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other307) { - lowValue = other307.lowValue; - highValue = other307.highValue; - numNulls = other307.numNulls; - numDVs = other307.numDVs; - bitVectors = other307.bitVectors; - __isset = other307.__isset; +LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other310) { + lowValue = other310.lowValue; + highValue = other310.highValue; + numNulls = other310.numNulls; + numDVs = other310.numDVs; + bitVectors = other310.bitVectors; + __isset = other310.__isset; } -LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other308) { - lowValue = other308.lowValue; - highValue = other308.highValue; - numNulls = other308.numNulls; - numDVs = other308.numDVs; - bitVectors = other308.bitVectors; - __isset = other308.__isset; +LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other311) { + lowValue = other311.lowValue; + highValue = other311.highValue; + numNulls = other311.numNulls; + numDVs = other311.numDVs; + bitVectors = other311.bitVectors; + __isset = other311.__isset; return *this; } void LongColumnStatsData::printTo(std::ostream& out) const { @@ -8451,21 +8667,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) { swap(a.__isset, b.__isset); } -StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other309) { - maxColLen = other309.maxColLen; - avgColLen = other309.avgColLen; - numNulls = other309.numNulls; - numDVs = other309.numDVs; - bitVectors = other309.bitVectors; - __isset = other309.__isset; +StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other312) { + maxColLen = other312.maxColLen; + avgColLen = other312.avgColLen; + numNulls = other312.numNulls; + numDVs = other312.numDVs; + bitVectors = other312.bitVectors; + __isset = other312.__isset; } -StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other310) { - maxColLen = other310.maxColLen; - avgColLen = other310.avgColLen; - numNulls = other310.numNulls; - numDVs = other310.numDVs; - bitVectors = other310.bitVectors; - __isset = other310.__isset; +StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other313) { + maxColLen = other313.maxColLen; + avgColLen = other313.avgColLen; + numNulls = other313.numNulls; + numDVs = other313.numDVs; + bitVectors = other313.bitVectors; + __isset = other313.__isset; return *this; } void StringColumnStatsData::printTo(std::ostream& out) const { @@ -8611,19 +8827,19 @@ void swap(BinaryColumnStatsData &a, 
BinaryColumnStatsData &b) { swap(a.__isset, b.__isset); } -BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other311) { - maxColLen = other311.maxColLen; - avgColLen = other311.avgColLen; - numNulls = other311.numNulls; - bitVectors = other311.bitVectors; - __isset = other311.__isset; +BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other314) { + maxColLen = other314.maxColLen; + avgColLen = other314.avgColLen; + numNulls = other314.numNulls; + bitVectors = other314.bitVectors; + __isset = other314.__isset; } -BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other312) { - maxColLen = other312.maxColLen; - avgColLen = other312.avgColLen; - numNulls = other312.numNulls; - bitVectors = other312.bitVectors; - __isset = other312.__isset; +BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other315) { + maxColLen = other315.maxColLen; + avgColLen = other315.avgColLen; + numNulls = other315.numNulls; + bitVectors = other315.bitVectors; + __isset = other315.__isset; return *this; } void BinaryColumnStatsData::printTo(std::ostream& out) const { @@ -8728,13 +8944,13 @@ void swap(Decimal &a, Decimal &b) { swap(a.unscaled, b.unscaled); } -Decimal::Decimal(const Decimal& other313) { - scale = other313.scale; - unscaled = other313.unscaled; +Decimal::Decimal(const Decimal& other316) { + scale = other316.scale; + unscaled = other316.unscaled; } -Decimal& Decimal::operator=(const Decimal& other314) { - scale = other314.scale; - unscaled = other314.unscaled; +Decimal& Decimal::operator=(const Decimal& other317) { + scale = other317.scale; + unscaled = other317.unscaled; return *this; } void Decimal::printTo(std::ostream& out) const { @@ -8895,21 +9111,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) { swap(a.__isset, b.__isset); } -DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other315) { - lowValue = other315.lowValue; - highValue = other315.highValue; - numNulls = other315.numNulls; - numDVs = other315.numDVs; - bitVectors = other315.bitVectors; - __isset = other315.__isset; +DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other318) { + lowValue = other318.lowValue; + highValue = other318.highValue; + numNulls = other318.numNulls; + numDVs = other318.numDVs; + bitVectors = other318.bitVectors; + __isset = other318.__isset; } -DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other316) { - lowValue = other316.lowValue; - highValue = other316.highValue; - numNulls = other316.numNulls; - numDVs = other316.numDVs; - bitVectors = other316.bitVectors; - __isset = other316.__isset; +DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other319) { + lowValue = other319.lowValue; + highValue = other319.highValue; + numNulls = other319.numNulls; + numDVs = other319.numDVs; + bitVectors = other319.bitVectors; + __isset = other319.__isset; return *this; } void DecimalColumnStatsData::printTo(std::ostream& out) const { @@ -8995,11 +9211,11 @@ void swap(Date &a, Date &b) { swap(a.daysSinceEpoch, b.daysSinceEpoch); } -Date::Date(const Date& other317) { - daysSinceEpoch = other317.daysSinceEpoch; +Date::Date(const Date& other320) { + daysSinceEpoch = other320.daysSinceEpoch; } -Date& Date::operator=(const Date& other318) { - daysSinceEpoch = other318.daysSinceEpoch; +Date& Date::operator=(const Date& other321) { + daysSinceEpoch = 
other321.daysSinceEpoch; return *this; } void Date::printTo(std::ostream& out) const { @@ -9159,21 +9375,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) { swap(a.__isset, b.__isset); } -DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other319) { - lowValue = other319.lowValue; - highValue = other319.highValue; - numNulls = other319.numNulls; - numDVs = other319.numDVs; - bitVectors = other319.bitVectors; - __isset = other319.__isset; +DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other322) { + lowValue = other322.lowValue; + highValue = other322.highValue; + numNulls = other322.numNulls; + numDVs = other322.numDVs; + bitVectors = other322.bitVectors; + __isset = other322.__isset; } -DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other320) { - lowValue = other320.lowValue; - highValue = other320.highValue; - numNulls = other320.numNulls; - numDVs = other320.numDVs; - bitVectors = other320.bitVectors; - __isset = other320.__isset; +DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other323) { + lowValue = other323.lowValue; + highValue = other323.highValue; + numNulls = other323.numNulls; + numDVs = other323.numDVs; + bitVectors = other323.bitVectors; + __isset = other323.__isset; return *this; } void DateColumnStatsData::printTo(std::ostream& out) const { @@ -9359,25 +9575,25 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other321) { - booleanStats = other321.booleanStats; - longStats = other321.longStats; - doubleStats = other321.doubleStats; - stringStats = other321.stringStats; - binaryStats = other321.binaryStats; - decimalStats = other321.decimalStats; - dateStats = other321.dateStats; - __isset = other321.__isset; -} -ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other322) { - booleanStats = other322.booleanStats; - longStats = other322.longStats; - doubleStats = other322.doubleStats; - stringStats = other322.stringStats; - binaryStats = other322.binaryStats; - decimalStats = other322.decimalStats; - dateStats = other322.dateStats; - __isset = other322.__isset; +ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other324) { + booleanStats = other324.booleanStats; + longStats = other324.longStats; + doubleStats = other324.doubleStats; + stringStats = other324.stringStats; + binaryStats = other324.binaryStats; + decimalStats = other324.decimalStats; + dateStats = other324.dateStats; + __isset = other324.__isset; +} +ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other325) { + booleanStats = other325.booleanStats; + longStats = other325.longStats; + doubleStats = other325.doubleStats; + stringStats = other325.stringStats; + binaryStats = other325.binaryStats; + decimalStats = other325.decimalStats; + dateStats = other325.dateStats; + __isset = other325.__isset; return *this; } void ColumnStatisticsData::printTo(std::ostream& out) const { @@ -9505,15 +9721,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) { swap(a.statsData, b.statsData); } -ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other323) { - colName = other323.colName; - colType = other323.colType; - statsData = other323.statsData; +ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other326) { + colName = other326.colName; + colType = other326.colType; 
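// --- Editorial note (annotation, not part of the generated patch) ----------
// The otherNNN/_iterNNN/_sizeNNN renames running through the rest of this
// file (e.g. other323 -> other326 just above) are pure renumbering churn:
// the Thrift compiler numbers its generated temporaries sequentially across
// the whole translation unit, so the enum and optional fields added earlier
// shift every later counter without any behavioral change.
// ----------------------------------------------------------------------------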
+ statsData = other326.statsData; } -ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other324) { - colName = other324.colName; - colType = other324.colType; - statsData = other324.statsData; +ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other327) { + colName = other327.colName; + colType = other327.colType; + statsData = other327.statsData; return *this; } void ColumnStatisticsObj::printTo(std::ostream& out) const { @@ -9695,23 +9911,23 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other325) { - isTblLevel = other325.isTblLevel; - dbName = other325.dbName; - tableName = other325.tableName; - partName = other325.partName; - lastAnalyzed = other325.lastAnalyzed; - catName = other325.catName; - __isset = other325.__isset; -} -ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other326) { - isTblLevel = other326.isTblLevel; - dbName = other326.dbName; - tableName = other326.tableName; - partName = other326.partName; - lastAnalyzed = other326.lastAnalyzed; - catName = other326.catName; - __isset = other326.__isset; +ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other328) { + isTblLevel = other328.isTblLevel; + dbName = other328.dbName; + tableName = other328.tableName; + partName = other328.partName; + lastAnalyzed = other328.lastAnalyzed; + catName = other328.catName; + __isset = other328.__isset; +} +ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other329) { + isTblLevel = other329.isTblLevel; + dbName = other329.dbName; + tableName = other329.tableName; + partName = other329.partName; + lastAnalyzed = other329.lastAnalyzed; + catName = other329.catName; + __isset = other329.__isset; return *this; } void ColumnStatisticsDesc::printTo(std::ostream& out) const { @@ -9739,6 +9955,21 @@ void ColumnStatistics::__set_statsObj(const std::vector & v this->statsObj = val; } +void ColumnStatistics::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void ColumnStatistics::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + +void ColumnStatistics::__set_isStatsCompliant(const IsolationLevelCompliance::type val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -9774,14 +10005,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->statsObj.clear(); - uint32_t _size327; - ::apache::thrift::protocol::TType _etype330; - xfer += iprot->readListBegin(_etype330, _size327); - this->statsObj.resize(_size327); - uint32_t _i331; - for (_i331 = 0; _i331 < _size327; ++_i331) + uint32_t _size330; + ::apache::thrift::protocol::TType _etype333; + xfer += iprot->readListBegin(_etype333, _size330); + this->statsObj.resize(_size330); + uint32_t _i334; + for (_i334 = 0; _i334 < _size330; ++_i334) { - xfer += this->statsObj[_i331].read(iprot); + xfer += this->statsObj[_i334].read(iprot); } xfer += iprot->readListEnd(); } @@ -9790,6 +10021,32 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == 
::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast335; + xfer += iprot->readI32(ecast335); + this->isStatsCompliant = (IsolationLevelCompliance::type)ecast335; + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -9818,15 +10075,30 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->statsObj.size())); - std::vector ::const_iterator _iter332; - for (_iter332 = this->statsObj.begin(); _iter332 != this->statsObj.end(); ++_iter332) + std::vector ::const_iterator _iter336; + for (_iter336 = this->statsObj.begin(); _iter336 != this->statsObj.end(); ++_iter336) { - xfer += (*_iter332).write(oprot); + xfer += (*_iter336).write(oprot); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 3); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 5); + xfer += oprot->writeI32((int32_t)this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -9836,15 +10108,27 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) { using ::std::swap; swap(a.statsDesc, b.statsDesc); swap(a.statsObj, b.statsObj); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } -ColumnStatistics::ColumnStatistics(const ColumnStatistics& other333) { - statsDesc = other333.statsDesc; - statsObj = other333.statsObj; -} -ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other334) { - statsDesc = other334.statsDesc; - statsObj = other334.statsObj; +ColumnStatistics::ColumnStatistics(const ColumnStatistics& other337) { + statsDesc = other337.statsDesc; + statsObj = other337.statsObj; + txnId = other337.txnId; + validWriteIdList = other337.validWriteIdList; + isStatsCompliant = other337.isStatsCompliant; + __isset = other337.__isset; +} +ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other338) { + statsDesc = other338.statsDesc; + statsObj = other338.statsObj; + txnId = other338.txnId; + validWriteIdList = other338.validWriteIdList; + isStatsCompliant = other338.isStatsCompliant; + __isset = other338.__isset; return *this; } void ColumnStatistics::printTo(std::ostream& out) const { @@ -9852,6 +10136,9 @@ void ColumnStatistics::printTo(std::ostream& out) const { out << "ColumnStatistics("; out << "statsDesc=" << to_string(statsDesc); out << ", 
" << "statsObj=" << to_string(statsObj); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -9868,6 +10155,11 @@ void AggrStats::__set_partsFound(const int64_t val) { this->partsFound = val; } +void AggrStats::__set_isStatsCompliant(const IsolationLevelCompliance::type val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -9895,14 +10187,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size335; - ::apache::thrift::protocol::TType _etype338; - xfer += iprot->readListBegin(_etype338, _size335); - this->colStats.resize(_size335); - uint32_t _i339; - for (_i339 = 0; _i339 < _size335; ++_i339) + uint32_t _size339; + ::apache::thrift::protocol::TType _etype342; + xfer += iprot->readListBegin(_etype342, _size339); + this->colStats.resize(_size339); + uint32_t _i343; + for (_i343 = 0; _i343 < _size339; ++_i343) { - xfer += this->colStats[_i339].read(iprot); + xfer += this->colStats[_i343].read(iprot); } xfer += iprot->readListEnd(); } @@ -9919,6 +10211,16 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast344; + xfer += iprot->readI32(ecast344); + this->isStatsCompliant = (IsolationLevelCompliance::type)ecast344; + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -9943,10 +10245,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); - std::vector ::const_iterator _iter340; - for (_iter340 = this->colStats.begin(); _iter340 != this->colStats.end(); ++_iter340) + std::vector ::const_iterator _iter345; + for (_iter345 = this->colStats.begin(); _iter345 != this->colStats.end(); ++_iter345) { - xfer += (*_iter340).write(oprot); + xfer += (*_iter345).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9956,6 +10258,11 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeI64(this->partsFound); xfer += oprot->writeFieldEnd(); + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 3); + xfer += oprot->writeI32((int32_t)this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -9965,15 +10272,21 @@ void swap(AggrStats &a, AggrStats &b) { using ::std::swap; swap(a.colStats, b.colStats); swap(a.partsFound, b.partsFound); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } -AggrStats::AggrStats(const AggrStats& other341) { - colStats = other341.colStats; - partsFound = other341.partsFound; +AggrStats::AggrStats(const AggrStats& other346) { + colStats = 
other346.colStats; + partsFound = other346.partsFound; + isStatsCompliant = other346.isStatsCompliant; + __isset = other346.__isset; } -AggrStats& AggrStats::operator=(const AggrStats& other342) { - colStats = other342.colStats; - partsFound = other342.partsFound; +AggrStats& AggrStats::operator=(const AggrStats& other347) { + colStats = other347.colStats; + partsFound = other347.partsFound; + isStatsCompliant = other347.isStatsCompliant; + __isset = other347.__isset; return *this; } void AggrStats::printTo(std::ostream& out) const { @@ -9981,6 +10294,7 @@ void AggrStats::printTo(std::ostream& out) const { out << "AggrStats("; out << "colStats=" << to_string(colStats); out << ", " << "partsFound=" << to_string(partsFound); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -9998,6 +10312,16 @@ void SetPartitionsStatsRequest::__set_needMerge(const bool val) { __isset.needMerge = true; } +void SetPartitionsStatsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void SetPartitionsStatsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -10024,14 +10348,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size343; - ::apache::thrift::protocol::TType _etype346; - xfer += iprot->readListBegin(_etype346, _size343); - this->colStats.resize(_size343); - uint32_t _i347; - for (_i347 = 0; _i347 < _size343; ++_i347) + uint32_t _size348; + ::apache::thrift::protocol::TType _etype351; + xfer += iprot->readListBegin(_etype351, _size348); + this->colStats.resize(_size348); + uint32_t _i352; + for (_i352 = 0; _i352 < _size348; ++_i352) { - xfer += this->colStats[_i347].read(iprot); + xfer += this->colStats[_i352].read(iprot); } xfer += iprot->readListEnd(); } @@ -10048,6 +10372,22 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -10070,10 +10410,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); - std::vector ::const_iterator _iter348; - for (_iter348 = this->colStats.begin(); _iter348 != this->colStats.end(); ++_iter348) + std::vector ::const_iterator _iter353; + for (_iter353 = this->colStats.begin(); _iter353 != this->colStats.end(); ++_iter353) { - xfer += (*_iter348).write(oprot); + xfer += (*_iter353).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10084,6 +10424,16 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeBool(this->needMerge); 
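// --- Editorial note (annotation, not part of the generated patch) ----------
// SetPartitionsStatsRequest likewise gains optional txnId (i64, field 3) and
// validWriteIdList (string, field 4), emitted below only when the __isset
// bit is set. A round-trip sketch, assuming the boost::shared_ptr-based C++
// Thrift runtime this code was generated against (values hypothetical):
//
//   boost::shared_ptr<apache::thrift::transport::TMemoryBuffer>
//       buf(new apache::thrift::transport::TMemoryBuffer());
//   boost::shared_ptr<apache::thrift::protocol::TBinaryProtocol>
//       proto(new apache::thrift::protocol::TBinaryProtocol(buf));
//   SetPartitionsStatsRequest req;
//   req.__set_txnId(42);
//   req.__set_validWriteIdList("default.tbl:5:5::");
//   req.write(proto.get());   // emits fields 3 and 4
//   SetPartitionsStatsRequest back;
//   back.read(proto.get());   // restores them; an older reader would take
//                             // the default branch and skip(ftype) instead
// ----------------------------------------------------------------------------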
xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 3); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -10093,18 +10443,24 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) { using ::std::swap; swap(a.colStats, b.colStats); swap(a.needMerge, b.needMerge); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } -SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other349) { - colStats = other349.colStats; - needMerge = other349.needMerge; - __isset = other349.__isset; +SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other354) { + colStats = other354.colStats; + needMerge = other354.needMerge; + txnId = other354.txnId; + validWriteIdList = other354.validWriteIdList; + __isset = other354.__isset; } -SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other350) { - colStats = other350.colStats; - needMerge = other350.needMerge; - __isset = other350.__isset; +SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other355) { + colStats = other355.colStats; + needMerge = other355.needMerge; + txnId = other355.txnId; + validWriteIdList = other355.validWriteIdList; + __isset = other355.__isset; return *this; } void SetPartitionsStatsRequest::printTo(std::ostream& out) const { @@ -10112,6 +10468,8 @@ void SetPartitionsStatsRequest::printTo(std::ostream& out) const { out << "SetPartitionsStatsRequest("; out << "colStats=" << to_string(colStats); out << ", " << "needMerge="; (__isset.needMerge ? (out << to_string(needMerge)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? 
(out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -10153,14 +10511,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fieldSchemas.clear(); - uint32_t _size351; - ::apache::thrift::protocol::TType _etype354; - xfer += iprot->readListBegin(_etype354, _size351); - this->fieldSchemas.resize(_size351); - uint32_t _i355; - for (_i355 = 0; _i355 < _size351; ++_i355) + uint32_t _size356; + ::apache::thrift::protocol::TType _etype359; + xfer += iprot->readListBegin(_etype359, _size356); + this->fieldSchemas.resize(_size356); + uint32_t _i360; + for (_i360 = 0; _i360 < _size356; ++_i360) { - xfer += this->fieldSchemas[_i355].read(iprot); + xfer += this->fieldSchemas[_i360].read(iprot); } xfer += iprot->readListEnd(); } @@ -10173,17 +10531,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size356; - ::apache::thrift::protocol::TType _ktype357; - ::apache::thrift::protocol::TType _vtype358; - xfer += iprot->readMapBegin(_ktype357, _vtype358, _size356); - uint32_t _i360; - for (_i360 = 0; _i360 < _size356; ++_i360) + uint32_t _size361; + ::apache::thrift::protocol::TType _ktype362; + ::apache::thrift::protocol::TType _vtype363; + xfer += iprot->readMapBegin(_ktype362, _vtype363, _size361); + uint32_t _i365; + for (_i365 = 0; _i365 < _size361; ++_i365) { - std::string _key361; - xfer += iprot->readString(_key361); - std::string& _val362 = this->properties[_key361]; - xfer += iprot->readString(_val362); + std::string _key366; + xfer += iprot->readString(_key366); + std::string& _val367 = this->properties[_key366]; + xfer += iprot->readString(_val367); } xfer += iprot->readMapEnd(); } @@ -10212,10 +10570,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fieldSchemas.size())); - std::vector ::const_iterator _iter363; - for (_iter363 = this->fieldSchemas.begin(); _iter363 != this->fieldSchemas.end(); ++_iter363) + std::vector ::const_iterator _iter368; + for (_iter368 = this->fieldSchemas.begin(); _iter368 != this->fieldSchemas.end(); ++_iter368) { - xfer += (*_iter363).write(oprot); + xfer += (*_iter368).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10224,11 +10582,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter364; - for (_iter364 = this->properties.begin(); _iter364 != this->properties.end(); ++_iter364) + std::map ::const_iterator _iter369; + for (_iter369 = this->properties.begin(); _iter369 != this->properties.end(); ++_iter369) { - xfer += oprot->writeString(_iter364->first); - xfer += oprot->writeString(_iter364->second); + xfer += oprot->writeString(_iter369->first); + xfer += oprot->writeString(_iter369->second); } xfer += oprot->writeMapEnd(); } @@ -10246,15 +10604,15 @@ void swap(Schema &a, Schema &b) { swap(a.__isset, b.__isset); } -Schema::Schema(const Schema& other365) { - fieldSchemas = other365.fieldSchemas; - properties = other365.properties; - __isset = 
other365.__isset; +Schema::Schema(const Schema& other370) { + fieldSchemas = other370.fieldSchemas; + properties = other370.properties; + __isset = other370.__isset; } -Schema& Schema::operator=(const Schema& other366) { - fieldSchemas = other366.fieldSchemas; - properties = other366.properties; - __isset = other366.__isset; +Schema& Schema::operator=(const Schema& other371) { + fieldSchemas = other371.fieldSchemas; + properties = other371.properties; + __isset = other371.__isset; return *this; } void Schema::printTo(std::ostream& out) const { @@ -10299,17 +10657,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size367; - ::apache::thrift::protocol::TType _ktype368; - ::apache::thrift::protocol::TType _vtype369; - xfer += iprot->readMapBegin(_ktype368, _vtype369, _size367); - uint32_t _i371; - for (_i371 = 0; _i371 < _size367; ++_i371) + uint32_t _size372; + ::apache::thrift::protocol::TType _ktype373; + ::apache::thrift::protocol::TType _vtype374; + xfer += iprot->readMapBegin(_ktype373, _vtype374, _size372); + uint32_t _i376; + for (_i376 = 0; _i376 < _size372; ++_i376) { - std::string _key372; - xfer += iprot->readString(_key372); - std::string& _val373 = this->properties[_key372]; - xfer += iprot->readString(_val373); + std::string _key377; + xfer += iprot->readString(_key377); + std::string& _val378 = this->properties[_key377]; + xfer += iprot->readString(_val378); } xfer += iprot->readMapEnd(); } @@ -10338,11 +10696,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter374; - for (_iter374 = this->properties.begin(); _iter374 != this->properties.end(); ++_iter374) + std::map ::const_iterator _iter379; + for (_iter379 = this->properties.begin(); _iter379 != this->properties.end(); ++_iter379) { - xfer += oprot->writeString(_iter374->first); - xfer += oprot->writeString(_iter374->second); + xfer += oprot->writeString(_iter379->first); + xfer += oprot->writeString(_iter379->second); } xfer += oprot->writeMapEnd(); } @@ -10359,13 +10717,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) { swap(a.__isset, b.__isset); } -EnvironmentContext::EnvironmentContext(const EnvironmentContext& other375) { - properties = other375.properties; - __isset = other375.__isset; +EnvironmentContext::EnvironmentContext(const EnvironmentContext& other380) { + properties = other380.properties; + __isset = other380.__isset; } -EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other376) { - properties = other376.properties; - __isset = other376.__isset; +EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other381) { + properties = other381.properties; + __isset = other381.__isset; return *this; } void EnvironmentContext::printTo(std::ostream& out) const { @@ -10487,17 +10845,17 @@ void swap(PrimaryKeysRequest &a, PrimaryKeysRequest &b) { swap(a.__isset, b.__isset); } -PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other377) { - db_name = other377.db_name; - tbl_name = other377.tbl_name; - catName = other377.catName; - __isset = other377.__isset; +PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& 
other382) { + db_name = other382.db_name; + tbl_name = other382.tbl_name; + catName = other382.catName; + __isset = other382.__isset; } -PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other378) { - db_name = other378.db_name; - tbl_name = other378.tbl_name; - catName = other378.catName; - __isset = other378.__isset; +PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other383) { + db_name = other383.db_name; + tbl_name = other383.tbl_name; + catName = other383.catName; + __isset = other383.__isset; return *this; } void PrimaryKeysRequest::printTo(std::ostream& out) const { @@ -10544,14 +10902,14 @@ uint32_t PrimaryKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size379; - ::apache::thrift::protocol::TType _etype382; - xfer += iprot->readListBegin(_etype382, _size379); - this->primaryKeys.resize(_size379); - uint32_t _i383; - for (_i383 = 0; _i383 < _size379; ++_i383) + uint32_t _size384; + ::apache::thrift::protocol::TType _etype387; + xfer += iprot->readListBegin(_etype387, _size384); + this->primaryKeys.resize(_size384); + uint32_t _i388; + for (_i388 = 0; _i388 < _size384; ++_i388) { - xfer += this->primaryKeys[_i383].read(iprot); + xfer += this->primaryKeys[_i388].read(iprot); } xfer += iprot->readListEnd(); } @@ -10582,10 +10940,10 @@ uint32_t PrimaryKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter384; - for (_iter384 = this->primaryKeys.begin(); _iter384 != this->primaryKeys.end(); ++_iter384) + std::vector ::const_iterator _iter389; + for (_iter389 = this->primaryKeys.begin(); _iter389 != this->primaryKeys.end(); ++_iter389) { - xfer += (*_iter384).write(oprot); + xfer += (*_iter389).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10601,11 +10959,11 @@ void swap(PrimaryKeysResponse &a, PrimaryKeysResponse &b) { swap(a.primaryKeys, b.primaryKeys); } -PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other385) { - primaryKeys = other385.primaryKeys; +PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other390) { + primaryKeys = other390.primaryKeys; } -PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other386) { - primaryKeys = other386.primaryKeys; +PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other391) { + primaryKeys = other391.primaryKeys; return *this; } void PrimaryKeysResponse::printTo(std::ostream& out) const { @@ -10755,21 +11113,21 @@ void swap(ForeignKeysRequest &a, ForeignKeysRequest &b) { swap(a.__isset, b.__isset); } -ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other387) { - parent_db_name = other387.parent_db_name; - parent_tbl_name = other387.parent_tbl_name; - foreign_db_name = other387.foreign_db_name; - foreign_tbl_name = other387.foreign_tbl_name; - catName = other387.catName; - __isset = other387.__isset; -} -ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other388) { - parent_db_name = other388.parent_db_name; - parent_tbl_name = other388.parent_tbl_name; - foreign_db_name = other388.foreign_db_name; - foreign_tbl_name = other388.foreign_tbl_name; - catName = other388.catName; - __isset = other388.__isset; 
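// The otherNNN/_sizeNNN/_iterNNN churn in this hunk and the ones that follow
// is pure renumbering: the Thrift compiler numbers generated temporaries
// sequentially through the file, so the fields and ecastNNN casts added to
// the stats structs above shift every later counter (other387 -> other392
// here, the offset growing past each new ecast temporary). Behaviour is
// unchanged; each regenerated copy constructor remains a member-wise copy,
// equivalent to this hand-written sketch:
//
//   ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other)
//     : parent_db_name(other.parent_db_name),
//       parent_tbl_name(other.parent_tbl_name),
//       foreign_db_name(other.foreign_db_name),
//       foreign_tbl_name(other.foreign_tbl_name),
//       catName(other.catName),
//       __isset(other.__isset) {}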
+ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other392) { + parent_db_name = other392.parent_db_name; + parent_tbl_name = other392.parent_tbl_name; + foreign_db_name = other392.foreign_db_name; + foreign_tbl_name = other392.foreign_tbl_name; + catName = other392.catName; + __isset = other392.__isset; +} +ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other393) { + parent_db_name = other393.parent_db_name; + parent_tbl_name = other393.parent_tbl_name; + foreign_db_name = other393.foreign_db_name; + foreign_tbl_name = other393.foreign_tbl_name; + catName = other393.catName; + __isset = other393.__isset; return *this; } void ForeignKeysRequest::printTo(std::ostream& out) const { @@ -10818,14 +11176,14 @@ uint32_t ForeignKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size389; - ::apache::thrift::protocol::TType _etype392; - xfer += iprot->readListBegin(_etype392, _size389); - this->foreignKeys.resize(_size389); - uint32_t _i393; - for (_i393 = 0; _i393 < _size389; ++_i393) + uint32_t _size394; + ::apache::thrift::protocol::TType _etype397; + xfer += iprot->readListBegin(_etype397, _size394); + this->foreignKeys.resize(_size394); + uint32_t _i398; + for (_i398 = 0; _i398 < _size394; ++_i398) { - xfer += this->foreignKeys[_i393].read(iprot); + xfer += this->foreignKeys[_i398].read(iprot); } xfer += iprot->readListEnd(); } @@ -10856,10 +11214,10 @@ uint32_t ForeignKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter394; - for (_iter394 = this->foreignKeys.begin(); _iter394 != this->foreignKeys.end(); ++_iter394) + std::vector ::const_iterator _iter399; + for (_iter399 = this->foreignKeys.begin(); _iter399 != this->foreignKeys.end(); ++_iter399) { - xfer += (*_iter394).write(oprot); + xfer += (*_iter399).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10875,11 +11233,11 @@ void swap(ForeignKeysResponse &a, ForeignKeysResponse &b) { swap(a.foreignKeys, b.foreignKeys); } -ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other395) { - foreignKeys = other395.foreignKeys; +ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other400) { + foreignKeys = other400.foreignKeys; } -ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other396) { - foreignKeys = other396.foreignKeys; +ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other401) { + foreignKeys = other401.foreignKeys; return *this; } void ForeignKeysResponse::printTo(std::ostream& out) const { @@ -11001,15 +11359,15 @@ void swap(UniqueConstraintsRequest &a, UniqueConstraintsRequest &b) { swap(a.tbl_name, b.tbl_name); } -UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other397) { - catName = other397.catName; - db_name = other397.db_name; - tbl_name = other397.tbl_name; +UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other402) { + catName = other402.catName; + db_name = other402.db_name; + tbl_name = other402.tbl_name; } -UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other398) { - catName = other398.catName; - db_name = other398.db_name; - 
tbl_name = other398.tbl_name; +UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other403) { + catName = other403.catName; + db_name = other403.db_name; + tbl_name = other403.tbl_name; return *this; } void UniqueConstraintsRequest::printTo(std::ostream& out) const { @@ -11056,14 +11414,14 @@ uint32_t UniqueConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size399; - ::apache::thrift::protocol::TType _etype402; - xfer += iprot->readListBegin(_etype402, _size399); - this->uniqueConstraints.resize(_size399); - uint32_t _i403; - for (_i403 = 0; _i403 < _size399; ++_i403) + uint32_t _size404; + ::apache::thrift::protocol::TType _etype407; + xfer += iprot->readListBegin(_etype407, _size404); + this->uniqueConstraints.resize(_size404); + uint32_t _i408; + for (_i408 = 0; _i408 < _size404; ++_i408) { - xfer += this->uniqueConstraints[_i403].read(iprot); + xfer += this->uniqueConstraints[_i408].read(iprot); } xfer += iprot->readListEnd(); } @@ -11094,10 +11452,10 @@ uint32_t UniqueConstraintsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter404; - for (_iter404 = this->uniqueConstraints.begin(); _iter404 != this->uniqueConstraints.end(); ++_iter404) + std::vector ::const_iterator _iter409; + for (_iter409 = this->uniqueConstraints.begin(); _iter409 != this->uniqueConstraints.end(); ++_iter409) { - xfer += (*_iter404).write(oprot); + xfer += (*_iter409).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11113,11 +11471,11 @@ void swap(UniqueConstraintsResponse &a, UniqueConstraintsResponse &b) { swap(a.uniqueConstraints, b.uniqueConstraints); } -UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other405) { - uniqueConstraints = other405.uniqueConstraints; +UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other410) { + uniqueConstraints = other410.uniqueConstraints; } -UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other406) { - uniqueConstraints = other406.uniqueConstraints; +UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other411) { + uniqueConstraints = other411.uniqueConstraints; return *this; } void UniqueConstraintsResponse::printTo(std::ostream& out) const { @@ -11239,15 +11597,15 @@ void swap(NotNullConstraintsRequest &a, NotNullConstraintsRequest &b) { swap(a.tbl_name, b.tbl_name); } -NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other407) { - catName = other407.catName; - db_name = other407.db_name; - tbl_name = other407.tbl_name; +NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other412) { + catName = other412.catName; + db_name = other412.db_name; + tbl_name = other412.tbl_name; } -NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other408) { - catName = other408.catName; - db_name = other408.db_name; - tbl_name = other408.tbl_name; +NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other413) { + catName = other413.catName; + db_name = 
other413.db_name; + tbl_name = other413.tbl_name; return *this; } void NotNullConstraintsRequest::printTo(std::ostream& out) const { @@ -11294,14 +11652,14 @@ uint32_t NotNullConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size409; - ::apache::thrift::protocol::TType _etype412; - xfer += iprot->readListBegin(_etype412, _size409); - this->notNullConstraints.resize(_size409); - uint32_t _i413; - for (_i413 = 0; _i413 < _size409; ++_i413) + uint32_t _size414; + ::apache::thrift::protocol::TType _etype417; + xfer += iprot->readListBegin(_etype417, _size414); + this->notNullConstraints.resize(_size414); + uint32_t _i418; + for (_i418 = 0; _i418 < _size414; ++_i418) { - xfer += this->notNullConstraints[_i413].read(iprot); + xfer += this->notNullConstraints[_i418].read(iprot); } xfer += iprot->readListEnd(); } @@ -11332,10 +11690,10 @@ uint32_t NotNullConstraintsResponse::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter414; - for (_iter414 = this->notNullConstraints.begin(); _iter414 != this->notNullConstraints.end(); ++_iter414) + std::vector ::const_iterator _iter419; + for (_iter419 = this->notNullConstraints.begin(); _iter419 != this->notNullConstraints.end(); ++_iter419) { - xfer += (*_iter414).write(oprot); + xfer += (*_iter419).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11351,11 +11709,11 @@ void swap(NotNullConstraintsResponse &a, NotNullConstraintsResponse &b) { swap(a.notNullConstraints, b.notNullConstraints); } -NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other415) { - notNullConstraints = other415.notNullConstraints; +NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other420) { + notNullConstraints = other420.notNullConstraints; } -NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other416) { - notNullConstraints = other416.notNullConstraints; +NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other421) { + notNullConstraints = other421.notNullConstraints; return *this; } void NotNullConstraintsResponse::printTo(std::ostream& out) const { @@ -11477,15 +11835,15 @@ void swap(DefaultConstraintsRequest &a, DefaultConstraintsRequest &b) { swap(a.tbl_name, b.tbl_name); } -DefaultConstraintsRequest::DefaultConstraintsRequest(const DefaultConstraintsRequest& other417) { - catName = other417.catName; - db_name = other417.db_name; - tbl_name = other417.tbl_name; +DefaultConstraintsRequest::DefaultConstraintsRequest(const DefaultConstraintsRequest& other422) { + catName = other422.catName; + db_name = other422.db_name; + tbl_name = other422.tbl_name; } -DefaultConstraintsRequest& DefaultConstraintsRequest::operator=(const DefaultConstraintsRequest& other418) { - catName = other418.catName; - db_name = other418.db_name; - tbl_name = other418.tbl_name; +DefaultConstraintsRequest& DefaultConstraintsRequest::operator=(const DefaultConstraintsRequest& other423) { + catName = other423.catName; + db_name = other423.db_name; + tbl_name = other423.tbl_name; return *this; } void DefaultConstraintsRequest::printTo(std::ostream& out) const { @@ -11532,14 +11890,14 
@@ uint32_t DefaultConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->defaultConstraints.clear(); - uint32_t _size419; - ::apache::thrift::protocol::TType _etype422; - xfer += iprot->readListBegin(_etype422, _size419); - this->defaultConstraints.resize(_size419); - uint32_t _i423; - for (_i423 = 0; _i423 < _size419; ++_i423) + uint32_t _size424; + ::apache::thrift::protocol::TType _etype427; + xfer += iprot->readListBegin(_etype427, _size424); + this->defaultConstraints.resize(_size424); + uint32_t _i428; + for (_i428 = 0; _i428 < _size424; ++_i428) { - xfer += this->defaultConstraints[_i423].read(iprot); + xfer += this->defaultConstraints[_i428].read(iprot); } xfer += iprot->readListEnd(); } @@ -11570,10 +11928,10 @@ uint32_t DefaultConstraintsResponse::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraints.size())); - std::vector ::const_iterator _iter424; - for (_iter424 = this->defaultConstraints.begin(); _iter424 != this->defaultConstraints.end(); ++_iter424) + std::vector ::const_iterator _iter429; + for (_iter429 = this->defaultConstraints.begin(); _iter429 != this->defaultConstraints.end(); ++_iter429) { - xfer += (*_iter424).write(oprot); + xfer += (*_iter429).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11589,11 +11947,11 @@ void swap(DefaultConstraintsResponse &a, DefaultConstraintsResponse &b) { swap(a.defaultConstraints, b.defaultConstraints); } -DefaultConstraintsResponse::DefaultConstraintsResponse(const DefaultConstraintsResponse& other425) { - defaultConstraints = other425.defaultConstraints; +DefaultConstraintsResponse::DefaultConstraintsResponse(const DefaultConstraintsResponse& other430) { + defaultConstraints = other430.defaultConstraints; } -DefaultConstraintsResponse& DefaultConstraintsResponse::operator=(const DefaultConstraintsResponse& other426) { - defaultConstraints = other426.defaultConstraints; +DefaultConstraintsResponse& DefaultConstraintsResponse::operator=(const DefaultConstraintsResponse& other431) { + defaultConstraints = other431.defaultConstraints; return *this; } void DefaultConstraintsResponse::printTo(std::ostream& out) const { @@ -11715,15 +12073,15 @@ void swap(CheckConstraintsRequest &a, CheckConstraintsRequest &b) { swap(a.tbl_name, b.tbl_name); } -CheckConstraintsRequest::CheckConstraintsRequest(const CheckConstraintsRequest& other427) { - catName = other427.catName; - db_name = other427.db_name; - tbl_name = other427.tbl_name; +CheckConstraintsRequest::CheckConstraintsRequest(const CheckConstraintsRequest& other432) { + catName = other432.catName; + db_name = other432.db_name; + tbl_name = other432.tbl_name; } -CheckConstraintsRequest& CheckConstraintsRequest::operator=(const CheckConstraintsRequest& other428) { - catName = other428.catName; - db_name = other428.db_name; - tbl_name = other428.tbl_name; +CheckConstraintsRequest& CheckConstraintsRequest::operator=(const CheckConstraintsRequest& other433) { + catName = other433.catName; + db_name = other433.db_name; + tbl_name = other433.tbl_name; return *this; } void CheckConstraintsRequest::printTo(std::ostream& out) const { @@ -11770,14 +12128,14 @@ uint32_t CheckConstraintsResponse::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->checkConstraints.clear(); - uint32_t 
_size429; - ::apache::thrift::protocol::TType _etype432; - xfer += iprot->readListBegin(_etype432, _size429); - this->checkConstraints.resize(_size429); - uint32_t _i433; - for (_i433 = 0; _i433 < _size429; ++_i433) + uint32_t _size434; + ::apache::thrift::protocol::TType _etype437; + xfer += iprot->readListBegin(_etype437, _size434); + this->checkConstraints.resize(_size434); + uint32_t _i438; + for (_i438 = 0; _i438 < _size434; ++_i438) { - xfer += this->checkConstraints[_i433].read(iprot); + xfer += this->checkConstraints[_i438].read(iprot); } xfer += iprot->readListEnd(); } @@ -11808,10 +12166,10 @@ uint32_t CheckConstraintsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->checkConstraints.size())); - std::vector ::const_iterator _iter434; - for (_iter434 = this->checkConstraints.begin(); _iter434 != this->checkConstraints.end(); ++_iter434) + std::vector ::const_iterator _iter439; + for (_iter439 = this->checkConstraints.begin(); _iter439 != this->checkConstraints.end(); ++_iter439) { - xfer += (*_iter434).write(oprot); + xfer += (*_iter439).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11827,11 +12185,11 @@ void swap(CheckConstraintsResponse &a, CheckConstraintsResponse &b) { swap(a.checkConstraints, b.checkConstraints); } -CheckConstraintsResponse::CheckConstraintsResponse(const CheckConstraintsResponse& other435) { - checkConstraints = other435.checkConstraints; +CheckConstraintsResponse::CheckConstraintsResponse(const CheckConstraintsResponse& other440) { + checkConstraints = other440.checkConstraints; } -CheckConstraintsResponse& CheckConstraintsResponse::operator=(const CheckConstraintsResponse& other436) { - checkConstraints = other436.checkConstraints; +CheckConstraintsResponse& CheckConstraintsResponse::operator=(const CheckConstraintsResponse& other441) { + checkConstraints = other441.checkConstraints; return *this; } void CheckConstraintsResponse::printTo(std::ostream& out) const { @@ -11973,19 +12331,19 @@ void swap(DropConstraintRequest &a, DropConstraintRequest &b) { swap(a.__isset, b.__isset); } -DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other437) { - dbname = other437.dbname; - tablename = other437.tablename; - constraintname = other437.constraintname; - catName = other437.catName; - __isset = other437.__isset; +DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other442) { + dbname = other442.dbname; + tablename = other442.tablename; + constraintname = other442.constraintname; + catName = other442.catName; + __isset = other442.__isset; } -DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other438) { - dbname = other438.dbname; - tablename = other438.tablename; - constraintname = other438.constraintname; - catName = other438.catName; - __isset = other438.__isset; +DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other443) { + dbname = other443.dbname; + tablename = other443.tablename; + constraintname = other443.constraintname; + catName = other443.catName; + __isset = other443.__isset; return *this; } void DropConstraintRequest::printTo(std::ostream& out) const { @@ -12033,14 +12391,14 @@ uint32_t AddPrimaryKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeyCols.clear(); - 
uint32_t _size439; - ::apache::thrift::protocol::TType _etype442; - xfer += iprot->readListBegin(_etype442, _size439); - this->primaryKeyCols.resize(_size439); - uint32_t _i443; - for (_i443 = 0; _i443 < _size439; ++_i443) + uint32_t _size444; + ::apache::thrift::protocol::TType _etype447; + xfer += iprot->readListBegin(_etype447, _size444); + this->primaryKeyCols.resize(_size444); + uint32_t _i448; + for (_i448 = 0; _i448 < _size444; ++_i448) { - xfer += this->primaryKeyCols[_i443].read(iprot); + xfer += this->primaryKeyCols[_i448].read(iprot); } xfer += iprot->readListEnd(); } @@ -12071,10 +12429,10 @@ uint32_t AddPrimaryKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("primaryKeyCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeyCols.size())); - std::vector ::const_iterator _iter444; - for (_iter444 = this->primaryKeyCols.begin(); _iter444 != this->primaryKeyCols.end(); ++_iter444) + std::vector ::const_iterator _iter449; + for (_iter449 = this->primaryKeyCols.begin(); _iter449 != this->primaryKeyCols.end(); ++_iter449) { - xfer += (*_iter444).write(oprot); + xfer += (*_iter449).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12090,11 +12448,11 @@ void swap(AddPrimaryKeyRequest &a, AddPrimaryKeyRequest &b) { swap(a.primaryKeyCols, b.primaryKeyCols); } -AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other445) { - primaryKeyCols = other445.primaryKeyCols; +AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other450) { + primaryKeyCols = other450.primaryKeyCols; } -AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other446) { - primaryKeyCols = other446.primaryKeyCols; +AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other451) { + primaryKeyCols = other451.primaryKeyCols; return *this; } void AddPrimaryKeyRequest::printTo(std::ostream& out) const { @@ -12139,14 +12497,14 @@ uint32_t AddForeignKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeyCols.clear(); - uint32_t _size447; - ::apache::thrift::protocol::TType _etype450; - xfer += iprot->readListBegin(_etype450, _size447); - this->foreignKeyCols.resize(_size447); - uint32_t _i451; - for (_i451 = 0; _i451 < _size447; ++_i451) + uint32_t _size452; + ::apache::thrift::protocol::TType _etype455; + xfer += iprot->readListBegin(_etype455, _size452); + this->foreignKeyCols.resize(_size452); + uint32_t _i456; + for (_i456 = 0; _i456 < _size452; ++_i456) { - xfer += this->foreignKeyCols[_i451].read(iprot); + xfer += this->foreignKeyCols[_i456].read(iprot); } xfer += iprot->readListEnd(); } @@ -12177,10 +12535,10 @@ uint32_t AddForeignKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("foreignKeyCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeyCols.size())); - std::vector ::const_iterator _iter452; - for (_iter452 = this->foreignKeyCols.begin(); _iter452 != this->foreignKeyCols.end(); ++_iter452) + std::vector ::const_iterator _iter457; + for (_iter457 = this->foreignKeyCols.begin(); _iter457 != this->foreignKeyCols.end(); ++_iter457) { - xfer += (*_iter452).write(oprot); + xfer += (*_iter457).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12196,11 +12554,11 @@ void 
swap(AddForeignKeyRequest &a, AddForeignKeyRequest &b) { swap(a.foreignKeyCols, b.foreignKeyCols); } -AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other453) { - foreignKeyCols = other453.foreignKeyCols; +AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other458) { + foreignKeyCols = other458.foreignKeyCols; } -AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other454) { - foreignKeyCols = other454.foreignKeyCols; +AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other459) { + foreignKeyCols = other459.foreignKeyCols; return *this; } void AddForeignKeyRequest::printTo(std::ostream& out) const { @@ -12245,14 +12603,14 @@ uint32_t AddUniqueConstraintRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraintCols.clear(); - uint32_t _size455; - ::apache::thrift::protocol::TType _etype458; - xfer += iprot->readListBegin(_etype458, _size455); - this->uniqueConstraintCols.resize(_size455); - uint32_t _i459; - for (_i459 = 0; _i459 < _size455; ++_i459) + uint32_t _size460; + ::apache::thrift::protocol::TType _etype463; + xfer += iprot->readListBegin(_etype463, _size460); + this->uniqueConstraintCols.resize(_size460); + uint32_t _i464; + for (_i464 = 0; _i464 < _size460; ++_i464) { - xfer += this->uniqueConstraintCols[_i459].read(iprot); + xfer += this->uniqueConstraintCols[_i464].read(iprot); } xfer += iprot->readListEnd(); } @@ -12283,10 +12641,10 @@ uint32_t AddUniqueConstraintRequest::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("uniqueConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraintCols.size())); - std::vector ::const_iterator _iter460; - for (_iter460 = this->uniqueConstraintCols.begin(); _iter460 != this->uniqueConstraintCols.end(); ++_iter460) + std::vector ::const_iterator _iter465; + for (_iter465 = this->uniqueConstraintCols.begin(); _iter465 != this->uniqueConstraintCols.end(); ++_iter465) { - xfer += (*_iter460).write(oprot); + xfer += (*_iter465).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12302,11 +12660,11 @@ void swap(AddUniqueConstraintRequest &a, AddUniqueConstraintRequest &b) { swap(a.uniqueConstraintCols, b.uniqueConstraintCols); } -AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other461) { - uniqueConstraintCols = other461.uniqueConstraintCols; +AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other466) { + uniqueConstraintCols = other466.uniqueConstraintCols; } -AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other462) { - uniqueConstraintCols = other462.uniqueConstraintCols; +AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other467) { + uniqueConstraintCols = other467.uniqueConstraintCols; return *this; } void AddUniqueConstraintRequest::printTo(std::ostream& out) const { @@ -12351,14 +12709,14 @@ uint32_t AddNotNullConstraintRequest::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraintCols.clear(); - uint32_t _size463; - ::apache::thrift::protocol::TType _etype466; - xfer += iprot->readListBegin(_etype466, _size463); - this->notNullConstraintCols.resize(_size463); - uint32_t _i467; - for (_i467 = 
0; _i467 < _size463; ++_i467) + uint32_t _size468; + ::apache::thrift::protocol::TType _etype471; + xfer += iprot->readListBegin(_etype471, _size468); + this->notNullConstraintCols.resize(_size468); + uint32_t _i472; + for (_i472 = 0; _i472 < _size468; ++_i472) { - xfer += this->notNullConstraintCols[_i467].read(iprot); + xfer += this->notNullConstraintCols[_i472].read(iprot); } xfer += iprot->readListEnd(); } @@ -12389,10 +12747,10 @@ uint32_t AddNotNullConstraintRequest::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("notNullConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraintCols.size())); - std::vector ::const_iterator _iter468; - for (_iter468 = this->notNullConstraintCols.begin(); _iter468 != this->notNullConstraintCols.end(); ++_iter468) + std::vector ::const_iterator _iter473; + for (_iter473 = this->notNullConstraintCols.begin(); _iter473 != this->notNullConstraintCols.end(); ++_iter473) { - xfer += (*_iter468).write(oprot); + xfer += (*_iter473).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12408,11 +12766,11 @@ void swap(AddNotNullConstraintRequest &a, AddNotNullConstraintRequest &b) { swap(a.notNullConstraintCols, b.notNullConstraintCols); } -AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other469) { - notNullConstraintCols = other469.notNullConstraintCols; +AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other474) { + notNullConstraintCols = other474.notNullConstraintCols; } -AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other470) { - notNullConstraintCols = other470.notNullConstraintCols; +AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other475) { + notNullConstraintCols = other475.notNullConstraintCols; return *this; } void AddNotNullConstraintRequest::printTo(std::ostream& out) const { @@ -12457,14 +12815,14 @@ uint32_t AddDefaultConstraintRequest::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->defaultConstraintCols.clear(); - uint32_t _size471; - ::apache::thrift::protocol::TType _etype474; - xfer += iprot->readListBegin(_etype474, _size471); - this->defaultConstraintCols.resize(_size471); - uint32_t _i475; - for (_i475 = 0; _i475 < _size471; ++_i475) + uint32_t _size476; + ::apache::thrift::protocol::TType _etype479; + xfer += iprot->readListBegin(_etype479, _size476); + this->defaultConstraintCols.resize(_size476); + uint32_t _i480; + for (_i480 = 0; _i480 < _size476; ++_i480) { - xfer += this->defaultConstraintCols[_i475].read(iprot); + xfer += this->defaultConstraintCols[_i480].read(iprot); } xfer += iprot->readListEnd(); } @@ -12495,10 +12853,10 @@ uint32_t AddDefaultConstraintRequest::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("defaultConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraintCols.size())); - std::vector ::const_iterator _iter476; - for (_iter476 = this->defaultConstraintCols.begin(); _iter476 != this->defaultConstraintCols.end(); ++_iter476) + std::vector ::const_iterator _iter481; + for (_iter481 = this->defaultConstraintCols.begin(); _iter481 != this->defaultConstraintCols.end(); ++_iter481) { - xfer += 
(*_iter476).write(oprot); + xfer += (*_iter481).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12514,11 +12872,11 @@ void swap(AddDefaultConstraintRequest &a, AddDefaultConstraintRequest &b) { swap(a.defaultConstraintCols, b.defaultConstraintCols); } -AddDefaultConstraintRequest::AddDefaultConstraintRequest(const AddDefaultConstraintRequest& other477) { - defaultConstraintCols = other477.defaultConstraintCols; +AddDefaultConstraintRequest::AddDefaultConstraintRequest(const AddDefaultConstraintRequest& other482) { + defaultConstraintCols = other482.defaultConstraintCols; } -AddDefaultConstraintRequest& AddDefaultConstraintRequest::operator=(const AddDefaultConstraintRequest& other478) { - defaultConstraintCols = other478.defaultConstraintCols; +AddDefaultConstraintRequest& AddDefaultConstraintRequest::operator=(const AddDefaultConstraintRequest& other483) { + defaultConstraintCols = other483.defaultConstraintCols; return *this; } void AddDefaultConstraintRequest::printTo(std::ostream& out) const { @@ -12563,14 +12921,14 @@ uint32_t AddCheckConstraintRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->checkConstraintCols.clear(); - uint32_t _size479; - ::apache::thrift::protocol::TType _etype482; - xfer += iprot->readListBegin(_etype482, _size479); - this->checkConstraintCols.resize(_size479); - uint32_t _i483; - for (_i483 = 0; _i483 < _size479; ++_i483) + uint32_t _size484; + ::apache::thrift::protocol::TType _etype487; + xfer += iprot->readListBegin(_etype487, _size484); + this->checkConstraintCols.resize(_size484); + uint32_t _i488; + for (_i488 = 0; _i488 < _size484; ++_i488) { - xfer += this->checkConstraintCols[_i483].read(iprot); + xfer += this->checkConstraintCols[_i488].read(iprot); } xfer += iprot->readListEnd(); } @@ -12601,10 +12959,10 @@ uint32_t AddCheckConstraintRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("checkConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->checkConstraintCols.size())); - std::vector ::const_iterator _iter484; - for (_iter484 = this->checkConstraintCols.begin(); _iter484 != this->checkConstraintCols.end(); ++_iter484) + std::vector ::const_iterator _iter489; + for (_iter489 = this->checkConstraintCols.begin(); _iter489 != this->checkConstraintCols.end(); ++_iter489) { - xfer += (*_iter484).write(oprot); + xfer += (*_iter489).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12620,11 +12978,11 @@ void swap(AddCheckConstraintRequest &a, AddCheckConstraintRequest &b) { swap(a.checkConstraintCols, b.checkConstraintCols); } -AddCheckConstraintRequest::AddCheckConstraintRequest(const AddCheckConstraintRequest& other485) { - checkConstraintCols = other485.checkConstraintCols; +AddCheckConstraintRequest::AddCheckConstraintRequest(const AddCheckConstraintRequest& other490) { + checkConstraintCols = other490.checkConstraintCols; } -AddCheckConstraintRequest& AddCheckConstraintRequest::operator=(const AddCheckConstraintRequest& other486) { - checkConstraintCols = other486.checkConstraintCols; +AddCheckConstraintRequest& AddCheckConstraintRequest::operator=(const AddCheckConstraintRequest& other491) { + checkConstraintCols = other491.checkConstraintCols; return *this; } void AddCheckConstraintRequest::printTo(std::ostream& out) const { @@ -12674,14 +13032,14 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size487; - ::apache::thrift::protocol::TType _etype490; - xfer += iprot->readListBegin(_etype490, _size487); - this->partitions.resize(_size487); - uint32_t _i491; - for (_i491 = 0; _i491 < _size487; ++_i491) + uint32_t _size492; + ::apache::thrift::protocol::TType _etype495; + xfer += iprot->readListBegin(_etype495, _size492); + this->partitions.resize(_size492); + uint32_t _i496; + for (_i496 = 0; _i496 < _size492; ++_i496) { - xfer += this->partitions[_i491].read(iprot); + xfer += this->partitions[_i496].read(iprot); } xfer += iprot->readListEnd(); } @@ -12722,10 +13080,10 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter492; - for (_iter492 = this->partitions.begin(); _iter492 != this->partitions.end(); ++_iter492) + std::vector ::const_iterator _iter497; + for (_iter497 = this->partitions.begin(); _iter497 != this->partitions.end(); ++_iter497) { - xfer += (*_iter492).write(oprot); + xfer += (*_iter497).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12746,13 +13104,13 @@ void swap(PartitionsByExprResult &a, PartitionsByExprResult &b) { swap(a.hasUnknownPartitions, b.hasUnknownPartitions); } -PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other493) { - partitions = other493.partitions; - hasUnknownPartitions = other493.hasUnknownPartitions; +PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other498) { + partitions = other498.partitions; + hasUnknownPartitions = other498.hasUnknownPartitions; } -PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other494) { - partitions = other494.partitions; - hasUnknownPartitions = other494.hasUnknownPartitions; +PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other499) { + partitions = other499.partitions; + hasUnknownPartitions = other499.hasUnknownPartitions; return *this; } void PartitionsByExprResult::printTo(std::ostream& out) const { @@ -12933,23 +13291,23 @@ void swap(PartitionsByExprRequest &a, PartitionsByExprRequest &b) { swap(a.__isset, b.__isset); } -PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other495) { - dbName = other495.dbName; - tblName = other495.tblName; - expr = other495.expr; - defaultPartitionName = other495.defaultPartitionName; - maxParts = other495.maxParts; - catName = other495.catName; - __isset = other495.__isset; -} -PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other496) { - dbName = other496.dbName; - tblName = other496.tblName; - expr = other496.expr; - defaultPartitionName = other496.defaultPartitionName; - maxParts = other496.maxParts; - catName = other496.catName; - __isset = other496.__isset; +PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other500) { + dbName = other500.dbName; + tblName = other500.tblName; + expr = other500.expr; + defaultPartitionName = other500.defaultPartitionName; + maxParts = other500.maxParts; + catName = other500.catName; + __isset = other500.__isset; +} +PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other501) { + dbName = other501.dbName; + tblName = 
other501.tblName; + expr = other501.expr; + defaultPartitionName = other501.defaultPartitionName; + maxParts = other501.maxParts; + catName = other501.catName; + __isset = other501.__isset; return *this; } void PartitionsByExprRequest::printTo(std::ostream& out) const { @@ -12973,6 +13331,11 @@ void TableStatsResult::__set_tableStats(const std::vector & this->tableStats = val; } +void TableStatsResult::__set_isStatsCompliant(const IsolationLevelCompliance::type val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -12999,14 +13362,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tableStats.clear(); - uint32_t _size497; - ::apache::thrift::protocol::TType _etype500; - xfer += iprot->readListBegin(_etype500, _size497); - this->tableStats.resize(_size497); - uint32_t _i501; - for (_i501 = 0; _i501 < _size497; ++_i501) + uint32_t _size502; + ::apache::thrift::protocol::TType _etype505; + xfer += iprot->readListBegin(_etype505, _size502); + this->tableStats.resize(_size502); + uint32_t _i506; + for (_i506 = 0; _i506 < _size502; ++_i506) { - xfer += this->tableStats[_i501].read(iprot); + xfer += this->tableStats[_i506].read(iprot); } xfer += iprot->readListEnd(); } @@ -13015,6 +13378,16 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast507; + xfer += iprot->readI32(ecast507); + this->isStatsCompliant = (IsolationLevelCompliance::type)ecast507; + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13037,15 +13410,20 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tableStats.size())); - std::vector ::const_iterator _iter502; - for (_iter502 = this->tableStats.begin(); _iter502 != this->tableStats.end(); ++_iter502) + std::vector ::const_iterator _iter508; + for (_iter508 = this->tableStats.begin(); _iter508 != this->tableStats.end(); ++_iter508) { - xfer += (*_iter502).write(oprot); + xfer += (*_iter508).write(oprot); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32((int32_t)this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13054,19 +13432,26 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c void swap(TableStatsResult &a, TableStatsResult &b) { using ::std::swap; swap(a.tableStats, b.tableStats); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } -TableStatsResult::TableStatsResult(const TableStatsResult& other503) { - tableStats = other503.tableStats; +TableStatsResult::TableStatsResult(const TableStatsResult& other509) { + tableStats = other509.tableStats; + isStatsCompliant = other509.isStatsCompliant; + __isset = other509.__isset; } -TableStatsResult& 
TableStatsResult::operator=(const TableStatsResult& other504) { - tableStats = other504.tableStats; +TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other510) { + tableStats = other510.tableStats; + isStatsCompliant = other510.isStatsCompliant; + __isset = other510.__isset; return *this; } void TableStatsResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "TableStatsResult("; out << "tableStats=" << to_string(tableStats); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -13079,6 +13464,11 @@ void PartitionsStatsResult::__set_partStats(const std::mappartStats = val; } +void PartitionsStatsResult::__set_isStatsCompliant(const IsolationLevelCompliance::type val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13105,26 +13495,26 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partStats.clear(); - uint32_t _size505; - ::apache::thrift::protocol::TType _ktype506; - ::apache::thrift::protocol::TType _vtype507; - xfer += iprot->readMapBegin(_ktype506, _vtype507, _size505); - uint32_t _i509; - for (_i509 = 0; _i509 < _size505; ++_i509) + uint32_t _size511; + ::apache::thrift::protocol::TType _ktype512; + ::apache::thrift::protocol::TType _vtype513; + xfer += iprot->readMapBegin(_ktype512, _vtype513, _size511); + uint32_t _i515; + for (_i515 = 0; _i515 < _size511; ++_i515) { - std::string _key510; - xfer += iprot->readString(_key510); - std::vector & _val511 = this->partStats[_key510]; + std::string _key516; + xfer += iprot->readString(_key516); + std::vector & _val517 = this->partStats[_key516]; { - _val511.clear(); - uint32_t _size512; - ::apache::thrift::protocol::TType _etype515; - xfer += iprot->readListBegin(_etype515, _size512); - _val511.resize(_size512); - uint32_t _i516; - for (_i516 = 0; _i516 < _size512; ++_i516) + _val517.clear(); + uint32_t _size518; + ::apache::thrift::protocol::TType _etype521; + xfer += iprot->readListBegin(_etype521, _size518); + _val517.resize(_size518); + uint32_t _i522; + for (_i522 = 0; _i522 < _size518; ++_i522) { - xfer += _val511[_i516].read(iprot); + xfer += _val517[_i522].read(iprot); } xfer += iprot->readListEnd(); } @@ -13136,6 +13526,16 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast523; + xfer += iprot->readI32(ecast523); + this->isStatsCompliant = (IsolationLevelCompliance::type)ecast523; + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13158,16 +13558,16 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->partStats.size())); - std::map > ::const_iterator _iter517; - for (_iter517 = this->partStats.begin(); _iter517 != this->partStats.end(); ++_iter517) + std::map > ::const_iterator _iter524; + for (_iter524 = this->partStats.begin(); _iter524 != this->partStats.end(); 
++_iter524) { - xfer += oprot->writeString(_iter517->first); + xfer += oprot->writeString(_iter524->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter517->second.size())); - std::vector ::const_iterator _iter518; - for (_iter518 = _iter517->second.begin(); _iter518 != _iter517->second.end(); ++_iter518) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter524->second.size())); + std::vector ::const_iterator _iter525; + for (_iter525 = _iter524->second.begin(); _iter525 != _iter524->second.end(); ++_iter525) { - xfer += (*_iter518).write(oprot); + xfer += (*_iter525).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13176,6 +13576,11 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr } xfer += oprot->writeFieldEnd(); + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32((int32_t)this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13184,19 +13589,26 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr void swap(PartitionsStatsResult &a, PartitionsStatsResult &b) { using ::std::swap; swap(a.partStats, b.partStats); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } -PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other519) { - partStats = other519.partStats; +PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other526) { + partStats = other526.partStats; + isStatsCompliant = other526.isStatsCompliant; + __isset = other526.__isset; } -PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other520) { - partStats = other520.partStats; +PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other527) { + partStats = other527.partStats; + isStatsCompliant = other527.isStatsCompliant; + __isset = other527.__isset; return *this; } void PartitionsStatsResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "PartitionsStatsResult("; out << "partStats=" << to_string(partStats); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? 
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -13222,6 +13634,16 @@ void TableStatsRequest::__set_catName(const std::string& val) { __isset.catName = true; } +void TableStatsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void TableStatsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13266,14 +13688,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size521; - ::apache::thrift::protocol::TType _etype524; - xfer += iprot->readListBegin(_etype524, _size521); - this->colNames.resize(_size521); - uint32_t _i525; - for (_i525 = 0; _i525 < _size521; ++_i525) + uint32_t _size528; + ::apache::thrift::protocol::TType _etype531; + xfer += iprot->readListBegin(_etype531, _size528); + this->colNames.resize(_size528); + uint32_t _i532; + for (_i532 = 0; _i532 < _size528; ++_i532) { - xfer += iprot->readString(this->colNames[_i525]); + xfer += iprot->readString(this->colNames[_i532]); } xfer += iprot->readListEnd(); } @@ -13290,6 +13712,22 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13324,10 +13762,10 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter526; - for (_iter526 = this->colNames.begin(); _iter526 != this->colNames.end(); ++_iter526) + std::vector ::const_iterator _iter533; + for (_iter533 = this->colNames.begin(); _iter533 != this->colNames.end(); ++_iter533) { - xfer += oprot->writeString((*_iter526)); + xfer += oprot->writeString((*_iter533)); } xfer += oprot->writeListEnd(); } @@ -13338,6 +13776,16 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 5); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13349,22 +13797,28 @@ void swap(TableStatsRequest &a, TableStatsRequest &b) { swap(a.tblName, b.tblName); swap(a.colNames, b.colNames); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } 
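The hunks above give TableStatsRequest two new optional Thrift fields, txnId (field id 5, i64) and validWriteIdList (field id 6, string). A minimal C++ sketch of how a caller could populate them through the generated setters — the include name, the Apache::Hadoop::Hive namespace, and the snapshot string are illustrative assumptions; only the __set_* calls themselves come from the hunks above:

#include "hive_metastore_types.h"  // Thrift-generated types for hive_metastore.thrift

using Apache::Hadoop::Hive::TableStatsRequest;  // assumed cpp namespace from the IDL

TableStatsRequest buildStatsRequest() {
  TableStatsRequest req;
  req.__set_dbName("default");         // required fields; these setters pre-date the patch
  req.__set_tblName("acid_tbl");
  req.__set_colNames({"id", "name"});
  // New in this patch: each __set_* also flips the matching __isset bit, so
  // write() emits fields 5 and 6 only when they were populated, and an older
  // server's generated read() simply skips the unknown fields.
  req.__set_txnId(42);                                                      // illustrative value
  req.__set_validWriteIdList("default.acid_tbl:5:9223372036854775807::");   // illustrative snapshot
  return req;
}

The same txnId/validWriteIdList pair is threaded through PartitionsStatsRequest and AddPartitionsRequest in the hunks that follow.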
-TableStatsRequest::TableStatsRequest(const TableStatsRequest& other527) { - dbName = other527.dbName; - tblName = other527.tblName; - colNames = other527.colNames; - catName = other527.catName; - __isset = other527.__isset; -} -TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other528) { - dbName = other528.dbName; - tblName = other528.tblName; - colNames = other528.colNames; - catName = other528.catName; - __isset = other528.__isset; +TableStatsRequest::TableStatsRequest(const TableStatsRequest& other534) { + dbName = other534.dbName; + tblName = other534.tblName; + colNames = other534.colNames; + catName = other534.catName; + txnId = other534.txnId; + validWriteIdList = other534.validWriteIdList; + __isset = other534.__isset; +} +TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other535) { + dbName = other535.dbName; + tblName = other535.tblName; + colNames = other535.colNames; + catName = other535.catName; + txnId = other535.txnId; + validWriteIdList = other535.validWriteIdList; + __isset = other535.__isset; return *this; } void TableStatsRequest::printTo(std::ostream& out) const { @@ -13374,6 +13828,8 @@ void TableStatsRequest::printTo(std::ostream& out) const { out << ", " << "tblName=" << to_string(tblName); out << ", " << "colNames=" << to_string(colNames); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -13403,6 +13859,16 @@ void PartitionsStatsRequest::__set_catName(const std::string& val) { __isset.catName = true; } +void PartitionsStatsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void PartitionsStatsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13448,14 +13914,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size529; - ::apache::thrift::protocol::TType _etype532; - xfer += iprot->readListBegin(_etype532, _size529); - this->colNames.resize(_size529); - uint32_t _i533; - for (_i533 = 0; _i533 < _size529; ++_i533) + uint32_t _size536; + ::apache::thrift::protocol::TType _etype539; + xfer += iprot->readListBegin(_etype539, _size536); + this->colNames.resize(_size536); + uint32_t _i540; + for (_i540 = 0; _i540 < _size536; ++_i540) { - xfer += iprot->readString(this->colNames[_i533]); + xfer += iprot->readString(this->colNames[_i540]); } xfer += iprot->readListEnd(); } @@ -13468,14 +13934,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size534; - ::apache::thrift::protocol::TType _etype537; - xfer += iprot->readListBegin(_etype537, _size534); - this->partNames.resize(_size534); - uint32_t _i538; - for (_i538 = 0; _i538 < _size534; ++_i538) + uint32_t _size541; + ::apache::thrift::protocol::TType _etype544; + xfer += iprot->readListBegin(_etype544, _size541); + this->partNames.resize(_size541); + uint32_t _i545; + for (_i545 = 0; _i545 < _size541; 
++_i545) { - xfer += iprot->readString(this->partNames[_i538]); + xfer += iprot->readString(this->partNames[_i545]); } xfer += iprot->readListEnd(); } @@ -13492,6 +13958,22 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13528,10 +14010,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter539; - for (_iter539 = this->colNames.begin(); _iter539 != this->colNames.end(); ++_iter539) + std::vector ::const_iterator _iter546; + for (_iter546 = this->colNames.begin(); _iter546 != this->colNames.end(); ++_iter546) { - xfer += oprot->writeString((*_iter539)); + xfer += oprot->writeString((*_iter546)); } xfer += oprot->writeListEnd(); } @@ -13540,10 +14022,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter540; - for (_iter540 = this->partNames.begin(); _iter540 != this->partNames.end(); ++_iter540) + std::vector ::const_iterator _iter547; + for (_iter547 = this->partNames.begin(); _iter547 != this->partNames.end(); ++_iter547) { - xfer += oprot->writeString((*_iter540)); + xfer += oprot->writeString((*_iter547)); } xfer += oprot->writeListEnd(); } @@ -13554,6 +14036,16 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 6); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13566,24 +14058,30 @@ void swap(PartitionsStatsRequest &a, PartitionsStatsRequest &b) { swap(a.colNames, b.colNames); swap(a.partNames, b.partNames); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } -PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other541) { - dbName = other541.dbName; - tblName = other541.tblName; - colNames = other541.colNames; - partNames = other541.partNames; - catName = other541.catName; - __isset = other541.__isset; -} -PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other542) { - dbName = other542.dbName; - tblName = other542.tblName; - colNames = other542.colNames; - partNames = 
other542.partNames; - catName = other542.catName; - __isset = other542.__isset; +PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other548) { + dbName = other548.dbName; + tblName = other548.tblName; + colNames = other548.colNames; + partNames = other548.partNames; + catName = other548.catName; + txnId = other548.txnId; + validWriteIdList = other548.validWriteIdList; + __isset = other548.__isset; +} +PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other549) { + dbName = other549.dbName; + tblName = other549.tblName; + colNames = other549.colNames; + partNames = other549.partNames; + catName = other549.catName; + txnId = other549.txnId; + validWriteIdList = other549.validWriteIdList; + __isset = other549.__isset; return *this; } void PartitionsStatsRequest::printTo(std::ostream& out) const { @@ -13594,6 +14092,8 @@ void PartitionsStatsRequest::printTo(std::ostream& out) const { out << ", " << "colNames=" << to_string(colNames); out << ", " << "partNames=" << to_string(partNames); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -13607,6 +14107,11 @@ void AddPartitionsResult::__set_partitions(const std::vector & val) { __isset.partitions = true; } +void AddPartitionsResult::__set_isStatsCompliant(const IsolationLevelCompliance::type val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13632,14 +14137,14 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size543; - ::apache::thrift::protocol::TType _etype546; - xfer += iprot->readListBegin(_etype546, _size543); - this->partitions.resize(_size543); - uint32_t _i547; - for (_i547 = 0; _i547 < _size543; ++_i547) + uint32_t _size550; + ::apache::thrift::protocol::TType _etype553; + xfer += iprot->readListBegin(_etype553, _size550); + this->partitions.resize(_size550); + uint32_t _i554; + for (_i554 = 0; _i554 < _size550; ++_i554) { - xfer += this->partitions[_i547].read(iprot); + xfer += this->partitions[_i554].read(iprot); } xfer += iprot->readListEnd(); } @@ -13648,6 +14153,16 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast555; + xfer += iprot->readI32(ecast555); + this->isStatsCompliant = (IsolationLevelCompliance::type)ecast555; + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13669,15 +14184,20 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter548; - for (_iter548 = this->partitions.begin(); _iter548 != this->partitions.end(); ++_iter548) + std::vector ::const_iterator _iter556; + for (_iter556 = this->partitions.begin(); 
_iter556 != this->partitions.end(); ++_iter556) { - xfer += (*_iter548).write(oprot); + xfer += (*_iter556).write(oprot); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); } + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32((int32_t)this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13686,22 +14206,26 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot void swap(AddPartitionsResult &a, AddPartitionsResult &b) { using ::std::swap; swap(a.partitions, b.partitions); + swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); } -AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other549) { - partitions = other549.partitions; - __isset = other549.__isset; +AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other557) { + partitions = other557.partitions; + isStatsCompliant = other557.isStatsCompliant; + __isset = other557.__isset; } -AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other550) { - partitions = other550.partitions; - __isset = other550.__isset; +AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other558) { + partitions = other558.partitions; + isStatsCompliant = other558.isStatsCompliant; + __isset = other558.__isset; return *this; } void AddPartitionsResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "AddPartitionsResult("; out << "partitions="; (__isset.partitions ? (out << to_string(partitions)) : (out << "")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? 
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -13736,6 +14260,16 @@ void AddPartitionsRequest::__set_catName(const std::string& val) { __isset.catName = true; } +void AddPartitionsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void AddPartitionsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13781,14 +14315,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->parts.clear(); - uint32_t _size551; - ::apache::thrift::protocol::TType _etype554; - xfer += iprot->readListBegin(_etype554, _size551); - this->parts.resize(_size551); - uint32_t _i555; - for (_i555 = 0; _i555 < _size551; ++_i555) + uint32_t _size559; + ::apache::thrift::protocol::TType _etype562; + xfer += iprot->readListBegin(_etype562, _size559); + this->parts.resize(_size559); + uint32_t _i563; + for (_i563 = 0; _i563 < _size559; ++_i563) { - xfer += this->parts[_i555].read(iprot); + xfer += this->parts[_i563].read(iprot); } xfer += iprot->readListEnd(); } @@ -13821,6 +14355,22 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 8: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13857,10 +14407,10 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->parts.size())); - std::vector ::const_iterator _iter556; - for (_iter556 = this->parts.begin(); _iter556 != this->parts.end(); ++_iter556) + std::vector ::const_iterator _iter564; + for (_iter564 = this->parts.begin(); _iter564 != this->parts.end(); ++_iter564) { - xfer += (*_iter556).write(oprot); + xfer += (*_iter564).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13880,6 +14430,16 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 7); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13893,26 +14453,32 @@ void swap(AddPartitionsRequest &a, AddPartitionsRequest &b) { swap(a.ifNotExists, b.ifNotExists); swap(a.needResult, b.needResult); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } 
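On the result side, PartitionsStatsResult and AddPartitionsResult gain an optional isStatsCompliant field (IsolationLevelCompliance enum, field id 2), and AddPartitionsResult::write() above emits it only under if (this->__isset.isStatsCompliant), so the wire format is unchanged for pre-patch readers. A minimal consumer sketch; the YES enumerator is assumed from hive_metastore.thrift, which these hunks do not show:

#include "hive_metastore_types.h"  // Thrift-generated types

using Apache::Hadoop::Hive::AddPartitionsResult;
using Apache::Hadoop::Hive::IsolationLevelCompliance;  // assumed cpp namespace

// True only when the server explicitly reported snapshot-compliant stats.
bool statsUsable(const AddPartitionsResult& res) {
  // Field 2 is optional: read() sets __isset.isStatsCompliant only when the
  // field was actually on the wire; an old server never sends it.
  return res.__isset.isStatsCompliant &&
         res.isStatsCompliant == IsolationLevelCompliance::YES;  // YES assumed
}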
-AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other557) { - dbName = other557.dbName; - tblName = other557.tblName; - parts = other557.parts; - ifNotExists = other557.ifNotExists; - needResult = other557.needResult; - catName = other557.catName; - __isset = other557.__isset; +AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other565) { + dbName = other565.dbName; + tblName = other565.tblName; + parts = other565.parts; + ifNotExists = other565.ifNotExists; + needResult = other565.needResult; + catName = other565.catName; + txnId = other565.txnId; + validWriteIdList = other565.validWriteIdList; + __isset = other565.__isset; } -AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other558) { - dbName = other558.dbName; - tblName = other558.tblName; - parts = other558.parts; - ifNotExists = other558.ifNotExists; - needResult = other558.needResult; - catName = other558.catName; - __isset = other558.__isset; +AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other566) { + dbName = other566.dbName; + tblName = other566.tblName; + parts = other566.parts; + ifNotExists = other566.ifNotExists; + needResult = other566.needResult; + catName = other566.catName; + txnId = other566.txnId; + validWriteIdList = other566.validWriteIdList; + __isset = other566.__isset; return *this; } void AddPartitionsRequest::printTo(std::ostream& out) const { @@ -13924,6 +14490,8 @@ void AddPartitionsRequest::printTo(std::ostream& out) const { out << ", " << "ifNotExists=" << to_string(ifNotExists); out << ", " << "needResult="; (__isset.needResult ? (out << to_string(needResult)) : (out << "")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? 
(out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -13962,14 +14530,14 @@ uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size559; - ::apache::thrift::protocol::TType _etype562; - xfer += iprot->readListBegin(_etype562, _size559); - this->partitions.resize(_size559); - uint32_t _i563; - for (_i563 = 0; _i563 < _size559; ++_i563) + uint32_t _size567; + ::apache::thrift::protocol::TType _etype570; + xfer += iprot->readListBegin(_etype570, _size567); + this->partitions.resize(_size567); + uint32_t _i571; + for (_i571 = 0; _i571 < _size567; ++_i571) { - xfer += this->partitions[_i563].read(iprot); + xfer += this->partitions[_i571].read(iprot); } xfer += iprot->readListEnd(); } @@ -13999,10 +14567,10 @@ uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter564; - for (_iter564 = this->partitions.begin(); _iter564 != this->partitions.end(); ++_iter564) + std::vector ::const_iterator _iter572; + for (_iter572 = this->partitions.begin(); _iter572 != this->partitions.end(); ++_iter572) { - xfer += (*_iter564).write(oprot); + xfer += (*_iter572).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14019,13 +14587,13 @@ void swap(DropPartitionsResult &a, DropPartitionsResult &b) { swap(a.__isset, b.__isset); } -DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other565) { - partitions = other565.partitions; - __isset = other565.__isset; +DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other573) { + partitions = other573.partitions; + __isset = other573.__isset; } -DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other566) { - partitions = other566.partitions; - __isset = other566.__isset; +DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other574) { + partitions = other574.partitions; + __isset = other574.__isset; return *this; } void DropPartitionsResult::printTo(std::ostream& out) const { @@ -14127,15 +14695,15 @@ void swap(DropPartitionsExpr &a, DropPartitionsExpr &b) { swap(a.__isset, b.__isset); } -DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other567) { - expr = other567.expr; - partArchiveLevel = other567.partArchiveLevel; - __isset = other567.__isset; +DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other575) { + expr = other575.expr; + partArchiveLevel = other575.partArchiveLevel; + __isset = other575.__isset; } -DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other568) { - expr = other568.expr; - partArchiveLevel = other568.partArchiveLevel; - __isset = other568.__isset; +DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other576) { + expr = other576.expr; + partArchiveLevel = other576.partArchiveLevel; + __isset = other576.__isset; return *this; } void DropPartitionsExpr::printTo(std::ostream& out) const { @@ -14184,14 +14752,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size569; - ::apache::thrift::protocol::TType _etype572; - xfer += iprot->readListBegin(_etype572, 
_size569); - this->names.resize(_size569); - uint32_t _i573; - for (_i573 = 0; _i573 < _size569; ++_i573) + uint32_t _size577; + ::apache::thrift::protocol::TType _etype580; + xfer += iprot->readListBegin(_etype580, _size577); + this->names.resize(_size577); + uint32_t _i581; + for (_i581 = 0; _i581 < _size577; ++_i581) { - xfer += iprot->readString(this->names[_i573]); + xfer += iprot->readString(this->names[_i581]); } xfer += iprot->readListEnd(); } @@ -14204,14 +14772,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->exprs.clear(); - uint32_t _size574; - ::apache::thrift::protocol::TType _etype577; - xfer += iprot->readListBegin(_etype577, _size574); - this->exprs.resize(_size574); - uint32_t _i578; - for (_i578 = 0; _i578 < _size574; ++_i578) + uint32_t _size582; + ::apache::thrift::protocol::TType _etype585; + xfer += iprot->readListBegin(_etype585, _size582); + this->exprs.resize(_size582); + uint32_t _i586; + for (_i586 = 0; _i586 < _size582; ++_i586) { - xfer += this->exprs[_i578].read(iprot); + xfer += this->exprs[_i586].read(iprot); } xfer += iprot->readListEnd(); } @@ -14240,10 +14808,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter579; - for (_iter579 = this->names.begin(); _iter579 != this->names.end(); ++_iter579) + std::vector ::const_iterator _iter587; + for (_iter587 = this->names.begin(); _iter587 != this->names.end(); ++_iter587) { - xfer += oprot->writeString((*_iter579)); + xfer += oprot->writeString((*_iter587)); } xfer += oprot->writeListEnd(); } @@ -14252,10 +14820,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->exprs.size())); - std::vector ::const_iterator _iter580; - for (_iter580 = this->exprs.begin(); _iter580 != this->exprs.end(); ++_iter580) + std::vector ::const_iterator _iter588; + for (_iter588 = this->exprs.begin(); _iter588 != this->exprs.end(); ++_iter588) { - xfer += (*_iter580).write(oprot); + xfer += (*_iter588).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14273,15 +14841,15 @@ void swap(RequestPartsSpec &a, RequestPartsSpec &b) { swap(a.__isset, b.__isset); } -RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other581) { - names = other581.names; - exprs = other581.exprs; - __isset = other581.__isset; +RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other589) { + names = other589.names; + exprs = other589.exprs; + __isset = other589.__isset; } -RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other582) { - names = other582.names; - exprs = other582.exprs; - __isset = other582.__isset; +RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other590) { + names = other590.names; + exprs = other590.exprs; + __isset = other590.__isset; return *this; } void RequestPartsSpec::printTo(std::ostream& out) const { @@ -14519,29 +15087,29 @@ void swap(DropPartitionsRequest &a, DropPartitionsRequest &b) { swap(a.__isset, b.__isset); } -DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other583) { - dbName = other583.dbName; - 
tblName = other583.tblName; - parts = other583.parts; - deleteData = other583.deleteData; - ifExists = other583.ifExists; - ignoreProtection = other583.ignoreProtection; - environmentContext = other583.environmentContext; - needResult = other583.needResult; - catName = other583.catName; - __isset = other583.__isset; -} -DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other584) { - dbName = other584.dbName; - tblName = other584.tblName; - parts = other584.parts; - deleteData = other584.deleteData; - ifExists = other584.ifExists; - ignoreProtection = other584.ignoreProtection; - environmentContext = other584.environmentContext; - needResult = other584.needResult; - catName = other584.catName; - __isset = other584.__isset; +DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other591) { + dbName = other591.dbName; + tblName = other591.tblName; + parts = other591.parts; + deleteData = other591.deleteData; + ifExists = other591.ifExists; + ignoreProtection = other591.ignoreProtection; + environmentContext = other591.environmentContext; + needResult = other591.needResult; + catName = other591.catName; + __isset = other591.__isset; +} +DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other592) { + dbName = other592.dbName; + tblName = other592.tblName; + parts = other592.parts; + deleteData = other592.deleteData; + ifExists = other592.ifExists; + ignoreProtection = other592.ignoreProtection; + environmentContext = other592.environmentContext; + needResult = other592.needResult; + catName = other592.catName; + __isset = other592.__isset; return *this; } void DropPartitionsRequest::printTo(std::ostream& out) const { @@ -14650,14 +15218,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size585; - ::apache::thrift::protocol::TType _etype588; - xfer += iprot->readListBegin(_etype588, _size585); - this->partitionKeys.resize(_size585); - uint32_t _i589; - for (_i589 = 0; _i589 < _size585; ++_i589) + uint32_t _size593; + ::apache::thrift::protocol::TType _etype596; + xfer += iprot->readListBegin(_etype596, _size593); + this->partitionKeys.resize(_size593); + uint32_t _i597; + for (_i597 = 0; _i597 < _size593; ++_i597) { - xfer += this->partitionKeys[_i589].read(iprot); + xfer += this->partitionKeys[_i597].read(iprot); } xfer += iprot->readListEnd(); } @@ -14686,14 +15254,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionOrder.clear(); - uint32_t _size590; - ::apache::thrift::protocol::TType _etype593; - xfer += iprot->readListBegin(_etype593, _size590); - this->partitionOrder.resize(_size590); - uint32_t _i594; - for (_i594 = 0; _i594 < _size590; ++_i594) + uint32_t _size598; + ::apache::thrift::protocol::TType _etype601; + xfer += iprot->readListBegin(_etype601, _size598); + this->partitionOrder.resize(_size598); + uint32_t _i602; + for (_i602 = 0; _i602 < _size598; ++_i602) { - xfer += this->partitionOrder[_i594].read(iprot); + xfer += this->partitionOrder[_i602].read(iprot); } xfer += iprot->readListEnd(); } @@ -14760,10 +15328,10 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast(this->partitionKeys.size())); - std::vector ::const_iterator _iter595; - for (_iter595 = this->partitionKeys.begin(); _iter595 != this->partitionKeys.end(); ++_iter595) + std::vector ::const_iterator _iter603; + for (_iter603 = this->partitionKeys.begin(); _iter603 != this->partitionKeys.end(); ++_iter603) { - xfer += (*_iter595).write(oprot); + xfer += (*_iter603).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14783,10 +15351,10 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitionOrder", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionOrder.size())); - std::vector ::const_iterator _iter596; - for (_iter596 = this->partitionOrder.begin(); _iter596 != this->partitionOrder.end(); ++_iter596) + std::vector ::const_iterator _iter604; + for (_iter604 = this->partitionOrder.begin(); _iter604 != this->partitionOrder.end(); ++_iter604) { - xfer += (*_iter596).write(oprot); + xfer += (*_iter604).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14826,29 +15394,29 @@ void swap(PartitionValuesRequest &a, PartitionValuesRequest &b) { swap(a.__isset, b.__isset); } -PartitionValuesRequest::PartitionValuesRequest(const PartitionValuesRequest& other597) { - dbName = other597.dbName; - tblName = other597.tblName; - partitionKeys = other597.partitionKeys; - applyDistinct = other597.applyDistinct; - filter = other597.filter; - partitionOrder = other597.partitionOrder; - ascending = other597.ascending; - maxParts = other597.maxParts; - catName = other597.catName; - __isset = other597.__isset; -} -PartitionValuesRequest& PartitionValuesRequest::operator=(const PartitionValuesRequest& other598) { - dbName = other598.dbName; - tblName = other598.tblName; - partitionKeys = other598.partitionKeys; - applyDistinct = other598.applyDistinct; - filter = other598.filter; - partitionOrder = other598.partitionOrder; - ascending = other598.ascending; - maxParts = other598.maxParts; - catName = other598.catName; - __isset = other598.__isset; +PartitionValuesRequest::PartitionValuesRequest(const PartitionValuesRequest& other605) { + dbName = other605.dbName; + tblName = other605.tblName; + partitionKeys = other605.partitionKeys; + applyDistinct = other605.applyDistinct; + filter = other605.filter; + partitionOrder = other605.partitionOrder; + ascending = other605.ascending; + maxParts = other605.maxParts; + catName = other605.catName; + __isset = other605.__isset; +} +PartitionValuesRequest& PartitionValuesRequest::operator=(const PartitionValuesRequest& other606) { + dbName = other606.dbName; + tblName = other606.tblName; + partitionKeys = other606.partitionKeys; + applyDistinct = other606.applyDistinct; + filter = other606.filter; + partitionOrder = other606.partitionOrder; + ascending = other606.ascending; + maxParts = other606.maxParts; + catName = other606.catName; + __isset = other606.__isset; return *this; } void PartitionValuesRequest::printTo(std::ostream& out) const { @@ -14901,14 +15469,14 @@ uint32_t PartitionValuesRow::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->row.clear(); - uint32_t _size599; - ::apache::thrift::protocol::TType _etype602; - xfer += iprot->readListBegin(_etype602, _size599); - this->row.resize(_size599); - uint32_t _i603; - for (_i603 = 0; _i603 < _size599; ++_i603) + uint32_t _size607; + ::apache::thrift::protocol::TType _etype610; + xfer += 
iprot->readListBegin(_etype610, _size607); + this->row.resize(_size607); + uint32_t _i611; + for (_i611 = 0; _i611 < _size607; ++_i611) { - xfer += iprot->readString(this->row[_i603]); + xfer += iprot->readString(this->row[_i611]); } xfer += iprot->readListEnd(); } @@ -14939,10 +15507,10 @@ uint32_t PartitionValuesRow::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("row", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->row.size())); - std::vector ::const_iterator _iter604; - for (_iter604 = this->row.begin(); _iter604 != this->row.end(); ++_iter604) + std::vector ::const_iterator _iter612; + for (_iter612 = this->row.begin(); _iter612 != this->row.end(); ++_iter612) { - xfer += oprot->writeString((*_iter604)); + xfer += oprot->writeString((*_iter612)); } xfer += oprot->writeListEnd(); } @@ -14958,11 +15526,11 @@ void swap(PartitionValuesRow &a, PartitionValuesRow &b) { swap(a.row, b.row); } -PartitionValuesRow::PartitionValuesRow(const PartitionValuesRow& other605) { - row = other605.row; +PartitionValuesRow::PartitionValuesRow(const PartitionValuesRow& other613) { + row = other613.row; } -PartitionValuesRow& PartitionValuesRow::operator=(const PartitionValuesRow& other606) { - row = other606.row; +PartitionValuesRow& PartitionValuesRow::operator=(const PartitionValuesRow& other614) { + row = other614.row; return *this; } void PartitionValuesRow::printTo(std::ostream& out) const { @@ -15007,14 +15575,14 @@ uint32_t PartitionValuesResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionValues.clear(); - uint32_t _size607; - ::apache::thrift::protocol::TType _etype610; - xfer += iprot->readListBegin(_etype610, _size607); - this->partitionValues.resize(_size607); - uint32_t _i611; - for (_i611 = 0; _i611 < _size607; ++_i611) + uint32_t _size615; + ::apache::thrift::protocol::TType _etype618; + xfer += iprot->readListBegin(_etype618, _size615); + this->partitionValues.resize(_size615); + uint32_t _i619; + for (_i619 = 0; _i619 < _size615; ++_i619) { - xfer += this->partitionValues[_i611].read(iprot); + xfer += this->partitionValues[_i619].read(iprot); } xfer += iprot->readListEnd(); } @@ -15045,10 +15613,10 @@ uint32_t PartitionValuesResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("partitionValues", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionValues.size())); - std::vector ::const_iterator _iter612; - for (_iter612 = this->partitionValues.begin(); _iter612 != this->partitionValues.end(); ++_iter612) + std::vector ::const_iterator _iter620; + for (_iter620 = this->partitionValues.begin(); _iter620 != this->partitionValues.end(); ++_iter620) { - xfer += (*_iter612).write(oprot); + xfer += (*_iter620).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15064,11 +15632,11 @@ void swap(PartitionValuesResponse &a, PartitionValuesResponse &b) { swap(a.partitionValues, b.partitionValues); } -PartitionValuesResponse::PartitionValuesResponse(const PartitionValuesResponse& other613) { - partitionValues = other613.partitionValues; +PartitionValuesResponse::PartitionValuesResponse(const PartitionValuesResponse& other621) { + partitionValues = other621.partitionValues; } -PartitionValuesResponse& PartitionValuesResponse::operator=(const PartitionValuesResponse& other614) { - partitionValues = 
other614.partitionValues; +PartitionValuesResponse& PartitionValuesResponse::operator=(const PartitionValuesResponse& other622) { + partitionValues = other622.partitionValues; return *this; } void PartitionValuesResponse::printTo(std::ostream& out) const { @@ -15114,9 +15682,9 @@ uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast615; - xfer += iprot->readI32(ecast615); - this->resourceType = (ResourceType::type)ecast615; + int32_t ecast623; + xfer += iprot->readI32(ecast623); + this->resourceType = (ResourceType::type)ecast623; this->__isset.resourceType = true; } else { xfer += iprot->skip(ftype); @@ -15167,15 +15735,15 @@ void swap(ResourceUri &a, ResourceUri &b) { swap(a.__isset, b.__isset); } -ResourceUri::ResourceUri(const ResourceUri& other616) { - resourceType = other616.resourceType; - uri = other616.uri; - __isset = other616.__isset; +ResourceUri::ResourceUri(const ResourceUri& other624) { + resourceType = other624.resourceType; + uri = other624.uri; + __isset = other624.__isset; } -ResourceUri& ResourceUri::operator=(const ResourceUri& other617) { - resourceType = other617.resourceType; - uri = other617.uri; - __isset = other617.__isset; +ResourceUri& ResourceUri::operator=(const ResourceUri& other625) { + resourceType = other625.resourceType; + uri = other625.uri; + __isset = other625.__isset; return *this; } void ResourceUri::printTo(std::ostream& out) const { @@ -15283,9 +15851,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast618; - xfer += iprot->readI32(ecast618); - this->ownerType = (PrincipalType::type)ecast618; + int32_t ecast626; + xfer += iprot->readI32(ecast626); + this->ownerType = (PrincipalType::type)ecast626; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); @@ -15301,9 +15869,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast619; - xfer += iprot->readI32(ecast619); - this->functionType = (FunctionType::type)ecast619; + int32_t ecast627; + xfer += iprot->readI32(ecast627); + this->functionType = (FunctionType::type)ecast627; this->__isset.functionType = true; } else { xfer += iprot->skip(ftype); @@ -15313,14 +15881,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourceUris.clear(); - uint32_t _size620; - ::apache::thrift::protocol::TType _etype623; - xfer += iprot->readListBegin(_etype623, _size620); - this->resourceUris.resize(_size620); - uint32_t _i624; - for (_i624 = 0; _i624 < _size620; ++_i624) + uint32_t _size628; + ::apache::thrift::protocol::TType _etype631; + xfer += iprot->readListBegin(_etype631, _size628); + this->resourceUris.resize(_size628); + uint32_t _i632; + for (_i632 = 0; _i632 < _size628; ++_i632) { - xfer += this->resourceUris[_i624].read(iprot); + xfer += this->resourceUris[_i632].read(iprot); } xfer += iprot->readListEnd(); } @@ -15385,10 +15953,10 @@ uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->resourceUris.size())); - std::vector ::const_iterator _iter625; - for (_iter625 = this->resourceUris.begin(); _iter625 != 
this->resourceUris.end(); ++_iter625) + std::vector ::const_iterator _iter633; + for (_iter633 = this->resourceUris.begin(); _iter633 != this->resourceUris.end(); ++_iter633) { - xfer += (*_iter625).write(oprot); + xfer += (*_iter633).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15418,29 +15986,29 @@ void swap(Function &a, Function &b) { swap(a.__isset, b.__isset); } -Function::Function(const Function& other626) { - functionName = other626.functionName; - dbName = other626.dbName; - className = other626.className; - ownerName = other626.ownerName; - ownerType = other626.ownerType; - createTime = other626.createTime; - functionType = other626.functionType; - resourceUris = other626.resourceUris; - catName = other626.catName; - __isset = other626.__isset; -} -Function& Function::operator=(const Function& other627) { - functionName = other627.functionName; - dbName = other627.dbName; - className = other627.className; - ownerName = other627.ownerName; - ownerType = other627.ownerType; - createTime = other627.createTime; - functionType = other627.functionType; - resourceUris = other627.resourceUris; - catName = other627.catName; - __isset = other627.__isset; +Function::Function(const Function& other634) { + functionName = other634.functionName; + dbName = other634.dbName; + className = other634.className; + ownerName = other634.ownerName; + ownerType = other634.ownerType; + createTime = other634.createTime; + functionType = other634.functionType; + resourceUris = other634.resourceUris; + catName = other634.catName; + __isset = other634.__isset; +} +Function& Function::operator=(const Function& other635) { + functionName = other635.functionName; + dbName = other635.dbName; + className = other635.className; + ownerName = other635.ownerName; + ownerType = other635.ownerType; + createTime = other635.createTime; + functionType = other635.functionType; + resourceUris = other635.resourceUris; + catName = other635.catName; + __isset = other635.__isset; return *this; } void Function::printTo(std::ostream& out) const { @@ -15539,9 +16107,9 @@ uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast628; - xfer += iprot->readI32(ecast628); - this->state = (TxnState::type)ecast628; + int32_t ecast636; + xfer += iprot->readI32(ecast636); + this->state = (TxnState::type)ecast636; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -15688,29 +16256,29 @@ void swap(TxnInfo &a, TxnInfo &b) { swap(a.__isset, b.__isset); } -TxnInfo::TxnInfo(const TxnInfo& other629) { - id = other629.id; - state = other629.state; - user = other629.user; - hostname = other629.hostname; - agentInfo = other629.agentInfo; - heartbeatCount = other629.heartbeatCount; - metaInfo = other629.metaInfo; - startedTime = other629.startedTime; - lastHeartbeatTime = other629.lastHeartbeatTime; - __isset = other629.__isset; -} -TxnInfo& TxnInfo::operator=(const TxnInfo& other630) { - id = other630.id; - state = other630.state; - user = other630.user; - hostname = other630.hostname; - agentInfo = other630.agentInfo; - heartbeatCount = other630.heartbeatCount; - metaInfo = other630.metaInfo; - startedTime = other630.startedTime; - lastHeartbeatTime = other630.lastHeartbeatTime; - __isset = other630.__isset; +TxnInfo::TxnInfo(const TxnInfo& other637) { + id = other637.id; + state = other637.state; + user = other637.user; + hostname = other637.hostname; + agentInfo = other637.agentInfo; + heartbeatCount = other637.heartbeatCount; + metaInfo = 
other637.metaInfo; + startedTime = other637.startedTime; + lastHeartbeatTime = other637.lastHeartbeatTime; + __isset = other637.__isset; +} +TxnInfo& TxnInfo::operator=(const TxnInfo& other638) { + id = other638.id; + state = other638.state; + user = other638.user; + hostname = other638.hostname; + agentInfo = other638.agentInfo; + heartbeatCount = other638.heartbeatCount; + metaInfo = other638.metaInfo; + startedTime = other638.startedTime; + lastHeartbeatTime = other638.lastHeartbeatTime; + __isset = other638.__isset; return *this; } void TxnInfo::printTo(std::ostream& out) const { @@ -15776,14 +16344,14 @@ uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size631; - ::apache::thrift::protocol::TType _etype634; - xfer += iprot->readListBegin(_etype634, _size631); - this->open_txns.resize(_size631); - uint32_t _i635; - for (_i635 = 0; _i635 < _size631; ++_i635) + uint32_t _size639; + ::apache::thrift::protocol::TType _etype642; + xfer += iprot->readListBegin(_etype642, _size639); + this->open_txns.resize(_size639); + uint32_t _i643; + for (_i643 = 0; _i643 < _size639; ++_i643) { - xfer += this->open_txns[_i635].read(iprot); + xfer += this->open_txns[_i643].read(iprot); } xfer += iprot->readListEnd(); } @@ -15820,10 +16388,10 @@ uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->open_txns.size())); - std::vector ::const_iterator _iter636; - for (_iter636 = this->open_txns.begin(); _iter636 != this->open_txns.end(); ++_iter636) + std::vector ::const_iterator _iter644; + for (_iter644 = this->open_txns.begin(); _iter644 != this->open_txns.end(); ++_iter644) { - xfer += (*_iter636).write(oprot); + xfer += (*_iter644).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15840,13 +16408,13 @@ void swap(GetOpenTxnsInfoResponse &a, GetOpenTxnsInfoResponse &b) { swap(a.open_txns, b.open_txns); } -GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other637) { - txn_high_water_mark = other637.txn_high_water_mark; - open_txns = other637.open_txns; +GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other645) { + txn_high_water_mark = other645.txn_high_water_mark; + open_txns = other645.open_txns; } -GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other638) { - txn_high_water_mark = other638.txn_high_water_mark; - open_txns = other638.open_txns; +GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other646) { + txn_high_water_mark = other646.txn_high_water_mark; + open_txns = other646.open_txns; return *this; } void GetOpenTxnsInfoResponse::printTo(std::ostream& out) const { @@ -15915,14 +16483,14 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size639; - ::apache::thrift::protocol::TType _etype642; - xfer += iprot->readListBegin(_etype642, _size639); - this->open_txns.resize(_size639); - uint32_t _i643; - for (_i643 = 0; _i643 < _size639; ++_i643) + uint32_t _size647; + ::apache::thrift::protocol::TType _etype650; + xfer += iprot->readListBegin(_etype650, _size647); + this->open_txns.resize(_size647); + uint32_t _i651; + 
for (_i651 = 0; _i651 < _size647; ++_i651) { - xfer += iprot->readI64(this->open_txns[_i643]); + xfer += iprot->readI64(this->open_txns[_i651]); } xfer += iprot->readListEnd(); } @@ -15977,10 +16545,10 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->open_txns.size())); - std::vector ::const_iterator _iter644; - for (_iter644 = this->open_txns.begin(); _iter644 != this->open_txns.end(); ++_iter644) + std::vector ::const_iterator _iter652; + for (_iter652 = this->open_txns.begin(); _iter652 != this->open_txns.end(); ++_iter652) { - xfer += oprot->writeI64((*_iter644)); + xfer += oprot->writeI64((*_iter652)); } xfer += oprot->writeListEnd(); } @@ -16009,19 +16577,19 @@ void swap(GetOpenTxnsResponse &a, GetOpenTxnsResponse &b) { swap(a.__isset, b.__isset); } -GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other645) { - txn_high_water_mark = other645.txn_high_water_mark; - open_txns = other645.open_txns; - min_open_txn = other645.min_open_txn; - abortedBits = other645.abortedBits; - __isset = other645.__isset; +GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other653) { + txn_high_water_mark = other653.txn_high_water_mark; + open_txns = other653.open_txns; + min_open_txn = other653.min_open_txn; + abortedBits = other653.abortedBits; + __isset = other653.__isset; } -GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other646) { - txn_high_water_mark = other646.txn_high_water_mark; - open_txns = other646.open_txns; - min_open_txn = other646.min_open_txn; - abortedBits = other646.abortedBits; - __isset = other646.__isset; +GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other654) { + txn_high_water_mark = other654.txn_high_water_mark; + open_txns = other654.open_txns; + min_open_txn = other654.min_open_txn; + abortedBits = other654.abortedBits; + __isset = other654.__isset; return *this; } void GetOpenTxnsResponse::printTo(std::ostream& out) const { @@ -16134,14 +16702,14 @@ uint32_t OpenTxnRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->replSrcTxnIds.clear(); - uint32_t _size647; - ::apache::thrift::protocol::TType _etype650; - xfer += iprot->readListBegin(_etype650, _size647); - this->replSrcTxnIds.resize(_size647); - uint32_t _i651; - for (_i651 = 0; _i651 < _size647; ++_i651) + uint32_t _size655; + ::apache::thrift::protocol::TType _etype658; + xfer += iprot->readListBegin(_etype658, _size655); + this->replSrcTxnIds.resize(_size655); + uint32_t _i659; + for (_i659 = 0; _i659 < _size655; ++_i659) { - xfer += iprot->readI64(this->replSrcTxnIds[_i651]); + xfer += iprot->readI64(this->replSrcTxnIds[_i659]); } xfer += iprot->readListEnd(); } @@ -16199,10 +16767,10 @@ uint32_t OpenTxnRequest::write(::apache::thrift::protocol::TProtocol* oprot) con xfer += oprot->writeFieldBegin("replSrcTxnIds", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->replSrcTxnIds.size())); - std::vector ::const_iterator _iter652; - for (_iter652 = this->replSrcTxnIds.begin(); _iter652 != this->replSrcTxnIds.end(); ++_iter652) + std::vector ::const_iterator _iter660; + for (_iter660 = this->replSrcTxnIds.begin(); _iter660 != this->replSrcTxnIds.end(); ++_iter660) { - xfer += 
oprot->writeI64((*_iter652)); + xfer += oprot->writeI64((*_iter660)); } xfer += oprot->writeListEnd(); } @@ -16224,23 +16792,23 @@ void swap(OpenTxnRequest &a, OpenTxnRequest &b) { swap(a.__isset, b.__isset); } -OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other653) { - num_txns = other653.num_txns; - user = other653.user; - hostname = other653.hostname; - agentInfo = other653.agentInfo; - replPolicy = other653.replPolicy; - replSrcTxnIds = other653.replSrcTxnIds; - __isset = other653.__isset; -} -OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other654) { - num_txns = other654.num_txns; - user = other654.user; - hostname = other654.hostname; - agentInfo = other654.agentInfo; - replPolicy = other654.replPolicy; - replSrcTxnIds = other654.replSrcTxnIds; - __isset = other654.__isset; +OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other661) { + num_txns = other661.num_txns; + user = other661.user; + hostname = other661.hostname; + agentInfo = other661.agentInfo; + replPolicy = other661.replPolicy; + replSrcTxnIds = other661.replSrcTxnIds; + __isset = other661.__isset; +} +OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other662) { + num_txns = other662.num_txns; + user = other662.user; + hostname = other662.hostname; + agentInfo = other662.agentInfo; + replPolicy = other662.replPolicy; + replSrcTxnIds = other662.replSrcTxnIds; + __isset = other662.__isset; return *this; } void OpenTxnRequest::printTo(std::ostream& out) const { @@ -16290,14 +16858,14 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size655; - ::apache::thrift::protocol::TType _etype658; - xfer += iprot->readListBegin(_etype658, _size655); - this->txn_ids.resize(_size655); - uint32_t _i659; - for (_i659 = 0; _i659 < _size655; ++_i659) + uint32_t _size663; + ::apache::thrift::protocol::TType _etype666; + xfer += iprot->readListBegin(_etype666, _size663); + this->txn_ids.resize(_size663); + uint32_t _i667; + for (_i667 = 0; _i667 < _size663; ++_i667) { - xfer += iprot->readI64(this->txn_ids[_i659]); + xfer += iprot->readI64(this->txn_ids[_i667]); } xfer += iprot->readListEnd(); } @@ -16328,10 +16896,10 @@ uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txn_ids.size())); - std::vector ::const_iterator _iter660; - for (_iter660 = this->txn_ids.begin(); _iter660 != this->txn_ids.end(); ++_iter660) + std::vector ::const_iterator _iter668; + for (_iter668 = this->txn_ids.begin(); _iter668 != this->txn_ids.end(); ++_iter668) { - xfer += oprot->writeI64((*_iter660)); + xfer += oprot->writeI64((*_iter668)); } xfer += oprot->writeListEnd(); } @@ -16347,11 +16915,11 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b) { swap(a.txn_ids, b.txn_ids); } -OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other661) { - txn_ids = other661.txn_ids; +OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other669) { + txn_ids = other669.txn_ids; } -OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other662) { - txn_ids = other662.txn_ids; +OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other670) { + txn_ids = other670.txn_ids; return *this; } void OpenTxnsResponse::printTo(std::ostream& out) const { @@ -16453,15 +17021,15 @@ void 
swap(AbortTxnRequest &a, AbortTxnRequest &b) { swap(a.__isset, b.__isset); } -AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other663) { - txnid = other663.txnid; - replPolicy = other663.replPolicy; - __isset = other663.__isset; +AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other671) { + txnid = other671.txnid; + replPolicy = other671.replPolicy; + __isset = other671.__isset; } -AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other664) { - txnid = other664.txnid; - replPolicy = other664.replPolicy; - __isset = other664.__isset; +AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other672) { + txnid = other672.txnid; + replPolicy = other672.replPolicy; + __isset = other672.__isset; return *this; } void AbortTxnRequest::printTo(std::ostream& out) const { @@ -16507,14 +17075,14 @@ uint32_t AbortTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size665; - ::apache::thrift::protocol::TType _etype668; - xfer += iprot->readListBegin(_etype668, _size665); - this->txn_ids.resize(_size665); - uint32_t _i669; - for (_i669 = 0; _i669 < _size665; ++_i669) + uint32_t _size673; + ::apache::thrift::protocol::TType _etype676; + xfer += iprot->readListBegin(_etype676, _size673); + this->txn_ids.resize(_size673); + uint32_t _i677; + for (_i677 = 0; _i677 < _size673; ++_i677) { - xfer += iprot->readI64(this->txn_ids[_i669]); + xfer += iprot->readI64(this->txn_ids[_i677]); } xfer += iprot->readListEnd(); } @@ -16545,10 +17113,10 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txn_ids.size())); - std::vector ::const_iterator _iter670; - for (_iter670 = this->txn_ids.begin(); _iter670 != this->txn_ids.end(); ++_iter670) + std::vector ::const_iterator _iter678; + for (_iter678 = this->txn_ids.begin(); _iter678 != this->txn_ids.end(); ++_iter678) { - xfer += oprot->writeI64((*_iter670)); + xfer += oprot->writeI64((*_iter678)); } xfer += oprot->writeListEnd(); } @@ -16564,11 +17132,11 @@ void swap(AbortTxnsRequest &a, AbortTxnsRequest &b) { swap(a.txn_ids, b.txn_ids); } -AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other671) { - txn_ids = other671.txn_ids; +AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other679) { + txn_ids = other679.txn_ids; } -AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other672) { - txn_ids = other672.txn_ids; +AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other680) { + txn_ids = other680.txn_ids; return *this; } void AbortTxnsRequest::printTo(std::ostream& out) const { @@ -16670,15 +17238,15 @@ void swap(CommitTxnRequest &a, CommitTxnRequest &b) { swap(a.__isset, b.__isset); } -CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other673) { - txnid = other673.txnid; - replPolicy = other673.replPolicy; - __isset = other673.__isset; +CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other681) { + txnid = other681.txnid; + replPolicy = other681.replPolicy; + __isset = other681.__isset; } -CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other674) { - txnid = other674.txnid; - replPolicy = other674.replPolicy; - __isset = other674.__isset; +CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other682) { + txnid = 
other682.txnid; + replPolicy = other682.replPolicy; + __isset = other682.__isset; return *this; } void CommitTxnRequest::printTo(std::ostream& out) const { @@ -16789,14 +17357,14 @@ uint32_t ReplTblWriteIdStateRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size675; - ::apache::thrift::protocol::TType _etype678; - xfer += iprot->readListBegin(_etype678, _size675); - this->partNames.resize(_size675); - uint32_t _i679; - for (_i679 = 0; _i679 < _size675; ++_i679) + uint32_t _size683; + ::apache::thrift::protocol::TType _etype686; + xfer += iprot->readListBegin(_etype686, _size683); + this->partNames.resize(_size683); + uint32_t _i687; + for (_i687 = 0; _i687 < _size683; ++_i687) { - xfer += iprot->readString(this->partNames[_i679]); + xfer += iprot->readString(this->partNames[_i687]); } xfer += iprot->readListEnd(); } @@ -16856,10 +17424,10 @@ uint32_t ReplTblWriteIdStateRequest::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter680; - for (_iter680 = this->partNames.begin(); _iter680 != this->partNames.end(); ++_iter680) + std::vector ::const_iterator _iter688; + for (_iter688 = this->partNames.begin(); _iter688 != this->partNames.end(); ++_iter688) { - xfer += oprot->writeString((*_iter680)); + xfer += oprot->writeString((*_iter688)); } xfer += oprot->writeListEnd(); } @@ -16881,23 +17449,23 @@ void swap(ReplTblWriteIdStateRequest &a, ReplTblWriteIdStateRequest &b) { swap(a.__isset, b.__isset); } -ReplTblWriteIdStateRequest::ReplTblWriteIdStateRequest(const ReplTblWriteIdStateRequest& other681) { - validWriteIdlist = other681.validWriteIdlist; - user = other681.user; - hostName = other681.hostName; - dbName = other681.dbName; - tableName = other681.tableName; - partNames = other681.partNames; - __isset = other681.__isset; -} -ReplTblWriteIdStateRequest& ReplTblWriteIdStateRequest::operator=(const ReplTblWriteIdStateRequest& other682) { - validWriteIdlist = other682.validWriteIdlist; - user = other682.user; - hostName = other682.hostName; - dbName = other682.dbName; - tableName = other682.tableName; - partNames = other682.partNames; - __isset = other682.__isset; +ReplTblWriteIdStateRequest::ReplTblWriteIdStateRequest(const ReplTblWriteIdStateRequest& other689) { + validWriteIdlist = other689.validWriteIdlist; + user = other689.user; + hostName = other689.hostName; + dbName = other689.dbName; + tableName = other689.tableName; + partNames = other689.partNames; + __isset = other689.__isset; +} +ReplTblWriteIdStateRequest& ReplTblWriteIdStateRequest::operator=(const ReplTblWriteIdStateRequest& other690) { + validWriteIdlist = other690.validWriteIdlist; + user = other690.user; + hostName = other690.hostName; + dbName = other690.dbName; + tableName = other690.tableName; + partNames = other690.partNames; + __isset = other690.__isset; return *this; } void ReplTblWriteIdStateRequest::printTo(std::ostream& out) const { @@ -16952,14 +17520,14 @@ uint32_t GetValidWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fullTableNames.clear(); - uint32_t _size683; - ::apache::thrift::protocol::TType _etype686; - xfer += iprot->readListBegin(_etype686, _size683); - this->fullTableNames.resize(_size683); - uint32_t _i687; - for 
(_i687 = 0; _i687 < _size683; ++_i687) + uint32_t _size691; + ::apache::thrift::protocol::TType _etype694; + xfer += iprot->readListBegin(_etype694, _size691); + this->fullTableNames.resize(_size691); + uint32_t _i695; + for (_i695 = 0; _i695 < _size691; ++_i695) { - xfer += iprot->readString(this->fullTableNames[_i687]); + xfer += iprot->readString(this->fullTableNames[_i695]); } xfer += iprot->readListEnd(); } @@ -17000,10 +17568,10 @@ uint32_t GetValidWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("fullTableNames", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->fullTableNames.size())); - std::vector ::const_iterator _iter688; - for (_iter688 = this->fullTableNames.begin(); _iter688 != this->fullTableNames.end(); ++_iter688) + std::vector ::const_iterator _iter696; + for (_iter696 = this->fullTableNames.begin(); _iter696 != this->fullTableNames.end(); ++_iter696) { - xfer += oprot->writeString((*_iter688)); + xfer += oprot->writeString((*_iter696)); } xfer += oprot->writeListEnd(); } @@ -17024,13 +17592,13 @@ void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b) { swap(a.validTxnList, b.validTxnList); } -GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other689) { - fullTableNames = other689.fullTableNames; - validTxnList = other689.validTxnList; +GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other697) { + fullTableNames = other697.fullTableNames; + validTxnList = other697.validTxnList; } -GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other690) { - fullTableNames = other690.fullTableNames; - validTxnList = other690.validTxnList; +GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other698) { + fullTableNames = other698.fullTableNames; + validTxnList = other698.validTxnList; return *this; } void GetValidWriteIdsRequest::printTo(std::ostream& out) const { @@ -17112,14 +17680,14 @@ uint32_t TableValidWriteIds::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->invalidWriteIds.clear(); - uint32_t _size691; - ::apache::thrift::protocol::TType _etype694; - xfer += iprot->readListBegin(_etype694, _size691); - this->invalidWriteIds.resize(_size691); - uint32_t _i695; - for (_i695 = 0; _i695 < _size691; ++_i695) + uint32_t _size699; + ::apache::thrift::protocol::TType _etype702; + xfer += iprot->readListBegin(_etype702, _size699); + this->invalidWriteIds.resize(_size699); + uint32_t _i703; + for (_i703 = 0; _i703 < _size699; ++_i703) { - xfer += iprot->readI64(this->invalidWriteIds[_i695]); + xfer += iprot->readI64(this->invalidWriteIds[_i703]); } xfer += iprot->readListEnd(); } @@ -17180,10 +17748,10 @@ uint32_t TableValidWriteIds::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("invalidWriteIds", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->invalidWriteIds.size())); - std::vector ::const_iterator _iter696; - for (_iter696 = this->invalidWriteIds.begin(); _iter696 != this->invalidWriteIds.end(); ++_iter696) + std::vector ::const_iterator _iter704; + for (_iter704 = this->invalidWriteIds.begin(); _iter704 != this->invalidWriteIds.end(); ++_iter704) { - xfer += oprot->writeI64((*_iter696)); + xfer += oprot->writeI64((*_iter704)); } xfer += 
oprot->writeListEnd(); } @@ -17213,21 +17781,21 @@ void swap(TableValidWriteIds &a, TableValidWriteIds &b) { swap(a.__isset, b.__isset); } -TableValidWriteIds::TableValidWriteIds(const TableValidWriteIds& other697) { - fullTableName = other697.fullTableName; - writeIdHighWaterMark = other697.writeIdHighWaterMark; - invalidWriteIds = other697.invalidWriteIds; - minOpenWriteId = other697.minOpenWriteId; - abortedBits = other697.abortedBits; - __isset = other697.__isset; -} -TableValidWriteIds& TableValidWriteIds::operator=(const TableValidWriteIds& other698) { - fullTableName = other698.fullTableName; - writeIdHighWaterMark = other698.writeIdHighWaterMark; - invalidWriteIds = other698.invalidWriteIds; - minOpenWriteId = other698.minOpenWriteId; - abortedBits = other698.abortedBits; - __isset = other698.__isset; +TableValidWriteIds::TableValidWriteIds(const TableValidWriteIds& other705) { + fullTableName = other705.fullTableName; + writeIdHighWaterMark = other705.writeIdHighWaterMark; + invalidWriteIds = other705.invalidWriteIds; + minOpenWriteId = other705.minOpenWriteId; + abortedBits = other705.abortedBits; + __isset = other705.__isset; +} +TableValidWriteIds& TableValidWriteIds::operator=(const TableValidWriteIds& other706) { + fullTableName = other706.fullTableName; + writeIdHighWaterMark = other706.writeIdHighWaterMark; + invalidWriteIds = other706.invalidWriteIds; + minOpenWriteId = other706.minOpenWriteId; + abortedBits = other706.abortedBits; + __isset = other706.__isset; return *this; } void TableValidWriteIds::printTo(std::ostream& out) const { @@ -17276,14 +17844,14 @@ uint32_t GetValidWriteIdsResponse::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tblValidWriteIds.clear(); - uint32_t _size699; - ::apache::thrift::protocol::TType _etype702; - xfer += iprot->readListBegin(_etype702, _size699); - this->tblValidWriteIds.resize(_size699); - uint32_t _i703; - for (_i703 = 0; _i703 < _size699; ++_i703) + uint32_t _size707; + ::apache::thrift::protocol::TType _etype710; + xfer += iprot->readListBegin(_etype710, _size707); + this->tblValidWriteIds.resize(_size707); + uint32_t _i711; + for (_i711 = 0; _i711 < _size707; ++_i711) { - xfer += this->tblValidWriteIds[_i703].read(iprot); + xfer += this->tblValidWriteIds[_i711].read(iprot); } xfer += iprot->readListEnd(); } @@ -17314,10 +17882,10 @@ uint32_t GetValidWriteIdsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("tblValidWriteIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tblValidWriteIds.size())); - std::vector ::const_iterator _iter704; - for (_iter704 = this->tblValidWriteIds.begin(); _iter704 != this->tblValidWriteIds.end(); ++_iter704) + std::vector ::const_iterator _iter712; + for (_iter712 = this->tblValidWriteIds.begin(); _iter712 != this->tblValidWriteIds.end(); ++_iter712) { - xfer += (*_iter704).write(oprot); + xfer += (*_iter712).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17333,11 +17901,11 @@ void swap(GetValidWriteIdsResponse &a, GetValidWriteIdsResponse &b) { swap(a.tblValidWriteIds, b.tblValidWriteIds); } -GetValidWriteIdsResponse::GetValidWriteIdsResponse(const GetValidWriteIdsResponse& other705) { - tblValidWriteIds = other705.tblValidWriteIds; +GetValidWriteIdsResponse::GetValidWriteIdsResponse(const GetValidWriteIdsResponse& other713) { + tblValidWriteIds = other713.tblValidWriteIds; } -GetValidWriteIdsResponse& 
GetValidWriteIdsResponse::operator=(const GetValidWriteIdsResponse& other706) { - tblValidWriteIds = other706.tblValidWriteIds; +GetValidWriteIdsResponse& GetValidWriteIdsResponse::operator=(const GetValidWriteIdsResponse& other714) { + tblValidWriteIds = other714.tblValidWriteIds; return *this; } void GetValidWriteIdsResponse::printTo(std::ostream& out) const { @@ -17418,14 +17986,14 @@ uint32_t AllocateTableWriteIdsRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txnIds.clear(); - uint32_t _size707; - ::apache::thrift::protocol::TType _etype710; - xfer += iprot->readListBegin(_etype710, _size707); - this->txnIds.resize(_size707); - uint32_t _i711; - for (_i711 = 0; _i711 < _size707; ++_i711) + uint32_t _size715; + ::apache::thrift::protocol::TType _etype718; + xfer += iprot->readListBegin(_etype718, _size715); + this->txnIds.resize(_size715); + uint32_t _i719; + for (_i719 = 0; _i719 < _size715; ++_i719) { - xfer += iprot->readI64(this->txnIds[_i711]); + xfer += iprot->readI64(this->txnIds[_i719]); } xfer += iprot->readListEnd(); } @@ -17446,14 +18014,14 @@ uint32_t AllocateTableWriteIdsRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->srcTxnToWriteIdList.clear(); - uint32_t _size712; - ::apache::thrift::protocol::TType _etype715; - xfer += iprot->readListBegin(_etype715, _size712); - this->srcTxnToWriteIdList.resize(_size712); - uint32_t _i716; - for (_i716 = 0; _i716 < _size712; ++_i716) + uint32_t _size720; + ::apache::thrift::protocol::TType _etype723; + xfer += iprot->readListBegin(_etype723, _size720); + this->srcTxnToWriteIdList.resize(_size720); + uint32_t _i724; + for (_i724 = 0; _i724 < _size720; ++_i724) { - xfer += this->srcTxnToWriteIdList[_i716].read(iprot); + xfer += this->srcTxnToWriteIdList[_i724].read(iprot); } xfer += iprot->readListEnd(); } @@ -17495,10 +18063,10 @@ uint32_t AllocateTableWriteIdsRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("txnIds", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txnIds.size())); - std::vector ::const_iterator _iter717; - for (_iter717 = this->txnIds.begin(); _iter717 != this->txnIds.end(); ++_iter717) + std::vector ::const_iterator _iter725; + for (_iter725 = this->txnIds.begin(); _iter725 != this->txnIds.end(); ++_iter725) { - xfer += oprot->writeI64((*_iter717)); + xfer += oprot->writeI64((*_iter725)); } xfer += oprot->writeListEnd(); } @@ -17513,10 +18081,10 @@ uint32_t AllocateTableWriteIdsRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("srcTxnToWriteIdList", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->srcTxnToWriteIdList.size())); - std::vector ::const_iterator _iter718; - for (_iter718 = this->srcTxnToWriteIdList.begin(); _iter718 != this->srcTxnToWriteIdList.end(); ++_iter718) + std::vector ::const_iterator _iter726; + for (_iter726 = this->srcTxnToWriteIdList.begin(); _iter726 != this->srcTxnToWriteIdList.end(); ++_iter726) { - xfer += (*_iter718).write(oprot); + xfer += (*_iter726).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17537,21 +18105,21 @@ void swap(AllocateTableWriteIdsRequest &a, AllocateTableWriteIdsRequest &b) { swap(a.__isset, b.__isset); } -AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest& other719) { - dbName = 
other719.dbName; - tableName = other719.tableName; - txnIds = other719.txnIds; - replPolicy = other719.replPolicy; - srcTxnToWriteIdList = other719.srcTxnToWriteIdList; - __isset = other719.__isset; -} -AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const AllocateTableWriteIdsRequest& other720) { - dbName = other720.dbName; - tableName = other720.tableName; - txnIds = other720.txnIds; - replPolicy = other720.replPolicy; - srcTxnToWriteIdList = other720.srcTxnToWriteIdList; - __isset = other720.__isset; +AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest& other727) { + dbName = other727.dbName; + tableName = other727.tableName; + txnIds = other727.txnIds; + replPolicy = other727.replPolicy; + srcTxnToWriteIdList = other727.srcTxnToWriteIdList; + __isset = other727.__isset; +} +AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const AllocateTableWriteIdsRequest& other728) { + dbName = other728.dbName; + tableName = other728.tableName; + txnIds = other728.txnIds; + replPolicy = other728.replPolicy; + srcTxnToWriteIdList = other728.srcTxnToWriteIdList; + __isset = other728.__isset; return *this; } void AllocateTableWriteIdsRequest::printTo(std::ostream& out) const { @@ -17657,13 +18225,13 @@ void swap(TxnToWriteId &a, TxnToWriteId &b) { swap(a.writeId, b.writeId); } -TxnToWriteId::TxnToWriteId(const TxnToWriteId& other721) { - txnId = other721.txnId; - writeId = other721.writeId; +TxnToWriteId::TxnToWriteId(const TxnToWriteId& other729) { + txnId = other729.txnId; + writeId = other729.writeId; } -TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other722) { - txnId = other722.txnId; - writeId = other722.writeId; +TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other730) { + txnId = other730.txnId; + writeId = other730.writeId; return *this; } void TxnToWriteId::printTo(std::ostream& out) const { @@ -17709,14 +18277,14 @@ uint32_t AllocateTableWriteIdsResponse::read(::apache::thrift::protocol::TProtoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txnToWriteIds.clear(); - uint32_t _size723; - ::apache::thrift::protocol::TType _etype726; - xfer += iprot->readListBegin(_etype726, _size723); - this->txnToWriteIds.resize(_size723); - uint32_t _i727; - for (_i727 = 0; _i727 < _size723; ++_i727) + uint32_t _size731; + ::apache::thrift::protocol::TType _etype734; + xfer += iprot->readListBegin(_etype734, _size731); + this->txnToWriteIds.resize(_size731); + uint32_t _i735; + for (_i735 = 0; _i735 < _size731; ++_i735) { - xfer += this->txnToWriteIds[_i727].read(iprot); + xfer += this->txnToWriteIds[_i735].read(iprot); } xfer += iprot->readListEnd(); } @@ -17747,10 +18315,10 @@ uint32_t AllocateTableWriteIdsResponse::write(::apache::thrift::protocol::TProto xfer += oprot->writeFieldBegin("txnToWriteIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->txnToWriteIds.size())); - std::vector ::const_iterator _iter728; - for (_iter728 = this->txnToWriteIds.begin(); _iter728 != this->txnToWriteIds.end(); ++_iter728) + std::vector ::const_iterator _iter736; + for (_iter736 = this->txnToWriteIds.begin(); _iter736 != this->txnToWriteIds.end(); ++_iter736) { - xfer += (*_iter728).write(oprot); + xfer += (*_iter736).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17766,11 +18334,11 @@ void swap(AllocateTableWriteIdsResponse &a, AllocateTableWriteIdsResponse &b) { swap(a.txnToWriteIds, b.txnToWriteIds); } 
-AllocateTableWriteIdsResponse::AllocateTableWriteIdsResponse(const AllocateTableWriteIdsResponse& other729) { - txnToWriteIds = other729.txnToWriteIds; +AllocateTableWriteIdsResponse::AllocateTableWriteIdsResponse(const AllocateTableWriteIdsResponse& other737) { + txnToWriteIds = other737.txnToWriteIds; } -AllocateTableWriteIdsResponse& AllocateTableWriteIdsResponse::operator=(const AllocateTableWriteIdsResponse& other730) { - txnToWriteIds = other730.txnToWriteIds; +AllocateTableWriteIdsResponse& AllocateTableWriteIdsResponse::operator=(const AllocateTableWriteIdsResponse& other738) { + txnToWriteIds = other738.txnToWriteIds; return *this; } void AllocateTableWriteIdsResponse::printTo(std::ostream& out) const { @@ -17848,9 +18416,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast731; - xfer += iprot->readI32(ecast731); - this->type = (LockType::type)ecast731; + int32_t ecast739; + xfer += iprot->readI32(ecast739); + this->type = (LockType::type)ecast739; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -17858,9 +18426,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast732; - xfer += iprot->readI32(ecast732); - this->level = (LockLevel::type)ecast732; + int32_t ecast740; + xfer += iprot->readI32(ecast740); + this->level = (LockLevel::type)ecast740; isset_level = true; } else { xfer += iprot->skip(ftype); @@ -17892,9 +18460,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast733; - xfer += iprot->readI32(ecast733); - this->operationType = (DataOperationType::type)ecast733; + int32_t ecast741; + xfer += iprot->readI32(ecast741); + this->operationType = (DataOperationType::type)ecast741; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -17994,27 +18562,27 @@ void swap(LockComponent &a, LockComponent &b) { swap(a.__isset, b.__isset); } -LockComponent::LockComponent(const LockComponent& other734) { - type = other734.type; - level = other734.level; - dbname = other734.dbname; - tablename = other734.tablename; - partitionname = other734.partitionname; - operationType = other734.operationType; - isTransactional = other734.isTransactional; - isDynamicPartitionWrite = other734.isDynamicPartitionWrite; - __isset = other734.__isset; -} -LockComponent& LockComponent::operator=(const LockComponent& other735) { - type = other735.type; - level = other735.level; - dbname = other735.dbname; - tablename = other735.tablename; - partitionname = other735.partitionname; - operationType = other735.operationType; - isTransactional = other735.isTransactional; - isDynamicPartitionWrite = other735.isDynamicPartitionWrite; - __isset = other735.__isset; +LockComponent::LockComponent(const LockComponent& other742) { + type = other742.type; + level = other742.level; + dbname = other742.dbname; + tablename = other742.tablename; + partitionname = other742.partitionname; + operationType = other742.operationType; + isTransactional = other742.isTransactional; + isDynamicPartitionWrite = other742.isDynamicPartitionWrite; + __isset = other742.__isset; +} +LockComponent& LockComponent::operator=(const LockComponent& other743) { + type = other743.type; + level = other743.level; + dbname = other743.dbname; + tablename = other743.tablename; + partitionname = 
other743.partitionname; + operationType = other743.operationType; + isTransactional = other743.isTransactional; + isDynamicPartitionWrite = other743.isDynamicPartitionWrite; + __isset = other743.__isset; return *this; } void LockComponent::printTo(std::ostream& out) const { @@ -18086,14 +18654,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->component.clear(); - uint32_t _size736; - ::apache::thrift::protocol::TType _etype739; - xfer += iprot->readListBegin(_etype739, _size736); - this->component.resize(_size736); - uint32_t _i740; - for (_i740 = 0; _i740 < _size736; ++_i740) + uint32_t _size744; + ::apache::thrift::protocol::TType _etype747; + xfer += iprot->readListBegin(_etype747, _size744); + this->component.resize(_size744); + uint32_t _i748; + for (_i748 = 0; _i748 < _size744; ++_i748) { - xfer += this->component[_i740].read(iprot); + xfer += this->component[_i748].read(iprot); } xfer += iprot->readListEnd(); } @@ -18160,10 +18728,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->component.size())); - std::vector ::const_iterator _iter741; - for (_iter741 = this->component.begin(); _iter741 != this->component.end(); ++_iter741) + std::vector ::const_iterator _iter749; + for (_iter749 = this->component.begin(); _iter749 != this->component.end(); ++_iter749) { - xfer += (*_iter741).write(oprot); + xfer += (*_iter749).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18202,21 +18770,21 @@ void swap(LockRequest &a, LockRequest &b) { swap(a.__isset, b.__isset); } -LockRequest::LockRequest(const LockRequest& other742) { - component = other742.component; - txnid = other742.txnid; - user = other742.user; - hostname = other742.hostname; - agentInfo = other742.agentInfo; - __isset = other742.__isset; -} -LockRequest& LockRequest::operator=(const LockRequest& other743) { - component = other743.component; - txnid = other743.txnid; - user = other743.user; - hostname = other743.hostname; - agentInfo = other743.agentInfo; - __isset = other743.__isset; +LockRequest::LockRequest(const LockRequest& other750) { + component = other750.component; + txnid = other750.txnid; + user = other750.user; + hostname = other750.hostname; + agentInfo = other750.agentInfo; + __isset = other750.__isset; +} +LockRequest& LockRequest::operator=(const LockRequest& other751) { + component = other751.component; + txnid = other751.txnid; + user = other751.user; + hostname = other751.hostname; + agentInfo = other751.agentInfo; + __isset = other751.__isset; return *this; } void LockRequest::printTo(std::ostream& out) const { @@ -18276,9 +18844,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast744; - xfer += iprot->readI32(ecast744); - this->state = (LockState::type)ecast744; + int32_t ecast752; + xfer += iprot->readI32(ecast752); + this->state = (LockState::type)ecast752; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -18324,13 +18892,13 @@ void swap(LockResponse &a, LockResponse &b) { swap(a.state, b.state); } -LockResponse::LockResponse(const LockResponse& other745) { - lockid = other745.lockid; - state = other745.state; +LockResponse::LockResponse(const LockResponse& other753) { + lockid = 
other753.lockid; + state = other753.state; } -LockResponse& LockResponse::operator=(const LockResponse& other746) { - lockid = other746.lockid; - state = other746.state; +LockResponse& LockResponse::operator=(const LockResponse& other754) { + lockid = other754.lockid; + state = other754.state; return *this; } void LockResponse::printTo(std::ostream& out) const { @@ -18452,17 +19020,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) { swap(a.__isset, b.__isset); } -CheckLockRequest::CheckLockRequest(const CheckLockRequest& other747) { - lockid = other747.lockid; - txnid = other747.txnid; - elapsed_ms = other747.elapsed_ms; - __isset = other747.__isset; +CheckLockRequest::CheckLockRequest(const CheckLockRequest& other755) { + lockid = other755.lockid; + txnid = other755.txnid; + elapsed_ms = other755.elapsed_ms; + __isset = other755.__isset; } -CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other748) { - lockid = other748.lockid; - txnid = other748.txnid; - elapsed_ms = other748.elapsed_ms; - __isset = other748.__isset; +CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other756) { + lockid = other756.lockid; + txnid = other756.txnid; + elapsed_ms = other756.elapsed_ms; + __isset = other756.__isset; return *this; } void CheckLockRequest::printTo(std::ostream& out) const { @@ -18546,11 +19114,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) { swap(a.lockid, b.lockid); } -UnlockRequest::UnlockRequest(const UnlockRequest& other749) { - lockid = other749.lockid; +UnlockRequest::UnlockRequest(const UnlockRequest& other757) { + lockid = other757.lockid; } -UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other750) { - lockid = other750.lockid; +UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other758) { + lockid = other758.lockid; return *this; } void UnlockRequest::printTo(std::ostream& out) const { @@ -18689,19 +19257,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) { swap(a.__isset, b.__isset); } -ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other751) { - dbname = other751.dbname; - tablename = other751.tablename; - partname = other751.partname; - isExtended = other751.isExtended; - __isset = other751.__isset; +ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other759) { + dbname = other759.dbname; + tablename = other759.tablename; + partname = other759.partname; + isExtended = other759.isExtended; + __isset = other759.__isset; } -ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other752) { - dbname = other752.dbname; - tablename = other752.tablename; - partname = other752.partname; - isExtended = other752.isExtended; - __isset = other752.__isset; +ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other760) { + dbname = other760.dbname; + tablename = other760.tablename; + partname = other760.partname; + isExtended = other760.isExtended; + __isset = other760.__isset; return *this; } void ShowLocksRequest::printTo(std::ostream& out) const { @@ -18854,9 +19422,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast753; - xfer += iprot->readI32(ecast753); - this->state = (LockState::type)ecast753; + int32_t ecast761; + xfer += iprot->readI32(ecast761); + this->state = (LockState::type)ecast761; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -18864,9 +19432,9 @@ uint32_t 
ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast754; - xfer += iprot->readI32(ecast754); - this->type = (LockType::type)ecast754; + int32_t ecast762; + xfer += iprot->readI32(ecast762); + this->type = (LockType::type)ecast762; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -19082,43 +19650,43 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) { swap(a.__isset, b.__isset); } -ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other755) { - lockid = other755.lockid; - dbname = other755.dbname; - tablename = other755.tablename; - partname = other755.partname; - state = other755.state; - type = other755.type; - txnid = other755.txnid; - lastheartbeat = other755.lastheartbeat; - acquiredat = other755.acquiredat; - user = other755.user; - hostname = other755.hostname; - heartbeatCount = other755.heartbeatCount; - agentInfo = other755.agentInfo; - blockedByExtId = other755.blockedByExtId; - blockedByIntId = other755.blockedByIntId; - lockIdInternal = other755.lockIdInternal; - __isset = other755.__isset; +ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other763) { + lockid = other763.lockid; + dbname = other763.dbname; + tablename = other763.tablename; + partname = other763.partname; + state = other763.state; + type = other763.type; + txnid = other763.txnid; + lastheartbeat = other763.lastheartbeat; + acquiredat = other763.acquiredat; + user = other763.user; + hostname = other763.hostname; + heartbeatCount = other763.heartbeatCount; + agentInfo = other763.agentInfo; + blockedByExtId = other763.blockedByExtId; + blockedByIntId = other763.blockedByIntId; + lockIdInternal = other763.lockIdInternal; + __isset = other763.__isset; } -ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other756) { - lockid = other756.lockid; - dbname = other756.dbname; - tablename = other756.tablename; - partname = other756.partname; - state = other756.state; - type = other756.type; - txnid = other756.txnid; - lastheartbeat = other756.lastheartbeat; - acquiredat = other756.acquiredat; - user = other756.user; - hostname = other756.hostname; - heartbeatCount = other756.heartbeatCount; - agentInfo = other756.agentInfo; - blockedByExtId = other756.blockedByExtId; - blockedByIntId = other756.blockedByIntId; - lockIdInternal = other756.lockIdInternal; - __isset = other756.__isset; +ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other764) { + lockid = other764.lockid; + dbname = other764.dbname; + tablename = other764.tablename; + partname = other764.partname; + state = other764.state; + type = other764.type; + txnid = other764.txnid; + lastheartbeat = other764.lastheartbeat; + acquiredat = other764.acquiredat; + user = other764.user; + hostname = other764.hostname; + heartbeatCount = other764.heartbeatCount; + agentInfo = other764.agentInfo; + blockedByExtId = other764.blockedByExtId; + blockedByIntId = other764.blockedByIntId; + lockIdInternal = other764.lockIdInternal; + __isset = other764.__isset; return *this; } void ShowLocksResponseElement::printTo(std::ostream& out) const { @@ -19177,14 +19745,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->locks.clear(); - uint32_t _size757; - ::apache::thrift::protocol::TType _etype760; - xfer += 
iprot->readListBegin(_etype760, _size757); - this->locks.resize(_size757); - uint32_t _i761; - for (_i761 = 0; _i761 < _size757; ++_i761) + uint32_t _size765; + ::apache::thrift::protocol::TType _etype768; + xfer += iprot->readListBegin(_etype768, _size765); + this->locks.resize(_size765); + uint32_t _i769; + for (_i769 = 0; _i769 < _size765; ++_i769) { - xfer += this->locks[_i761].read(iprot); + xfer += this->locks[_i769].read(iprot); } xfer += iprot->readListEnd(); } @@ -19213,10 +19781,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->locks.size())); - std::vector ::const_iterator _iter762; - for (_iter762 = this->locks.begin(); _iter762 != this->locks.end(); ++_iter762) + std::vector ::const_iterator _iter770; + for (_iter770 = this->locks.begin(); _iter770 != this->locks.end(); ++_iter770) { - xfer += (*_iter762).write(oprot); + xfer += (*_iter770).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19233,13 +19801,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) { swap(a.__isset, b.__isset); } -ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other763) { - locks = other763.locks; - __isset = other763.__isset; +ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other771) { + locks = other771.locks; + __isset = other771.__isset; } -ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other764) { - locks = other764.locks; - __isset = other764.__isset; +ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other772) { + locks = other772.locks; + __isset = other772.__isset; return *this; } void ShowLocksResponse::printTo(std::ostream& out) const { @@ -19340,15 +19908,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) { swap(a.__isset, b.__isset); } -HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other765) { - lockid = other765.lockid; - txnid = other765.txnid; - __isset = other765.__isset; +HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other773) { + lockid = other773.lockid; + txnid = other773.txnid; + __isset = other773.__isset; } -HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other766) { - lockid = other766.lockid; - txnid = other766.txnid; - __isset = other766.__isset; +HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other774) { + lockid = other774.lockid; + txnid = other774.txnid; + __isset = other774.__isset; return *this; } void HeartbeatRequest::printTo(std::ostream& out) const { @@ -19451,13 +20019,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) { swap(a.max, b.max); } -HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other767) { - min = other767.min; - max = other767.max; +HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other775) { + min = other775.min; + max = other775.max; } -HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other768) { - min = other768.min; - max = other768.max; +HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other776) { + min = other776.min; + max = other776.max; return *this; } void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const { @@ -19508,15 +20076,15 @@ uint32_t 
HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->aborted.clear(); - uint32_t _size769; - ::apache::thrift::protocol::TType _etype772; - xfer += iprot->readSetBegin(_etype772, _size769); - uint32_t _i773; - for (_i773 = 0; _i773 < _size769; ++_i773) + uint32_t _size777; + ::apache::thrift::protocol::TType _etype780; + xfer += iprot->readSetBegin(_etype780, _size777); + uint32_t _i781; + for (_i781 = 0; _i781 < _size777; ++_i781) { - int64_t _elem774; - xfer += iprot->readI64(_elem774); - this->aborted.insert(_elem774); + int64_t _elem782; + xfer += iprot->readI64(_elem782); + this->aborted.insert(_elem782); } xfer += iprot->readSetEnd(); } @@ -19529,15 +20097,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->nosuch.clear(); - uint32_t _size775; - ::apache::thrift::protocol::TType _etype778; - xfer += iprot->readSetBegin(_etype778, _size775); - uint32_t _i779; - for (_i779 = 0; _i779 < _size775; ++_i779) + uint32_t _size783; + ::apache::thrift::protocol::TType _etype786; + xfer += iprot->readSetBegin(_etype786, _size783); + uint32_t _i787; + for (_i787 = 0; _i787 < _size783; ++_i787) { - int64_t _elem780; - xfer += iprot->readI64(_elem780); - this->nosuch.insert(_elem780); + int64_t _elem788; + xfer += iprot->readI64(_elem788); + this->nosuch.insert(_elem788); } xfer += iprot->readSetEnd(); } @@ -19570,10 +20138,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->aborted.size())); - std::set ::const_iterator _iter781; - for (_iter781 = this->aborted.begin(); _iter781 != this->aborted.end(); ++_iter781) + std::set ::const_iterator _iter789; + for (_iter789 = this->aborted.begin(); _iter789 != this->aborted.end(); ++_iter789) { - xfer += oprot->writeI64((*_iter781)); + xfer += oprot->writeI64((*_iter789)); } xfer += oprot->writeSetEnd(); } @@ -19582,10 +20150,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->nosuch.size())); - std::set ::const_iterator _iter782; - for (_iter782 = this->nosuch.begin(); _iter782 != this->nosuch.end(); ++_iter782) + std::set ::const_iterator _iter790; + for (_iter790 = this->nosuch.begin(); _iter790 != this->nosuch.end(); ++_iter790) { - xfer += oprot->writeI64((*_iter782)); + xfer += oprot->writeI64((*_iter790)); } xfer += oprot->writeSetEnd(); } @@ -19602,13 +20170,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) { swap(a.nosuch, b.nosuch); } -HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other783) { - aborted = other783.aborted; - nosuch = other783.nosuch; +HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other791) { + aborted = other791.aborted; + nosuch = other791.nosuch; } -HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other784) { - aborted = other784.aborted; - nosuch = other784.nosuch; +HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other792) { + aborted = other792.aborted; + nosuch = 
other792.nosuch; return *this; } void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const { @@ -19701,9 +20269,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast785; - xfer += iprot->readI32(ecast785); - this->type = (CompactionType::type)ecast785; + int32_t ecast793; + xfer += iprot->readI32(ecast793); + this->type = (CompactionType::type)ecast793; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -19721,17 +20289,17 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size786; - ::apache::thrift::protocol::TType _ktype787; - ::apache::thrift::protocol::TType _vtype788; - xfer += iprot->readMapBegin(_ktype787, _vtype788, _size786); - uint32_t _i790; - for (_i790 = 0; _i790 < _size786; ++_i790) + uint32_t _size794; + ::apache::thrift::protocol::TType _ktype795; + ::apache::thrift::protocol::TType _vtype796; + xfer += iprot->readMapBegin(_ktype795, _vtype796, _size794); + uint32_t _i798; + for (_i798 = 0; _i798 < _size794; ++_i798) { - std::string _key791; - xfer += iprot->readString(_key791); - std::string& _val792 = this->properties[_key791]; - xfer += iprot->readString(_val792); + std::string _key799; + xfer += iprot->readString(_key799); + std::string& _val800 = this->properties[_key799]; + xfer += iprot->readString(_val800); } xfer += iprot->readMapEnd(); } @@ -19789,11 +20357,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter793; - for (_iter793 = this->properties.begin(); _iter793 != this->properties.end(); ++_iter793) + std::map ::const_iterator _iter801; + for (_iter801 = this->properties.begin(); _iter801 != this->properties.end(); ++_iter801) { - xfer += oprot->writeString(_iter793->first); - xfer += oprot->writeString(_iter793->second); + xfer += oprot->writeString(_iter801->first); + xfer += oprot->writeString(_iter801->second); } xfer += oprot->writeMapEnd(); } @@ -19815,23 +20383,23 @@ void swap(CompactionRequest &a, CompactionRequest &b) { swap(a.__isset, b.__isset); } -CompactionRequest::CompactionRequest(const CompactionRequest& other794) { - dbname = other794.dbname; - tablename = other794.tablename; - partitionname = other794.partitionname; - type = other794.type; - runas = other794.runas; - properties = other794.properties; - __isset = other794.__isset; -} -CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other795) { - dbname = other795.dbname; - tablename = other795.tablename; - partitionname = other795.partitionname; - type = other795.type; - runas = other795.runas; - properties = other795.properties; - __isset = other795.__isset; +CompactionRequest::CompactionRequest(const CompactionRequest& other802) { + dbname = other802.dbname; + tablename = other802.tablename; + partitionname = other802.partitionname; + type = other802.type; + runas = other802.runas; + properties = other802.properties; + __isset = other802.__isset; +} +CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other803) { + dbname = other803.dbname; + tablename = other803.tablename; + partitionname = 
other803.partitionname; + type = other803.type; + runas = other803.runas; + properties = other803.properties; + __isset = other803.__isset; return *this; } void CompactionRequest::printTo(std::ostream& out) const { @@ -19958,15 +20526,15 @@ void swap(CompactionResponse &a, CompactionResponse &b) { swap(a.accepted, b.accepted); } -CompactionResponse::CompactionResponse(const CompactionResponse& other796) { - id = other796.id; - state = other796.state; - accepted = other796.accepted; +CompactionResponse::CompactionResponse(const CompactionResponse& other804) { + id = other804.id; + state = other804.state; + accepted = other804.accepted; } -CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other797) { - id = other797.id; - state = other797.state; - accepted = other797.accepted; +CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other805) { + id = other805.id; + state = other805.state; + accepted = other805.accepted; return *this; } void CompactionResponse::printTo(std::ostream& out) const { @@ -20027,11 +20595,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) { (void) b; } -ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other798) { - (void) other798; +ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other806) { + (void) other806; } -ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other799) { - (void) other799; +ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other807) { + (void) other807; return *this; } void ShowCompactRequest::printTo(std::ostream& out) const { @@ -20157,9 +20725,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast800; - xfer += iprot->readI32(ecast800); - this->type = (CompactionType::type)ecast800; + int32_t ecast808; + xfer += iprot->readI32(ecast808); + this->type = (CompactionType::type)ecast808; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -20346,37 +20914,37 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.__isset, b.__isset); } -ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other801) { - dbname = other801.dbname; - tablename = other801.tablename; - partitionname = other801.partitionname; - type = other801.type; - state = other801.state; - workerid = other801.workerid; - start = other801.start; - runAs = other801.runAs; - hightestTxnId = other801.hightestTxnId; - metaInfo = other801.metaInfo; - endTime = other801.endTime; - hadoopJobId = other801.hadoopJobId; - id = other801.id; - __isset = other801.__isset; -} -ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other802) { - dbname = other802.dbname; - tablename = other802.tablename; - partitionname = other802.partitionname; - type = other802.type; - state = other802.state; - workerid = other802.workerid; - start = other802.start; - runAs = other802.runAs; - hightestTxnId = other802.hightestTxnId; - metaInfo = other802.metaInfo; - endTime = other802.endTime; - hadoopJobId = other802.hadoopJobId; - id = other802.id; - __isset = other802.__isset; +ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other809) { + dbname = other809.dbname; + tablename = other809.tablename; + partitionname = other809.partitionname; + type = other809.type; + state = other809.state; + workerid = 
other809.workerid; + start = other809.start; + runAs = other809.runAs; + hightestTxnId = other809.hightestTxnId; + metaInfo = other809.metaInfo; + endTime = other809.endTime; + hadoopJobId = other809.hadoopJobId; + id = other809.id; + __isset = other809.__isset; +} +ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other810) { + dbname = other810.dbname; + tablename = other810.tablename; + partitionname = other810.partitionname; + type = other810.type; + state = other810.state; + workerid = other810.workerid; + start = other810.start; + runAs = other810.runAs; + hightestTxnId = other810.hightestTxnId; + metaInfo = other810.metaInfo; + endTime = other810.endTime; + hadoopJobId = other810.hadoopJobId; + id = other810.id; + __isset = other810.__isset; return *this; } void ShowCompactResponseElement::printTo(std::ostream& out) const { @@ -20433,14 +21001,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->compacts.clear(); - uint32_t _size803; - ::apache::thrift::protocol::TType _etype806; - xfer += iprot->readListBegin(_etype806, _size803); - this->compacts.resize(_size803); - uint32_t _i807; - for (_i807 = 0; _i807 < _size803; ++_i807) + uint32_t _size811; + ::apache::thrift::protocol::TType _etype814; + xfer += iprot->readListBegin(_etype814, _size811); + this->compacts.resize(_size811); + uint32_t _i815; + for (_i815 = 0; _i815 < _size811; ++_i815) { - xfer += this->compacts[_i807].read(iprot); + xfer += this->compacts[_i815].read(iprot); } xfer += iprot->readListEnd(); } @@ -20471,10 +21039,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->compacts.size())); - std::vector ::const_iterator _iter808; - for (_iter808 = this->compacts.begin(); _iter808 != this->compacts.end(); ++_iter808) + std::vector ::const_iterator _iter816; + for (_iter816 = this->compacts.begin(); _iter816 != this->compacts.end(); ++_iter816) { - xfer += (*_iter808).write(oprot); + xfer += (*_iter816).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20490,11 +21058,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) { swap(a.compacts, b.compacts); } -ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other809) { - compacts = other809.compacts; +ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other817) { + compacts = other817.compacts; } -ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other810) { - compacts = other810.compacts; +ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other818) { + compacts = other818.compacts; return *this; } void ShowCompactResponse::printTo(std::ostream& out) const { @@ -20596,14 +21164,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionnames.clear(); - uint32_t _size811; - ::apache::thrift::protocol::TType _etype814; - xfer += iprot->readListBegin(_etype814, _size811); - this->partitionnames.resize(_size811); - uint32_t _i815; - for (_i815 = 0; _i815 < _size811; ++_i815) + uint32_t _size819; + ::apache::thrift::protocol::TType _etype822; + xfer += iprot->readListBegin(_etype822, _size819); + this->partitionnames.resize(_size819); + 
uint32_t _i823; + for (_i823 = 0; _i823 < _size819; ++_i823) { - xfer += iprot->readString(this->partitionnames[_i815]); + xfer += iprot->readString(this->partitionnames[_i823]); } xfer += iprot->readListEnd(); } @@ -20614,9 +21182,9 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast816; - xfer += iprot->readI32(ecast816); - this->operationType = (DataOperationType::type)ecast816; + int32_t ecast824; + xfer += iprot->readI32(ecast824); + this->operationType = (DataOperationType::type)ecast824; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -20668,10 +21236,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionnames.size())); - std::vector ::const_iterator _iter817; - for (_iter817 = this->partitionnames.begin(); _iter817 != this->partitionnames.end(); ++_iter817) + std::vector ::const_iterator _iter825; + for (_iter825 = this->partitionnames.begin(); _iter825 != this->partitionnames.end(); ++_iter825) { - xfer += oprot->writeString((*_iter817)); + xfer += oprot->writeString((*_iter825)); } xfer += oprot->writeListEnd(); } @@ -20698,23 +21266,23 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.__isset, b.__isset); } -AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other818) { - txnid = other818.txnid; - writeid = other818.writeid; - dbname = other818.dbname; - tablename = other818.tablename; - partitionnames = other818.partitionnames; - operationType = other818.operationType; - __isset = other818.__isset; -} -AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other819) { - txnid = other819.txnid; - writeid = other819.writeid; - dbname = other819.dbname; - tablename = other819.tablename; - partitionnames = other819.partitionnames; - operationType = other819.operationType; - __isset = other819.__isset; +AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other826) { + txnid = other826.txnid; + writeid = other826.writeid; + dbname = other826.dbname; + tablename = other826.tablename; + partitionnames = other826.partitionnames; + operationType = other826.operationType; + __isset = other826.__isset; +} +AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other827) { + txnid = other827.txnid; + writeid = other827.writeid; + dbname = other827.dbname; + tablename = other827.tablename; + partitionnames = other827.partitionnames; + operationType = other827.operationType; + __isset = other827.__isset; return *this; } void AddDynamicPartitions::printTo(std::ostream& out) const { @@ -20897,23 +21465,23 @@ void swap(BasicTxnInfo &a, BasicTxnInfo &b) { swap(a.__isset, b.__isset); } -BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other820) { - isnull = other820.isnull; - time = other820.time; - txnid = other820.txnid; - dbname = other820.dbname; - tablename = other820.tablename; - partitionname = other820.partitionname; - __isset = other820.__isset; -} -BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other821) { - isnull = other821.isnull; - time = other821.time; - txnid = other821.txnid; - dbname = other821.dbname; - tablename = other821.tablename; - partitionname = other821.partitionname; - __isset = 
other821.__isset; +BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other828) { + isnull = other828.isnull; + time = other828.time; + txnid = other828.txnid; + dbname = other828.dbname; + tablename = other828.tablename; + partitionname = other828.partitionname; + __isset = other828.__isset; +} +BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other829) { + isnull = other829.isnull; + time = other829.time; + txnid = other829.txnid; + dbname = other829.dbname; + tablename = other829.tablename; + partitionname = other829.partitionname; + __isset = other829.__isset; return *this; } void BasicTxnInfo::printTo(std::ostream& out) const { @@ -21007,15 +21575,15 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); - uint32_t _size822; - ::apache::thrift::protocol::TType _etype825; - xfer += iprot->readSetBegin(_etype825, _size822); - uint32_t _i826; - for (_i826 = 0; _i826 < _size822; ++_i826) + uint32_t _size830; + ::apache::thrift::protocol::TType _etype833; + xfer += iprot->readSetBegin(_etype833, _size830); + uint32_t _i834; + for (_i834 = 0; _i834 < _size830; ++_i834) { - std::string _elem827; - xfer += iprot->readString(_elem827); - this->tablesUsed.insert(_elem827); + std::string _elem835; + xfer += iprot->readString(_elem835); + this->tablesUsed.insert(_elem835); } xfer += iprot->readSetEnd(); } @@ -21072,10 +21640,10 @@ uint32_t CreationMetadata::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 4); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tablesUsed.size())); - std::set ::const_iterator _iter828; - for (_iter828 = this->tablesUsed.begin(); _iter828 != this->tablesUsed.end(); ++_iter828) + std::set ::const_iterator _iter836; + for (_iter836 = this->tablesUsed.begin(); _iter836 != this->tablesUsed.end(); ++_iter836) { - xfer += oprot->writeString((*_iter828)); + xfer += oprot->writeString((*_iter836)); } xfer += oprot->writeSetEnd(); } @@ -21101,21 +21669,21 @@ void swap(CreationMetadata &a, CreationMetadata &b) { swap(a.__isset, b.__isset); } -CreationMetadata::CreationMetadata(const CreationMetadata& other829) { - catName = other829.catName; - dbName = other829.dbName; - tblName = other829.tblName; - tablesUsed = other829.tablesUsed; - validTxnList = other829.validTxnList; - __isset = other829.__isset; -} -CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other830) { - catName = other830.catName; - dbName = other830.dbName; - tblName = other830.tblName; - tablesUsed = other830.tablesUsed; - validTxnList = other830.validTxnList; - __isset = other830.__isset; +CreationMetadata::CreationMetadata(const CreationMetadata& other837) { + catName = other837.catName; + dbName = other837.dbName; + tblName = other837.tblName; + tablesUsed = other837.tablesUsed; + validTxnList = other837.validTxnList; + __isset = other837.__isset; +} +CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other838) { + catName = other838.catName; + dbName = other838.dbName; + tblName = other838.tblName; + tablesUsed = other838.tablesUsed; + validTxnList = other838.validTxnList; + __isset = other838.__isset; return *this; } void CreationMetadata::printTo(std::ostream& out) const { @@ -21221,15 +21789,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) { swap(a.__isset, b.__isset); } 
-NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other831) { - lastEvent = other831.lastEvent; - maxEvents = other831.maxEvents; - __isset = other831.__isset; +NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other839) { + lastEvent = other839.lastEvent; + maxEvents = other839.maxEvents; + __isset = other839.__isset; } -NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other832) { - lastEvent = other832.lastEvent; - maxEvents = other832.maxEvents; - __isset = other832.__isset; +NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other840) { + lastEvent = other840.lastEvent; + maxEvents = other840.maxEvents; + __isset = other840.__isset; return *this; } void NotificationEventRequest::printTo(std::ostream& out) const { @@ -21449,27 +22017,27 @@ void swap(NotificationEvent &a, NotificationEvent &b) { swap(a.__isset, b.__isset); } -NotificationEvent::NotificationEvent(const NotificationEvent& other833) { - eventId = other833.eventId; - eventTime = other833.eventTime; - eventType = other833.eventType; - dbName = other833.dbName; - tableName = other833.tableName; - message = other833.message; - messageFormat = other833.messageFormat; - catName = other833.catName; - __isset = other833.__isset; -} -NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other834) { - eventId = other834.eventId; - eventTime = other834.eventTime; - eventType = other834.eventType; - dbName = other834.dbName; - tableName = other834.tableName; - message = other834.message; - messageFormat = other834.messageFormat; - catName = other834.catName; - __isset = other834.__isset; +NotificationEvent::NotificationEvent(const NotificationEvent& other841) { + eventId = other841.eventId; + eventTime = other841.eventTime; + eventType = other841.eventType; + dbName = other841.dbName; + tableName = other841.tableName; + message = other841.message; + messageFormat = other841.messageFormat; + catName = other841.catName; + __isset = other841.__isset; +} +NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other842) { + eventId = other842.eventId; + eventTime = other842.eventTime; + eventType = other842.eventType; + dbName = other842.dbName; + tableName = other842.tableName; + message = other842.message; + messageFormat = other842.messageFormat; + catName = other842.catName; + __isset = other842.__isset; return *this; } void NotificationEvent::printTo(std::ostream& out) const { @@ -21521,14 +22089,14 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->events.clear(); - uint32_t _size835; - ::apache::thrift::protocol::TType _etype838; - xfer += iprot->readListBegin(_etype838, _size835); - this->events.resize(_size835); - uint32_t _i839; - for (_i839 = 0; _i839 < _size835; ++_i839) + uint32_t _size843; + ::apache::thrift::protocol::TType _etype846; + xfer += iprot->readListBegin(_etype846, _size843); + this->events.resize(_size843); + uint32_t _i847; + for (_i847 = 0; _i847 < _size843; ++_i847) { - xfer += this->events[_i839].read(iprot); + xfer += this->events[_i847].read(iprot); } xfer += iprot->readListEnd(); } @@ -21559,10 +22127,10 @@ uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->events.size())); - std::vector ::const_iterator _iter840; - for (_iter840 = this->events.begin(); _iter840 != this->events.end(); ++_iter840) + std::vector ::const_iterator _iter848; + for (_iter848 = this->events.begin(); _iter848 != this->events.end(); ++_iter848) { - xfer += (*_iter840).write(oprot); + xfer += (*_iter848).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21578,11 +22146,11 @@ void swap(NotificationEventResponse &a, NotificationEventResponse &b) { swap(a.events, b.events); } -NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other841) { - events = other841.events; +NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other849) { + events = other849.events; } -NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other842) { - events = other842.events; +NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other850) { + events = other850.events; return *this; } void NotificationEventResponse::printTo(std::ostream& out) const { @@ -21664,11 +22232,11 @@ void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) { swap(a.eventId, b.eventId); } -CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other843) { - eventId = other843.eventId; +CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other851) { + eventId = other851.eventId; } -CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other844) { - eventId = other844.eventId; +CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other852) { + eventId = other852.eventId; return *this; } void CurrentNotificationEventId::printTo(std::ostream& out) const { @@ -21790,17 +22358,17 @@ void swap(NotificationEventsCountRequest &a, NotificationEventsCountRequest &b) swap(a.__isset, b.__isset); } -NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other845) { - fromEventId = other845.fromEventId; - dbName = other845.dbName; - catName = other845.catName; - __isset = other845.__isset; +NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other853) { + fromEventId = other853.fromEventId; + dbName = other853.dbName; + catName = other853.catName; + __isset = other853.__isset; } -NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other846) { - fromEventId = other846.fromEventId; - dbName = other846.dbName; - catName = other846.catName; - __isset = other846.__isset; +NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other854) { + fromEventId = other854.fromEventId; + dbName = other854.dbName; + catName = other854.catName; + __isset = other854.__isset; return *this; } void NotificationEventsCountRequest::printTo(std::ostream& out) const { @@ -21884,11 +22452,11 @@ void swap(NotificationEventsCountResponse &a, NotificationEventsCountResponse &b swap(a.eventsCount, b.eventsCount); } -NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other847) { - eventsCount = other847.eventsCount; +NotificationEventsCountResponse::NotificationEventsCountResponse(const 
NotificationEventsCountResponse& other855) { + eventsCount = other855.eventsCount; } -NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other848) { - eventsCount = other848.eventsCount; +NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other856) { + eventsCount = other856.eventsCount; return *this; } void NotificationEventsCountResponse::printTo(std::ostream& out) const { @@ -21951,14 +22519,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAdded.clear(); - uint32_t _size849; - ::apache::thrift::protocol::TType _etype852; - xfer += iprot->readListBegin(_etype852, _size849); - this->filesAdded.resize(_size849); - uint32_t _i853; - for (_i853 = 0; _i853 < _size849; ++_i853) + uint32_t _size857; + ::apache::thrift::protocol::TType _etype860; + xfer += iprot->readListBegin(_etype860, _size857); + this->filesAdded.resize(_size857); + uint32_t _i861; + for (_i861 = 0; _i861 < _size857; ++_i861) { - xfer += iprot->readString(this->filesAdded[_i853]); + xfer += iprot->readString(this->filesAdded[_i861]); } xfer += iprot->readListEnd(); } @@ -21971,14 +22539,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAddedChecksum.clear(); - uint32_t _size854; - ::apache::thrift::protocol::TType _etype857; - xfer += iprot->readListBegin(_etype857, _size854); - this->filesAddedChecksum.resize(_size854); - uint32_t _i858; - for (_i858 = 0; _i858 < _size854; ++_i858) + uint32_t _size862; + ::apache::thrift::protocol::TType _etype865; + xfer += iprot->readListBegin(_etype865, _size862); + this->filesAddedChecksum.resize(_size862); + uint32_t _i866; + for (_i866 = 0; _i866 < _size862; ++_i866) { - xfer += iprot->readString(this->filesAddedChecksum[_i858]); + xfer += iprot->readString(this->filesAddedChecksum[_i866]); } xfer += iprot->readListEnd(); } @@ -22014,10 +22582,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAdded.size())); - std::vector ::const_iterator _iter859; - for (_iter859 = this->filesAdded.begin(); _iter859 != this->filesAdded.end(); ++_iter859) + std::vector ::const_iterator _iter867; + for (_iter867 = this->filesAdded.begin(); _iter867 != this->filesAdded.end(); ++_iter867) { - xfer += oprot->writeString((*_iter859)); + xfer += oprot->writeString((*_iter867)); } xfer += oprot->writeListEnd(); } @@ -22027,10 +22595,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAddedChecksum", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAddedChecksum.size())); - std::vector ::const_iterator _iter860; - for (_iter860 = this->filesAddedChecksum.begin(); _iter860 != this->filesAddedChecksum.end(); ++_iter860) + std::vector ::const_iterator _iter868; + for (_iter868 = this->filesAddedChecksum.begin(); _iter868 != this->filesAddedChecksum.end(); ++_iter868) { - xfer += oprot->writeString((*_iter860)); + xfer += oprot->writeString((*_iter868)); } xfer += oprot->writeListEnd(); } @@ -22049,17 +22617,17 @@ void 
swap(InsertEventRequestData &a, InsertEventRequestData &b) { swap(a.__isset, b.__isset); } -InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other861) { - replace = other861.replace; - filesAdded = other861.filesAdded; - filesAddedChecksum = other861.filesAddedChecksum; - __isset = other861.__isset; +InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other869) { + replace = other869.replace; + filesAdded = other869.filesAdded; + filesAddedChecksum = other869.filesAddedChecksum; + __isset = other869.__isset; } -InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other862) { - replace = other862.replace; - filesAdded = other862.filesAdded; - filesAddedChecksum = other862.filesAddedChecksum; - __isset = other862.__isset; +InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other870) { + replace = other870.replace; + filesAdded = other870.filesAdded; + filesAddedChecksum = other870.filesAddedChecksum; + __isset = other870.__isset; return *this; } void InsertEventRequestData::printTo(std::ostream& out) const { @@ -22141,13 +22709,13 @@ void swap(FireEventRequestData &a, FireEventRequestData &b) { swap(a.__isset, b.__isset); } -FireEventRequestData::FireEventRequestData(const FireEventRequestData& other863) { - insertData = other863.insertData; - __isset = other863.__isset; +FireEventRequestData::FireEventRequestData(const FireEventRequestData& other871) { + insertData = other871.insertData; + __isset = other871.__isset; } -FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other864) { - insertData = other864.insertData; - __isset = other864.__isset; +FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other872) { + insertData = other872.insertData; + __isset = other872.__isset; return *this; } void FireEventRequestData::printTo(std::ostream& out) const { @@ -22249,14 +22817,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionVals.clear(); - uint32_t _size865; - ::apache::thrift::protocol::TType _etype868; - xfer += iprot->readListBegin(_etype868, _size865); - this->partitionVals.resize(_size865); - uint32_t _i869; - for (_i869 = 0; _i869 < _size865; ++_i869) + uint32_t _size873; + ::apache::thrift::protocol::TType _etype876; + xfer += iprot->readListBegin(_etype876, _size873); + this->partitionVals.resize(_size873); + uint32_t _i877; + for (_i877 = 0; _i877 < _size873; ++_i877) { - xfer += iprot->readString(this->partitionVals[_i869]); + xfer += iprot->readString(this->partitionVals[_i877]); } xfer += iprot->readListEnd(); } @@ -22316,10 +22884,10 @@ uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionVals.size())); - std::vector ::const_iterator _iter870; - for (_iter870 = this->partitionVals.begin(); _iter870 != this->partitionVals.end(); ++_iter870) + std::vector ::const_iterator _iter878; + for (_iter878 = this->partitionVals.begin(); _iter878 != this->partitionVals.end(); ++_iter878) { - xfer += oprot->writeString((*_iter870)); + xfer += oprot->writeString((*_iter878)); } xfer += oprot->writeListEnd(); } @@ -22346,23 +22914,23 @@ void swap(FireEventRequest &a, FireEventRequest &b) { 
swap(a.__isset, b.__isset); } -FireEventRequest::FireEventRequest(const FireEventRequest& other871) { - successful = other871.successful; - data = other871.data; - dbName = other871.dbName; - tableName = other871.tableName; - partitionVals = other871.partitionVals; - catName = other871.catName; - __isset = other871.__isset; -} -FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other872) { - successful = other872.successful; - data = other872.data; - dbName = other872.dbName; - tableName = other872.tableName; - partitionVals = other872.partitionVals; - catName = other872.catName; - __isset = other872.__isset; +FireEventRequest::FireEventRequest(const FireEventRequest& other879) { + successful = other879.successful; + data = other879.data; + dbName = other879.dbName; + tableName = other879.tableName; + partitionVals = other879.partitionVals; + catName = other879.catName; + __isset = other879.__isset; +} +FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other880) { + successful = other880.successful; + data = other880.data; + dbName = other880.dbName; + tableName = other880.tableName; + partitionVals = other880.partitionVals; + catName = other880.catName; + __isset = other880.__isset; return *this; } void FireEventRequest::printTo(std::ostream& out) const { @@ -22426,11 +22994,11 @@ void swap(FireEventResponse &a, FireEventResponse &b) { (void) b; } -FireEventResponse::FireEventResponse(const FireEventResponse& other873) { - (void) other873; +FireEventResponse::FireEventResponse(const FireEventResponse& other881) { + (void) other881; } -FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other874) { - (void) other874; +FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other882) { + (void) other882; return *this; } void FireEventResponse::printTo(std::ostream& out) const { @@ -22530,15 +23098,15 @@ void swap(MetadataPpdResult &a, MetadataPpdResult &b) { swap(a.__isset, b.__isset); } -MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other875) { - metadata = other875.metadata; - includeBitset = other875.includeBitset; - __isset = other875.__isset; +MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other883) { + metadata = other883.metadata; + includeBitset = other883.includeBitset; + __isset = other883.__isset; } -MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other876) { - metadata = other876.metadata; - includeBitset = other876.includeBitset; - __isset = other876.__isset; +MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other884) { + metadata = other884.metadata; + includeBitset = other884.includeBitset; + __isset = other884.__isset; return *this; } void MetadataPpdResult::printTo(std::ostream& out) const { @@ -22589,17 +23157,17 @@ uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size877; - ::apache::thrift::protocol::TType _ktype878; - ::apache::thrift::protocol::TType _vtype879; - xfer += iprot->readMapBegin(_ktype878, _vtype879, _size877); - uint32_t _i881; - for (_i881 = 0; _i881 < _size877; ++_i881) + uint32_t _size885; + ::apache::thrift::protocol::TType _ktype886; + ::apache::thrift::protocol::TType _vtype887; + xfer += iprot->readMapBegin(_ktype886, _vtype887, _size885); + uint32_t _i889; + for (_i889 = 0; _i889 < _size885; ++_i889) { - int64_t _key882; - xfer += iprot->readI64(_key882); - MetadataPpdResult& 
_val883 = this->metadata[_key882]; - xfer += _val883.read(iprot); + int64_t _key890; + xfer += iprot->readI64(_key890); + MetadataPpdResult& _val891 = this->metadata[_key890]; + xfer += _val891.read(iprot); } xfer += iprot->readMapEnd(); } @@ -22640,11 +23208,11 @@ uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRUCT, static_cast(this->metadata.size())); - std::map ::const_iterator _iter884; - for (_iter884 = this->metadata.begin(); _iter884 != this->metadata.end(); ++_iter884) + std::map ::const_iterator _iter892; + for (_iter892 = this->metadata.begin(); _iter892 != this->metadata.end(); ++_iter892) { - xfer += oprot->writeI64(_iter884->first); - xfer += _iter884->second.write(oprot); + xfer += oprot->writeI64(_iter892->first); + xfer += _iter892->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -22665,13 +23233,13 @@ void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other885) { - metadata = other885.metadata; - isSupported = other885.isSupported; +GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other893) { + metadata = other893.metadata; + isSupported = other893.isSupported; } -GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other886) { - metadata = other886.metadata; - isSupported = other886.isSupported; +GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other894) { + metadata = other894.metadata; + isSupported = other894.isSupported; return *this; } void GetFileMetadataByExprResult::printTo(std::ostream& out) const { @@ -22732,14 +23300,14 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size887; - ::apache::thrift::protocol::TType _etype890; - xfer += iprot->readListBegin(_etype890, _size887); - this->fileIds.resize(_size887); - uint32_t _i891; - for (_i891 = 0; _i891 < _size887; ++_i891) + uint32_t _size895; + ::apache::thrift::protocol::TType _etype898; + xfer += iprot->readListBegin(_etype898, _size895); + this->fileIds.resize(_size895); + uint32_t _i899; + for (_i899 = 0; _i899 < _size895; ++_i899) { - xfer += iprot->readI64(this->fileIds[_i891]); + xfer += iprot->readI64(this->fileIds[_i899]); } xfer += iprot->readListEnd(); } @@ -22766,9 +23334,9 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast892; - xfer += iprot->readI32(ecast892); - this->type = (FileMetadataExprType::type)ecast892; + int32_t ecast900; + xfer += iprot->readI32(ecast900); + this->type = (FileMetadataExprType::type)ecast900; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -22798,10 +23366,10 @@ uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter893; - for (_iter893 = this->fileIds.begin(); 
_iter893 != this->fileIds.end(); ++_iter893) + std::vector ::const_iterator _iter901; + for (_iter901 = this->fileIds.begin(); _iter901 != this->fileIds.end(); ++_iter901) { - xfer += oprot->writeI64((*_iter893)); + xfer += oprot->writeI64((*_iter901)); } xfer += oprot->writeListEnd(); } @@ -22835,19 +23403,19 @@ void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) { swap(a.__isset, b.__isset); } -GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other894) { - fileIds = other894.fileIds; - expr = other894.expr; - doGetFooters = other894.doGetFooters; - type = other894.type; - __isset = other894.__isset; +GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other902) { + fileIds = other902.fileIds; + expr = other902.expr; + doGetFooters = other902.doGetFooters; + type = other902.type; + __isset = other902.__isset; } -GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other895) { - fileIds = other895.fileIds; - expr = other895.expr; - doGetFooters = other895.doGetFooters; - type = other895.type; - __isset = other895.__isset; +GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other903) { + fileIds = other903.fileIds; + expr = other903.expr; + doGetFooters = other903.doGetFooters; + type = other903.type; + __isset = other903.__isset; return *this; } void GetFileMetadataByExprRequest::printTo(std::ostream& out) const { @@ -22900,17 +23468,17 @@ uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size896; - ::apache::thrift::protocol::TType _ktype897; - ::apache::thrift::protocol::TType _vtype898; - xfer += iprot->readMapBegin(_ktype897, _vtype898, _size896); - uint32_t _i900; - for (_i900 = 0; _i900 < _size896; ++_i900) + uint32_t _size904; + ::apache::thrift::protocol::TType _ktype905; + ::apache::thrift::protocol::TType _vtype906; + xfer += iprot->readMapBegin(_ktype905, _vtype906, _size904); + uint32_t _i908; + for (_i908 = 0; _i908 < _size904; ++_i908) { - int64_t _key901; - xfer += iprot->readI64(_key901); - std::string& _val902 = this->metadata[_key901]; - xfer += iprot->readBinary(_val902); + int64_t _key909; + xfer += iprot->readI64(_key909); + std::string& _val910 = this->metadata[_key909]; + xfer += iprot->readBinary(_val910); } xfer += iprot->readMapEnd(); } @@ -22951,11 +23519,11 @@ uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::map ::const_iterator _iter903; - for (_iter903 = this->metadata.begin(); _iter903 != this->metadata.end(); ++_iter903) + std::map ::const_iterator _iter911; + for (_iter911 = this->metadata.begin(); _iter911 != this->metadata.end(); ++_iter911) { - xfer += oprot->writeI64(_iter903->first); - xfer += oprot->writeBinary(_iter903->second); + xfer += oprot->writeI64(_iter911->first); + xfer += oprot->writeBinary(_iter911->second); } xfer += oprot->writeMapEnd(); } @@ -22976,13 +23544,13 @@ void swap(GetFileMetadataResult &a, GetFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& 
other904) { - metadata = other904.metadata; - isSupported = other904.isSupported; +GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other912) { + metadata = other912.metadata; + isSupported = other912.isSupported; } -GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other905) { - metadata = other905.metadata; - isSupported = other905.isSupported; +GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other913) { + metadata = other913.metadata; + isSupported = other913.isSupported; return *this; } void GetFileMetadataResult::printTo(std::ostream& out) const { @@ -23028,14 +23596,14 @@ uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size906; - ::apache::thrift::protocol::TType _etype909; - xfer += iprot->readListBegin(_etype909, _size906); - this->fileIds.resize(_size906); - uint32_t _i910; - for (_i910 = 0; _i910 < _size906; ++_i910) + uint32_t _size914; + ::apache::thrift::protocol::TType _etype917; + xfer += iprot->readListBegin(_etype917, _size914); + this->fileIds.resize(_size914); + uint32_t _i918; + for (_i918 = 0; _i918 < _size914; ++_i918) { - xfer += iprot->readI64(this->fileIds[_i910]); + xfer += iprot->readI64(this->fileIds[_i918]); } xfer += iprot->readListEnd(); } @@ -23066,10 +23634,10 @@ uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter911; - for (_iter911 = this->fileIds.begin(); _iter911 != this->fileIds.end(); ++_iter911) + std::vector ::const_iterator _iter919; + for (_iter919 = this->fileIds.begin(); _iter919 != this->fileIds.end(); ++_iter919) { - xfer += oprot->writeI64((*_iter911)); + xfer += oprot->writeI64((*_iter919)); } xfer += oprot->writeListEnd(); } @@ -23085,11 +23653,11 @@ void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other912) { - fileIds = other912.fileIds; +GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other920) { + fileIds = other920.fileIds; } -GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other913) { - fileIds = other913.fileIds; +GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other921) { + fileIds = other921.fileIds; return *this; } void GetFileMetadataRequest::printTo(std::ostream& out) const { @@ -23148,11 +23716,11 @@ void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) { (void) b; } -PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other914) { - (void) other914; +PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other922) { + (void) other922; } -PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other915) { - (void) other915; +PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other923) { + (void) other923; return *this; } void PutFileMetadataResult::printTo(std::ostream& out) const { @@ -23206,14 +23774,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size916; - ::apache::thrift::protocol::TType _etype919; - xfer += iprot->readListBegin(_etype919, _size916); - this->fileIds.resize(_size916); - uint32_t _i920; - for (_i920 = 0; _i920 < _size916; ++_i920) + uint32_t _size924; + ::apache::thrift::protocol::TType _etype927; + xfer += iprot->readListBegin(_etype927, _size924); + this->fileIds.resize(_size924); + uint32_t _i928; + for (_i928 = 0; _i928 < _size924; ++_i928) { - xfer += iprot->readI64(this->fileIds[_i920]); + xfer += iprot->readI64(this->fileIds[_i928]); } xfer += iprot->readListEnd(); } @@ -23226,14 +23794,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->metadata.clear(); - uint32_t _size921; - ::apache::thrift::protocol::TType _etype924; - xfer += iprot->readListBegin(_etype924, _size921); - this->metadata.resize(_size921); - uint32_t _i925; - for (_i925 = 0; _i925 < _size921; ++_i925) + uint32_t _size929; + ::apache::thrift::protocol::TType _etype932; + xfer += iprot->readListBegin(_etype932, _size929); + this->metadata.resize(_size929); + uint32_t _i933; + for (_i933 = 0; _i933 < _size929; ++_i933) { - xfer += iprot->readBinary(this->metadata[_i925]); + xfer += iprot->readBinary(this->metadata[_i933]); } xfer += iprot->readListEnd(); } @@ -23244,9 +23812,9 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast926; - xfer += iprot->readI32(ecast926); - this->type = (FileMetadataExprType::type)ecast926; + int32_t ecast934; + xfer += iprot->readI32(ecast934); + this->type = (FileMetadataExprType::type)ecast934; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -23276,10 +23844,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter927; - for (_iter927 = this->fileIds.begin(); _iter927 != this->fileIds.end(); ++_iter927) + std::vector ::const_iterator _iter935; + for (_iter935 = this->fileIds.begin(); _iter935 != this->fileIds.end(); ++_iter935) { - xfer += oprot->writeI64((*_iter927)); + xfer += oprot->writeI64((*_iter935)); } xfer += oprot->writeListEnd(); } @@ -23288,10 +23856,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::vector ::const_iterator _iter928; - for (_iter928 = this->metadata.begin(); _iter928 != this->metadata.end(); ++_iter928) + std::vector ::const_iterator _iter936; + for (_iter936 = this->metadata.begin(); _iter936 != this->metadata.end(); ++_iter936) { - xfer += oprot->writeBinary((*_iter928)); + xfer += oprot->writeBinary((*_iter936)); } xfer += oprot->writeListEnd(); } @@ -23315,17 +23883,17 @@ void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other929) { - fileIds = other929.fileIds; - metadata = other929.metadata; - type = other929.type; - __isset = other929.__isset; 
+PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other937) { + fileIds = other937.fileIds; + metadata = other937.metadata; + type = other937.type; + __isset = other937.__isset; } -PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other930) { - fileIds = other930.fileIds; - metadata = other930.metadata; - type = other930.type; - __isset = other930.__isset; +PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other938) { + fileIds = other938.fileIds; + metadata = other938.metadata; + type = other938.type; + __isset = other938.__isset; return *this; } void PutFileMetadataRequest::printTo(std::ostream& out) const { @@ -23386,11 +23954,11 @@ void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) { (void) b; } -ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other931) { - (void) other931; +ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other939) { + (void) other939; } -ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other932) { - (void) other932; +ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other940) { + (void) other940; return *this; } void ClearFileMetadataResult::printTo(std::ostream& out) const { @@ -23434,14 +24002,14 @@ uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size933; - ::apache::thrift::protocol::TType _etype936; - xfer += iprot->readListBegin(_etype936, _size933); - this->fileIds.resize(_size933); - uint32_t _i937; - for (_i937 = 0; _i937 < _size933; ++_i937) + uint32_t _size941; + ::apache::thrift::protocol::TType _etype944; + xfer += iprot->readListBegin(_etype944, _size941); + this->fileIds.resize(_size941); + uint32_t _i945; + for (_i945 = 0; _i945 < _size941; ++_i945) { - xfer += iprot->readI64(this->fileIds[_i937]); + xfer += iprot->readI64(this->fileIds[_i945]); } xfer += iprot->readListEnd(); } @@ -23472,10 +24040,10 @@ uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter938; - for (_iter938 = this->fileIds.begin(); _iter938 != this->fileIds.end(); ++_iter938) + std::vector ::const_iterator _iter946; + for (_iter946 = this->fileIds.begin(); _iter946 != this->fileIds.end(); ++_iter946) { - xfer += oprot->writeI64((*_iter938)); + xfer += oprot->writeI64((*_iter946)); } xfer += oprot->writeListEnd(); } @@ -23491,11 +24059,11 @@ void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other939) { - fileIds = other939.fileIds; +ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other947) { + fileIds = other947.fileIds; } -ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other940) { - fileIds = other940.fileIds; +ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other948) { + fileIds = other948.fileIds; return *this; } void ClearFileMetadataRequest::printTo(std::ostream& out) const { @@ -23577,11 +24145,11 
@@ void swap(CacheFileMetadataResult &a, CacheFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other941) { - isSupported = other941.isSupported; +CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other949) { + isSupported = other949.isSupported; } -CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other942) { - isSupported = other942.isSupported; +CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other950) { + isSupported = other950.isSupported; return *this; } void CacheFileMetadataResult::printTo(std::ostream& out) const { @@ -23722,19 +24290,19 @@ void swap(CacheFileMetadataRequest &a, CacheFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other943) { - dbName = other943.dbName; - tblName = other943.tblName; - partName = other943.partName; - isAllParts = other943.isAllParts; - __isset = other943.__isset; +CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other951) { + dbName = other951.dbName; + tblName = other951.tblName; + partName = other951.partName; + isAllParts = other951.isAllParts; + __isset = other951.__isset; } -CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other944) { - dbName = other944.dbName; - tblName = other944.tblName; - partName = other944.partName; - isAllParts = other944.isAllParts; - __isset = other944.__isset; +CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other952) { + dbName = other952.dbName; + tblName = other952.tblName; + partName = other952.partName; + isAllParts = other952.isAllParts; + __isset = other952.__isset; return *this; } void CacheFileMetadataRequest::printTo(std::ostream& out) const { @@ -23782,14 +24350,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->functions.clear(); - uint32_t _size945; - ::apache::thrift::protocol::TType _etype948; - xfer += iprot->readListBegin(_etype948, _size945); - this->functions.resize(_size945); - uint32_t _i949; - for (_i949 = 0; _i949 < _size945; ++_i949) + uint32_t _size953; + ::apache::thrift::protocol::TType _etype956; + xfer += iprot->readListBegin(_etype956, _size953); + this->functions.resize(_size953); + uint32_t _i957; + for (_i957 = 0; _i957 < _size953; ++_i957) { - xfer += this->functions[_i949].read(iprot); + xfer += this->functions[_i957].read(iprot); } xfer += iprot->readListEnd(); } @@ -23819,10 +24387,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); - std::vector ::const_iterator _iter950; - for (_iter950 = this->functions.begin(); _iter950 != this->functions.end(); ++_iter950) + std::vector ::const_iterator _iter958; + for (_iter958 = this->functions.begin(); _iter958 != this->functions.end(); ++_iter958) { - xfer += (*_iter950).write(oprot); + xfer += (*_iter958).write(oprot); } xfer += oprot->writeListEnd(); } @@ -23839,13 +24407,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { swap(a.__isset, b.__isset); } 
-GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other951) { - functions = other951.functions; - __isset = other951.__isset; +GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other959) { + functions = other959.functions; + __isset = other959.__isset; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other952) { - functions = other952.functions; - __isset = other952.__isset; +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other960) { + functions = other960.functions; + __isset = other960.__isset; return *this; } void GetAllFunctionsResponse::printTo(std::ostream& out) const { @@ -23890,16 +24458,16 @@ uint32_t ClientCapabilities::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size953; - ::apache::thrift::protocol::TType _etype956; - xfer += iprot->readListBegin(_etype956, _size953); - this->values.resize(_size953); - uint32_t _i957; - for (_i957 = 0; _i957 < _size953; ++_i957) + uint32_t _size961; + ::apache::thrift::protocol::TType _etype964; + xfer += iprot->readListBegin(_etype964, _size961); + this->values.resize(_size961); + uint32_t _i965; + for (_i965 = 0; _i965 < _size961; ++_i965) { - int32_t ecast958; - xfer += iprot->readI32(ecast958); - this->values[_i957] = (ClientCapability::type)ecast958; + int32_t ecast966; + xfer += iprot->readI32(ecast966); + this->values[_i965] = (ClientCapability::type)ecast966; } xfer += iprot->readListEnd(); } @@ -23930,10 +24498,10 @@ uint32_t ClientCapabilities::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast(this->values.size())); - std::vector ::const_iterator _iter959; - for (_iter959 = this->values.begin(); _iter959 != this->values.end(); ++_iter959) + std::vector ::const_iterator _iter967; + for (_iter967 = this->values.begin(); _iter967 != this->values.end(); ++_iter967) { - xfer += oprot->writeI32((int32_t)(*_iter959)); + xfer += oprot->writeI32((int32_t)(*_iter967)); } xfer += oprot->writeListEnd(); } @@ -23949,11 +24517,11 @@ void swap(ClientCapabilities &a, ClientCapabilities &b) { swap(a.values, b.values); } -ClientCapabilities::ClientCapabilities(const ClientCapabilities& other960) { - values = other960.values; +ClientCapabilities::ClientCapabilities(const ClientCapabilities& other968) { + values = other968.values; } -ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other961) { - values = other961.values; +ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other969) { + values = other969.values; return *this; } void ClientCapabilities::printTo(std::ostream& out) const { @@ -23986,6 +24554,16 @@ void GetTableRequest::__set_catName(const std::string& val) { __isset.catName = true; } +void GetTableRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void GetTableRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -24041,6 +24619,22 @@ uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += 
iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -24080,6 +24674,16 @@ uint32_t GetTableRequest::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 5); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -24091,22 +24695,28 @@ void swap(GetTableRequest &a, GetTableRequest &b) { swap(a.tblName, b.tblName); swap(a.capabilities, b.capabilities); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } -GetTableRequest::GetTableRequest(const GetTableRequest& other962) { - dbName = other962.dbName; - tblName = other962.tblName; - capabilities = other962.capabilities; - catName = other962.catName; - __isset = other962.__isset; -} -GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other963) { - dbName = other963.dbName; - tblName = other963.tblName; - capabilities = other963.capabilities; - catName = other963.catName; - __isset = other963.__isset; +GetTableRequest::GetTableRequest(const GetTableRequest& other970) { + dbName = other970.dbName; + tblName = other970.tblName; + capabilities = other970.capabilities; + catName = other970.catName; + txnId = other970.txnId; + validWriteIdList = other970.validWriteIdList; + __isset = other970.__isset; +} +GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other971) { + dbName = other971.dbName; + tblName = other971.tblName; + capabilities = other971.capabilities; + catName = other971.catName; + txnId = other971.txnId; + validWriteIdList = other971.validWriteIdList; + __isset = other971.__isset; return *this; } void GetTableRequest::printTo(std::ostream& out) const { @@ -24116,6 +24726,8 @@ void GetTableRequest::printTo(std::ostream& out) const { out << ", " << "tblName=" << to_string(tblName); out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? 
(out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -24128,6 +24740,11 @@ void GetTableResult::__set_table(const Table& val) { this->table = val; } +void GetTableResult::__set_isStatsCompliant(const IsolationLevelCompliance::type val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t GetTableResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -24158,6 +24775,16 @@ uint32_t GetTableResult::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast972; + xfer += iprot->readI32(ecast972); + this->isStatsCompliant = (IsolationLevelCompliance::type)ecast972; + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -24181,6 +24808,11 @@ uint32_t GetTableResult::write(::apache::thrift::protocol::TProtocol* oprot) con xfer += this->table.write(oprot); xfer += oprot->writeFieldEnd(); + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32((int32_t)this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -24189,19 +24821,26 @@ uint32_t GetTableResult::write(::apache::thrift::protocol::TProtocol* oprot) con void swap(GetTableResult &a, GetTableResult &b) { using ::std::swap; swap(a.table, b.table); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } -GetTableResult::GetTableResult(const GetTableResult& other964) { - table = other964.table; +GetTableResult::GetTableResult(const GetTableResult& other973) { + table = other973.table; + isStatsCompliant = other973.isStatsCompliant; + __isset = other973.__isset; } -GetTableResult& GetTableResult::operator=(const GetTableResult& other965) { - table = other965.table; +GetTableResult& GetTableResult::operator=(const GetTableResult& other974) { + table = other974.table; + isStatsCompliant = other974.isStatsCompliant; + __isset = other974.__isset; return *this; } void GetTableResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "GetTableResult("; out << "table=" << to_string(table); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? 
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -24263,14 +24902,14 @@ uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tblNames.clear(); - uint32_t _size966; - ::apache::thrift::protocol::TType _etype969; - xfer += iprot->readListBegin(_etype969, _size966); - this->tblNames.resize(_size966); - uint32_t _i970; - for (_i970 = 0; _i970 < _size966; ++_i970) + uint32_t _size975; + ::apache::thrift::protocol::TType _etype978; + xfer += iprot->readListBegin(_etype978, _size975); + this->tblNames.resize(_size975); + uint32_t _i979; + for (_i979 = 0; _i979 < _size975; ++_i979) { - xfer += iprot->readString(this->tblNames[_i970]); + xfer += iprot->readString(this->tblNames[_i979]); } xfer += iprot->readListEnd(); } @@ -24322,10 +24961,10 @@ uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tblNames", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tblNames.size())); - std::vector ::const_iterator _iter971; - for (_iter971 = this->tblNames.begin(); _iter971 != this->tblNames.end(); ++_iter971) + std::vector ::const_iterator _iter980; + for (_iter980 = this->tblNames.begin(); _iter980 != this->tblNames.end(); ++_iter980) { - xfer += oprot->writeString((*_iter971)); + xfer += oprot->writeString((*_iter980)); } xfer += oprot->writeListEnd(); } @@ -24355,19 +24994,19 @@ void swap(GetTablesRequest &a, GetTablesRequest &b) { swap(a.__isset, b.__isset); } -GetTablesRequest::GetTablesRequest(const GetTablesRequest& other972) { - dbName = other972.dbName; - tblNames = other972.tblNames; - capabilities = other972.capabilities; - catName = other972.catName; - __isset = other972.__isset; +GetTablesRequest::GetTablesRequest(const GetTablesRequest& other981) { + dbName = other981.dbName; + tblNames = other981.tblNames; + capabilities = other981.capabilities; + catName = other981.catName; + __isset = other981.__isset; } -GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other973) { - dbName = other973.dbName; - tblNames = other973.tblNames; - capabilities = other973.capabilities; - catName = other973.catName; - __isset = other973.__isset; +GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other982) { + dbName = other982.dbName; + tblNames = other982.tblNames; + capabilities = other982.capabilities; + catName = other982.catName; + __isset = other982.__isset; return *this; } void GetTablesRequest::printTo(std::ostream& out) const { @@ -24415,14 +25054,14 @@ uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tables.clear(); - uint32_t _size974; - ::apache::thrift::protocol::TType _etype977; - xfer += iprot->readListBegin(_etype977, _size974); - this->tables.resize(_size974); - uint32_t _i978; - for (_i978 = 0; _i978 < _size974; ++_i978) + uint32_t _size983; + ::apache::thrift::protocol::TType _etype986; + xfer += iprot->readListBegin(_etype986, _size983); + this->tables.resize(_size983); + uint32_t _i987; + for (_i987 = 0; _i987 < _size983; ++_i987) { - xfer += this->tables[_i978].read(iprot); + xfer += this->tables[_i987].read(iprot); } xfer += iprot->readListEnd(); } @@ -24453,10 +25092,10 @@ uint32_t GetTablesResult::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tables", 
::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tables.size())); - std::vector<Table>
::const_iterator _iter979; - for (_iter979 = this->tables.begin(); _iter979 != this->tables.end(); ++_iter979) + std::vector<Table>
::const_iterator _iter988; + for (_iter988 = this->tables.begin(); _iter988 != this->tables.end(); ++_iter988) { - xfer += (*_iter979).write(oprot); + xfer += (*_iter988).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24472,11 +25111,11 @@ void swap(GetTablesResult &a, GetTablesResult &b) { swap(a.tables, b.tables); } -GetTablesResult::GetTablesResult(const GetTablesResult& other980) { - tables = other980.tables; +GetTablesResult::GetTablesResult(const GetTablesResult& other989) { + tables = other989.tables; } -GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other981) { - tables = other981.tables; +GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other990) { + tables = other990.tables; return *this; } void GetTablesResult::printTo(std::ostream& out) const { @@ -24578,13 +25217,13 @@ void swap(CmRecycleRequest &a, CmRecycleRequest &b) { swap(a.purge, b.purge); } -CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other982) { - dataPath = other982.dataPath; - purge = other982.purge; +CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other991) { + dataPath = other991.dataPath; + purge = other991.purge; } -CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other983) { - dataPath = other983.dataPath; - purge = other983.purge; +CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other992) { + dataPath = other992.dataPath; + purge = other992.purge; return *this; } void CmRecycleRequest::printTo(std::ostream& out) const { @@ -24644,11 +25283,11 @@ void swap(CmRecycleResponse &a, CmRecycleResponse &b) { (void) b; } -CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other984) { - (void) other984; +CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other993) { + (void) other993; } -CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other985) { - (void) other985; +CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other994) { + (void) other994; return *this; } void CmRecycleResponse::printTo(std::ostream& out) const { @@ -24808,21 +25447,21 @@ void swap(TableMeta &a, TableMeta &b) { swap(a.__isset, b.__isset); } -TableMeta::TableMeta(const TableMeta& other986) { - dbName = other986.dbName; - tableName = other986.tableName; - tableType = other986.tableType; - comments = other986.comments; - catName = other986.catName; - __isset = other986.__isset; -} -TableMeta& TableMeta::operator=(const TableMeta& other987) { - dbName = other987.dbName; - tableName = other987.tableName; - tableType = other987.tableType; - comments = other987.comments; - catName = other987.catName; - __isset = other987.__isset; +TableMeta::TableMeta(const TableMeta& other995) { + dbName = other995.dbName; + tableName = other995.tableName; + tableType = other995.tableType; + comments = other995.comments; + catName = other995.catName; + __isset = other995.__isset; +} +TableMeta& TableMeta::operator=(const TableMeta& other996) { + dbName = other996.dbName; + tableName = other996.tableName; + tableType = other996.tableType; + comments = other996.comments; + catName = other996.catName; + __isset = other996.__isset; return *this; } void TableMeta::printTo(std::ostream& out) const { @@ -24886,15 +25525,15 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); - uint32_t _size988; - ::apache::thrift::protocol::TType _etype991; - xfer += iprot->readSetBegin(_etype991, _size988); 
- uint32_t _i992; - for (_i992 = 0; _i992 < _size988; ++_i992) + uint32_t _size997; + ::apache::thrift::protocol::TType _etype1000; + xfer += iprot->readSetBegin(_etype1000, _size997); + uint32_t _i1001; + for (_i1001 = 0; _i1001 < _size997; ++_i1001) { - std::string _elem993; - xfer += iprot->readString(_elem993); - this->tablesUsed.insert(_elem993); + std::string _elem1002; + xfer += iprot->readString(_elem1002); + this->tablesUsed.insert(_elem1002); } xfer += iprot->readSetEnd(); } @@ -24949,10 +25588,10 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tablesUsed.size())); - std::set ::const_iterator _iter994; - for (_iter994 = this->tablesUsed.begin(); _iter994 != this->tablesUsed.end(); ++_iter994) + std::set ::const_iterator _iter1003; + for (_iter1003 = this->tablesUsed.begin(); _iter1003 != this->tablesUsed.end(); ++_iter1003) { - xfer += oprot->writeString((*_iter994)); + xfer += oprot->writeString((*_iter1003)); } xfer += oprot->writeSetEnd(); } @@ -24987,19 +25626,19 @@ void swap(Materialization &a, Materialization &b) { swap(a.__isset, b.__isset); } -Materialization::Materialization(const Materialization& other995) { - tablesUsed = other995.tablesUsed; - validTxnList = other995.validTxnList; - invalidationTime = other995.invalidationTime; - sourceTablesUpdateDeleteModified = other995.sourceTablesUpdateDeleteModified; - __isset = other995.__isset; +Materialization::Materialization(const Materialization& other1004) { + tablesUsed = other1004.tablesUsed; + validTxnList = other1004.validTxnList; + invalidationTime = other1004.invalidationTime; + sourceTablesUpdateDeleteModified = other1004.sourceTablesUpdateDeleteModified; + __isset = other1004.__isset; } -Materialization& Materialization::operator=(const Materialization& other996) { - tablesUsed = other996.tablesUsed; - validTxnList = other996.validTxnList; - invalidationTime = other996.invalidationTime; - sourceTablesUpdateDeleteModified = other996.sourceTablesUpdateDeleteModified; - __isset = other996.__isset; +Materialization& Materialization::operator=(const Materialization& other1005) { + tablesUsed = other1005.tablesUsed; + validTxnList = other1005.validTxnList; + invalidationTime = other1005.invalidationTime; + sourceTablesUpdateDeleteModified = other1005.sourceTablesUpdateDeleteModified; + __isset = other1005.__isset; return *this; } void Materialization::printTo(std::ostream& out) const { @@ -25068,9 +25707,9 @@ uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast997; - xfer += iprot->readI32(ecast997); - this->status = (WMResourcePlanStatus::type)ecast997; + int32_t ecast1006; + xfer += iprot->readI32(ecast1006); + this->status = (WMResourcePlanStatus::type)ecast1006; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -25144,19 +25783,19 @@ void swap(WMResourcePlan &a, WMResourcePlan &b) { swap(a.__isset, b.__isset); } -WMResourcePlan::WMResourcePlan(const WMResourcePlan& other998) { - name = other998.name; - status = other998.status; - queryParallelism = other998.queryParallelism; - defaultPoolPath = other998.defaultPoolPath; - __isset = other998.__isset; +WMResourcePlan::WMResourcePlan(const WMResourcePlan& other1007) { + name = other1007.name; + status = other1007.status; + queryParallelism = 
other1007.queryParallelism; + defaultPoolPath = other1007.defaultPoolPath; + __isset = other1007.__isset; } -WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other999) { - name = other999.name; - status = other999.status; - queryParallelism = other999.queryParallelism; - defaultPoolPath = other999.defaultPoolPath; - __isset = other999.__isset; +WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other1008) { + name = other1008.name; + status = other1008.status; + queryParallelism = other1008.queryParallelism; + defaultPoolPath = other1008.defaultPoolPath; + __isset = other1008.__isset; return *this; } void WMResourcePlan::printTo(std::ostream& out) const { @@ -25235,9 +25874,9 @@ uint32_t WMNullableResourcePlan::read(::apache::thrift::protocol::TProtocol* ipr break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1000; - xfer += iprot->readI32(ecast1000); - this->status = (WMResourcePlanStatus::type)ecast1000; + int32_t ecast1009; + xfer += iprot->readI32(ecast1009); + this->status = (WMResourcePlanStatus::type)ecast1009; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -25338,23 +25977,23 @@ void swap(WMNullableResourcePlan &a, WMNullableResourcePlan &b) { swap(a.__isset, b.__isset); } -WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other1001) { - name = other1001.name; - status = other1001.status; - queryParallelism = other1001.queryParallelism; - isSetQueryParallelism = other1001.isSetQueryParallelism; - defaultPoolPath = other1001.defaultPoolPath; - isSetDefaultPoolPath = other1001.isSetDefaultPoolPath; - __isset = other1001.__isset; -} -WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other1002) { - name = other1002.name; - status = other1002.status; - queryParallelism = other1002.queryParallelism; - isSetQueryParallelism = other1002.isSetQueryParallelism; - defaultPoolPath = other1002.defaultPoolPath; - isSetDefaultPoolPath = other1002.isSetDefaultPoolPath; - __isset = other1002.__isset; +WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other1010) { + name = other1010.name; + status = other1010.status; + queryParallelism = other1010.queryParallelism; + isSetQueryParallelism = other1010.isSetQueryParallelism; + defaultPoolPath = other1010.defaultPoolPath; + isSetDefaultPoolPath = other1010.isSetDefaultPoolPath; + __isset = other1010.__isset; +} +WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other1011) { + name = other1011.name; + status = other1011.status; + queryParallelism = other1011.queryParallelism; + isSetQueryParallelism = other1011.isSetQueryParallelism; + defaultPoolPath = other1011.defaultPoolPath; + isSetDefaultPoolPath = other1011.isSetDefaultPoolPath; + __isset = other1011.__isset; return *this; } void WMNullableResourcePlan::printTo(std::ostream& out) const { @@ -25519,21 +26158,21 @@ void swap(WMPool &a, WMPool &b) { swap(a.__isset, b.__isset); } -WMPool::WMPool(const WMPool& other1003) { - resourcePlanName = other1003.resourcePlanName; - poolPath = other1003.poolPath; - allocFraction = other1003.allocFraction; - queryParallelism = other1003.queryParallelism; - schedulingPolicy = other1003.schedulingPolicy; - __isset = other1003.__isset; -} -WMPool& WMPool::operator=(const WMPool& other1004) { - resourcePlanName = other1004.resourcePlanName; - poolPath = other1004.poolPath; - allocFraction = other1004.allocFraction; - queryParallelism = 
other1004.queryParallelism; - schedulingPolicy = other1004.schedulingPolicy; - __isset = other1004.__isset; +WMPool::WMPool(const WMPool& other1012) { + resourcePlanName = other1012.resourcePlanName; + poolPath = other1012.poolPath; + allocFraction = other1012.allocFraction; + queryParallelism = other1012.queryParallelism; + schedulingPolicy = other1012.schedulingPolicy; + __isset = other1012.__isset; +} +WMPool& WMPool::operator=(const WMPool& other1013) { + resourcePlanName = other1013.resourcePlanName; + poolPath = other1013.poolPath; + allocFraction = other1013.allocFraction; + queryParallelism = other1013.queryParallelism; + schedulingPolicy = other1013.schedulingPolicy; + __isset = other1013.__isset; return *this; } void WMPool::printTo(std::ostream& out) const { @@ -25716,23 +26355,23 @@ void swap(WMNullablePool &a, WMNullablePool &b) { swap(a.__isset, b.__isset); } -WMNullablePool::WMNullablePool(const WMNullablePool& other1005) { - resourcePlanName = other1005.resourcePlanName; - poolPath = other1005.poolPath; - allocFraction = other1005.allocFraction; - queryParallelism = other1005.queryParallelism; - schedulingPolicy = other1005.schedulingPolicy; - isSetSchedulingPolicy = other1005.isSetSchedulingPolicy; - __isset = other1005.__isset; -} -WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other1006) { - resourcePlanName = other1006.resourcePlanName; - poolPath = other1006.poolPath; - allocFraction = other1006.allocFraction; - queryParallelism = other1006.queryParallelism; - schedulingPolicy = other1006.schedulingPolicy; - isSetSchedulingPolicy = other1006.isSetSchedulingPolicy; - __isset = other1006.__isset; +WMNullablePool::WMNullablePool(const WMNullablePool& other1014) { + resourcePlanName = other1014.resourcePlanName; + poolPath = other1014.poolPath; + allocFraction = other1014.allocFraction; + queryParallelism = other1014.queryParallelism; + schedulingPolicy = other1014.schedulingPolicy; + isSetSchedulingPolicy = other1014.isSetSchedulingPolicy; + __isset = other1014.__isset; +} +WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other1015) { + resourcePlanName = other1015.resourcePlanName; + poolPath = other1015.poolPath; + allocFraction = other1015.allocFraction; + queryParallelism = other1015.queryParallelism; + schedulingPolicy = other1015.schedulingPolicy; + isSetSchedulingPolicy = other1015.isSetSchedulingPolicy; + __isset = other1015.__isset; return *this; } void WMNullablePool::printTo(std::ostream& out) const { @@ -25897,21 +26536,21 @@ void swap(WMTrigger &a, WMTrigger &b) { swap(a.__isset, b.__isset); } -WMTrigger::WMTrigger(const WMTrigger& other1007) { - resourcePlanName = other1007.resourcePlanName; - triggerName = other1007.triggerName; - triggerExpression = other1007.triggerExpression; - actionExpression = other1007.actionExpression; - isInUnmanaged = other1007.isInUnmanaged; - __isset = other1007.__isset; -} -WMTrigger& WMTrigger::operator=(const WMTrigger& other1008) { - resourcePlanName = other1008.resourcePlanName; - triggerName = other1008.triggerName; - triggerExpression = other1008.triggerExpression; - actionExpression = other1008.actionExpression; - isInUnmanaged = other1008.isInUnmanaged; - __isset = other1008.__isset; +WMTrigger::WMTrigger(const WMTrigger& other1016) { + resourcePlanName = other1016.resourcePlanName; + triggerName = other1016.triggerName; + triggerExpression = other1016.triggerExpression; + actionExpression = other1016.actionExpression; + isInUnmanaged = other1016.isInUnmanaged; + __isset = 
other1016.__isset; +} +WMTrigger& WMTrigger::operator=(const WMTrigger& other1017) { + resourcePlanName = other1017.resourcePlanName; + triggerName = other1017.triggerName; + triggerExpression = other1017.triggerExpression; + actionExpression = other1017.actionExpression; + isInUnmanaged = other1017.isInUnmanaged; + __isset = other1017.__isset; return *this; } void WMTrigger::printTo(std::ostream& out) const { @@ -26076,21 +26715,21 @@ void swap(WMMapping &a, WMMapping &b) { swap(a.__isset, b.__isset); } -WMMapping::WMMapping(const WMMapping& other1009) { - resourcePlanName = other1009.resourcePlanName; - entityType = other1009.entityType; - entityName = other1009.entityName; - poolPath = other1009.poolPath; - ordering = other1009.ordering; - __isset = other1009.__isset; -} -WMMapping& WMMapping::operator=(const WMMapping& other1010) { - resourcePlanName = other1010.resourcePlanName; - entityType = other1010.entityType; - entityName = other1010.entityName; - poolPath = other1010.poolPath; - ordering = other1010.ordering; - __isset = other1010.__isset; +WMMapping::WMMapping(const WMMapping& other1018) { + resourcePlanName = other1018.resourcePlanName; + entityType = other1018.entityType; + entityName = other1018.entityName; + poolPath = other1018.poolPath; + ordering = other1018.ordering; + __isset = other1018.__isset; +} +WMMapping& WMMapping::operator=(const WMMapping& other1019) { + resourcePlanName = other1019.resourcePlanName; + entityType = other1019.entityType; + entityName = other1019.entityName; + poolPath = other1019.poolPath; + ordering = other1019.ordering; + __isset = other1019.__isset; return *this; } void WMMapping::printTo(std::ostream& out) const { @@ -26196,13 +26835,13 @@ void swap(WMPoolTrigger &a, WMPoolTrigger &b) { swap(a.trigger, b.trigger); } -WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other1011) { - pool = other1011.pool; - trigger = other1011.trigger; +WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other1020) { + pool = other1020.pool; + trigger = other1020.trigger; } -WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other1012) { - pool = other1012.pool; - trigger = other1012.trigger; +WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other1021) { + pool = other1021.pool; + trigger = other1021.trigger; return *this; } void WMPoolTrigger::printTo(std::ostream& out) const { @@ -26276,14 +26915,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->pools.clear(); - uint32_t _size1013; - ::apache::thrift::protocol::TType _etype1016; - xfer += iprot->readListBegin(_etype1016, _size1013); - this->pools.resize(_size1013); - uint32_t _i1017; - for (_i1017 = 0; _i1017 < _size1013; ++_i1017) + uint32_t _size1022; + ::apache::thrift::protocol::TType _etype1025; + xfer += iprot->readListBegin(_etype1025, _size1022); + this->pools.resize(_size1022); + uint32_t _i1026; + for (_i1026 = 0; _i1026 < _size1022; ++_i1026) { - xfer += this->pools[_i1017].read(iprot); + xfer += this->pools[_i1026].read(iprot); } xfer += iprot->readListEnd(); } @@ -26296,14 +26935,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->mappings.clear(); - uint32_t _size1018; - ::apache::thrift::protocol::TType _etype1021; - xfer += iprot->readListBegin(_etype1021, _size1018); - this->mappings.resize(_size1018); - uint32_t _i1022; - for (_i1022 = 0; _i1022 < _size1018; ++_i1022) + uint32_t 
_size1027; + ::apache::thrift::protocol::TType _etype1030; + xfer += iprot->readListBegin(_etype1030, _size1027); + this->mappings.resize(_size1027); + uint32_t _i1031; + for (_i1031 = 0; _i1031 < _size1027; ++_i1031) { - xfer += this->mappings[_i1022].read(iprot); + xfer += this->mappings[_i1031].read(iprot); } xfer += iprot->readListEnd(); } @@ -26316,14 +26955,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size1023; - ::apache::thrift::protocol::TType _etype1026; - xfer += iprot->readListBegin(_etype1026, _size1023); - this->triggers.resize(_size1023); - uint32_t _i1027; - for (_i1027 = 0; _i1027 < _size1023; ++_i1027) + uint32_t _size1032; + ::apache::thrift::protocol::TType _etype1035; + xfer += iprot->readListBegin(_etype1035, _size1032); + this->triggers.resize(_size1032); + uint32_t _i1036; + for (_i1036 = 0; _i1036 < _size1032; ++_i1036) { - xfer += this->triggers[_i1027].read(iprot); + xfer += this->triggers[_i1036].read(iprot); } xfer += iprot->readListEnd(); } @@ -26336,14 +26975,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->poolTriggers.clear(); - uint32_t _size1028; - ::apache::thrift::protocol::TType _etype1031; - xfer += iprot->readListBegin(_etype1031, _size1028); - this->poolTriggers.resize(_size1028); - uint32_t _i1032; - for (_i1032 = 0; _i1032 < _size1028; ++_i1032) + uint32_t _size1037; + ::apache::thrift::protocol::TType _etype1040; + xfer += iprot->readListBegin(_etype1040, _size1037); + this->poolTriggers.resize(_size1037); + uint32_t _i1041; + for (_i1041 = 0; _i1041 < _size1037; ++_i1041) { - xfer += this->poolTriggers[_i1032].read(iprot); + xfer += this->poolTriggers[_i1041].read(iprot); } xfer += iprot->readListEnd(); } @@ -26380,10 +27019,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("pools", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->pools.size())); - std::vector<WMPool> ::const_iterator _iter1033; - for (_iter1033 = this->pools.begin(); _iter1033 != this->pools.end(); ++_iter1033) + std::vector<WMPool> ::const_iterator _iter1042; + for (_iter1042 = this->pools.begin(); _iter1042 != this->pools.end(); ++_iter1042) { - xfer += (*_iter1033).write(oprot); + xfer += (*_iter1042).write(oprot); } xfer += oprot->writeListEnd(); } @@ -26393,10 +27032,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("mappings", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->mappings.size())); - std::vector<WMMapping> ::const_iterator _iter1034; - for (_iter1034 = this->mappings.begin(); _iter1034 != this->mappings.end(); ++_iter1034) + std::vector<WMMapping> ::const_iterator _iter1043; + for (_iter1043 = this->mappings.begin(); _iter1043 != this->mappings.end(); ++_iter1043) { - xfer += (*_iter1034).write(oprot); + xfer += (*_iter1043).write(oprot); } xfer += oprot->writeListEnd(); } @@ -26406,10 +27045,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->triggers.size())); - std::vector<WMTrigger>
::const_iterator _iter1035; - for (_iter1035 = this->triggers.begin(); _iter1035 != this->triggers.end(); ++_iter1035) + std::vector<WMTrigger> ::const_iterator _iter1044; + for (_iter1044 = this->triggers.begin(); _iter1044 != this->triggers.end(); ++_iter1044) { - xfer += (*_iter1035).write(oprot); + xfer += (*_iter1044).write(oprot); } xfer += oprot->writeListEnd(); } @@ -26419,10 +27058,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("poolTriggers", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->poolTriggers.size())); - std::vector<WMPoolTrigger> ::const_iterator _iter1036; - for (_iter1036 = this->poolTriggers.begin(); _iter1036 != this->poolTriggers.end(); ++_iter1036) + std::vector<WMPoolTrigger> ::const_iterator _iter1045; + for (_iter1045 = this->poolTriggers.begin(); _iter1045 != this->poolTriggers.end(); ++_iter1045) { - xfer += (*_iter1036).write(oprot); + xfer += (*_iter1045).write(oprot); } xfer += oprot->writeListEnd(); } @@ -26443,21 +27082,21 @@ void swap(WMFullResourcePlan &a, WMFullResourcePlan &b) { swap(a.__isset, b.__isset); } -WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other1037) { - plan = other1037.plan; - pools = other1037.pools; - mappings = other1037.mappings; - triggers = other1037.triggers; - poolTriggers = other1037.poolTriggers; - __isset = other1037.__isset; -} -WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other1038) { - plan = other1038.plan; - pools = other1038.pools; - mappings = other1038.mappings; - triggers = other1038.triggers; - poolTriggers = other1038.poolTriggers; - __isset = other1038.__isset; +WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other1046) { + plan = other1046.plan; + pools = other1046.pools; + mappings = other1046.mappings; + triggers = other1046.triggers; + poolTriggers = other1046.poolTriggers; + __isset = other1046.__isset; +} +WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other1047) { + plan = other1047.plan; + pools = other1047.pools; + mappings = other1047.mappings; + triggers = other1047.triggers; + poolTriggers = other1047.poolTriggers; + __isset = other1047.__isset; return *this; } void WMFullResourcePlan::printTo(std::ostream& out) const { @@ -26562,15 +27201,15 @@ void swap(WMCreateResourcePlanRequest &a, WMCreateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other1039) { - resourcePlan = other1039.resourcePlan; - copyFrom = other1039.copyFrom; - __isset = other1039.__isset; +WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other1048) { + resourcePlan = other1048.resourcePlan; + copyFrom = other1048.copyFrom; + __isset = other1048.__isset; } -WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other1040) { - resourcePlan = other1040.resourcePlan; - copyFrom = other1040.copyFrom; - __isset = other1040.__isset; +WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other1049) { + resourcePlan = other1049.resourcePlan; + copyFrom = other1049.copyFrom; + __isset = other1049.__isset; return *this; } void WMCreateResourcePlanRequest::printTo(std::ostream& out) const { @@ -26630,11 +27269,11 @@ void swap(WMCreateResourcePlanResponse &a, WMCreateResourcePlanResponse &b) { (void) b;
} -WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other1041) { - (void) other1041; +WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other1050) { + (void) other1050; } -WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other1042) { - (void) other1042; +WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other1051) { + (void) other1051; return *this; } void WMCreateResourcePlanResponse::printTo(std::ostream& out) const { @@ -26692,11 +27331,11 @@ void swap(WMGetActiveResourcePlanRequest &a, WMGetActiveResourcePlanRequest &b) (void) b; } -WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other1043) { - (void) other1043; +WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other1052) { + (void) other1052; } -WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other1044) { - (void) other1044; +WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other1053) { + (void) other1053; return *this; } void WMGetActiveResourcePlanRequest::printTo(std::ostream& out) const { @@ -26777,13 +27416,13 @@ void swap(WMGetActiveResourcePlanResponse &a, WMGetActiveResourcePlanResponse &b swap(a.__isset, b.__isset); } -WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other1045) { - resourcePlan = other1045.resourcePlan; - __isset = other1045.__isset; +WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other1054) { + resourcePlan = other1054.resourcePlan; + __isset = other1054.__isset; } -WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other1046) { - resourcePlan = other1046.resourcePlan; - __isset = other1046.__isset; +WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other1055) { + resourcePlan = other1055.resourcePlan; + __isset = other1055.__isset; return *this; } void WMGetActiveResourcePlanResponse::printTo(std::ostream& out) const { @@ -26865,13 +27504,13 @@ void swap(WMGetResourcePlanRequest &a, WMGetResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other1047) { - resourcePlanName = other1047.resourcePlanName; - __isset = other1047.__isset; +WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other1056) { + resourcePlanName = other1056.resourcePlanName; + __isset = other1056.__isset; } -WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other1048) { - resourcePlanName = other1048.resourcePlanName; - __isset = other1048.__isset; +WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other1057) { + resourcePlanName = other1057.resourcePlanName; + __isset = other1057.__isset; return *this; } void WMGetResourcePlanRequest::printTo(std::ostream& out) const { @@ -26953,13 +27592,13 @@ void swap(WMGetResourcePlanResponse &a, WMGetResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanResponse::WMGetResourcePlanResponse(const 
WMGetResourcePlanResponse& other1049) { - resourcePlan = other1049.resourcePlan; - __isset = other1049.__isset; +WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other1058) { + resourcePlan = other1058.resourcePlan; + __isset = other1058.__isset; } -WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other1050) { - resourcePlan = other1050.resourcePlan; - __isset = other1050.__isset; +WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other1059) { + resourcePlan = other1059.resourcePlan; + __isset = other1059.__isset; return *this; } void WMGetResourcePlanResponse::printTo(std::ostream& out) const { @@ -27018,11 +27657,11 @@ void swap(WMGetAllResourcePlanRequest &a, WMGetAllResourcePlanRequest &b) { (void) b; } -WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other1051) { - (void) other1051; +WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other1060) { + (void) other1060; } -WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other1052) { - (void) other1052; +WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other1061) { + (void) other1061; return *this; } void WMGetAllResourcePlanRequest::printTo(std::ostream& out) const { @@ -27066,14 +27705,14 @@ uint32_t WMGetAllResourcePlanResponse::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourcePlans.clear(); - uint32_t _size1053; - ::apache::thrift::protocol::TType _etype1056; - xfer += iprot->readListBegin(_etype1056, _size1053); - this->resourcePlans.resize(_size1053); - uint32_t _i1057; - for (_i1057 = 0; _i1057 < _size1053; ++_i1057) + uint32_t _size1062; + ::apache::thrift::protocol::TType _etype1065; + xfer += iprot->readListBegin(_etype1065, _size1062); + this->resourcePlans.resize(_size1062); + uint32_t _i1066; + for (_i1066 = 0; _i1066 < _size1062; ++_i1066) { - xfer += this->resourcePlans[_i1057].read(iprot); + xfer += this->resourcePlans[_i1066].read(iprot); } xfer += iprot->readListEnd(); } @@ -27103,10 +27742,10 @@ uint32_t WMGetAllResourcePlanResponse::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("resourcePlans", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->resourcePlans.size())); - std::vector<WMResourcePlan> ::const_iterator _iter1058; - for (_iter1058 = this->resourcePlans.begin(); _iter1058 != this->resourcePlans.end(); ++_iter1058) + std::vector<WMResourcePlan> ::const_iterator _iter1067; - for (_iter1067 = this->resourcePlans.begin(); _iter1067 != this->resourcePlans.end(); ++_iter1067) { - xfer += (*_iter1058).write(oprot); + xfer += (*_iter1067).write(oprot); } xfer += oprot->writeListEnd(); } @@ -27123,13 +27762,13 @@ void swap(WMGetAllResourcePlanResponse &a, WMGetAllResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other1059) { - resourcePlans = other1059.resourcePlans; - __isset = other1059.__isset; +WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other1068) { + resourcePlans = other1068.resourcePlans; + __isset = other1068.__isset; } -WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const
WMGetAllResourcePlanResponse& other1060) { - resourcePlans = other1060.resourcePlans; - __isset = other1060.__isset; +WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other1069) { + resourcePlans = other1069.resourcePlans; + __isset = other1069.__isset; return *this; } void WMGetAllResourcePlanResponse::printTo(std::ostream& out) const { @@ -27287,21 +27926,21 @@ void swap(WMAlterResourcePlanRequest &a, WMAlterResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other1061) { - resourcePlanName = other1061.resourcePlanName; - resourcePlan = other1061.resourcePlan; - isEnableAndActivate = other1061.isEnableAndActivate; - isForceDeactivate = other1061.isForceDeactivate; - isReplace = other1061.isReplace; - __isset = other1061.__isset; -} -WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other1062) { - resourcePlanName = other1062.resourcePlanName; - resourcePlan = other1062.resourcePlan; - isEnableAndActivate = other1062.isEnableAndActivate; - isForceDeactivate = other1062.isForceDeactivate; - isReplace = other1062.isReplace; - __isset = other1062.__isset; +WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other1070) { + resourcePlanName = other1070.resourcePlanName; + resourcePlan = other1070.resourcePlan; + isEnableAndActivate = other1070.isEnableAndActivate; + isForceDeactivate = other1070.isForceDeactivate; + isReplace = other1070.isReplace; + __isset = other1070.__isset; +} +WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other1071) { + resourcePlanName = other1071.resourcePlanName; + resourcePlan = other1071.resourcePlan; + isEnableAndActivate = other1071.isEnableAndActivate; + isForceDeactivate = other1071.isForceDeactivate; + isReplace = other1071.isReplace; + __isset = other1071.__isset; return *this; } void WMAlterResourcePlanRequest::printTo(std::ostream& out) const { @@ -27387,13 +28026,13 @@ void swap(WMAlterResourcePlanResponse &a, WMAlterResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1063) { - fullResourcePlan = other1063.fullResourcePlan; - __isset = other1063.__isset; +WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1072) { + fullResourcePlan = other1072.fullResourcePlan; + __isset = other1072.__isset; } -WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1064) { - fullResourcePlan = other1064.fullResourcePlan; - __isset = other1064.__isset; +WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1073) { + fullResourcePlan = other1073.fullResourcePlan; + __isset = other1073.__isset; return *this; } void WMAlterResourcePlanResponse::printTo(std::ostream& out) const { @@ -27475,13 +28114,13 @@ void swap(WMValidateResourcePlanRequest &a, WMValidateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1065) { - resourcePlanName = other1065.resourcePlanName; - __isset = other1065.__isset; +WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1074) { + resourcePlanName = 
other1074.resourcePlanName; + __isset = other1074.__isset; } -WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1066) { - resourcePlanName = other1066.resourcePlanName; - __isset = other1066.__isset; +WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1075) { + resourcePlanName = other1075.resourcePlanName; + __isset = other1075.__isset; return *this; } void WMValidateResourcePlanRequest::printTo(std::ostream& out) const { @@ -27531,14 +28170,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->errors.clear(); - uint32_t _size1067; - ::apache::thrift::protocol::TType _etype1070; - xfer += iprot->readListBegin(_etype1070, _size1067); - this->errors.resize(_size1067); - uint32_t _i1071; - for (_i1071 = 0; _i1071 < _size1067; ++_i1071) + uint32_t _size1076; + ::apache::thrift::protocol::TType _etype1079; + xfer += iprot->readListBegin(_etype1079, _size1076); + this->errors.resize(_size1076); + uint32_t _i1080; + for (_i1080 = 0; _i1080 < _size1076; ++_i1080) { - xfer += iprot->readString(this->errors[_i1071]); + xfer += iprot->readString(this->errors[_i1080]); } xfer += iprot->readListEnd(); } @@ -27551,14 +28190,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->warnings.clear(); - uint32_t _size1072; - ::apache::thrift::protocol::TType _etype1075; - xfer += iprot->readListBegin(_etype1075, _size1072); - this->warnings.resize(_size1072); - uint32_t _i1076; - for (_i1076 = 0; _i1076 < _size1072; ++_i1076) + uint32_t _size1081; + ::apache::thrift::protocol::TType _etype1084; + xfer += iprot->readListBegin(_etype1084, _size1081); + this->warnings.resize(_size1081); + uint32_t _i1085; + for (_i1085 = 0; _i1085 < _size1081; ++_i1085) { - xfer += iprot->readString(this->warnings[_i1076]); + xfer += iprot->readString(this->warnings[_i1085]); } xfer += iprot->readListEnd(); } @@ -27588,10 +28227,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("errors", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->errors.size())); - std::vector<std::string> ::const_iterator _iter1077; - for (_iter1077 = this->errors.begin(); _iter1077 != this->errors.end(); ++_iter1077) + std::vector<std::string> ::const_iterator _iter1086; + for (_iter1086 = this->errors.begin(); _iter1086 != this->errors.end(); ++_iter1086) { - xfer += oprot->writeString((*_iter1077)); + xfer += oprot->writeString((*_iter1086)); } xfer += oprot->writeListEnd(); } @@ -27601,10 +28240,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("warnings", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->warnings.size())); - std::vector<std::string> ::const_iterator _iter1078; - for (_iter1078 = this->warnings.begin(); _iter1078 != this->warnings.end(); ++_iter1078) + std::vector<std::string> ::const_iterator _iter1087; + for (_iter1087 = this->warnings.begin(); _iter1087 != this->warnings.end(); ++_iter1087) { - xfer += oprot->writeString((*_iter1078)); + xfer += oprot->writeString((*_iter1087)); } xfer += oprot->writeListEnd(); } @@ -27622,15 +28261,15 @@ void swap(WMValidateResourcePlanResponse &a,
WMValidateResourcePlanResponse &b) swap(a.__isset, b.__isset); } -WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1079) { - errors = other1079.errors; - warnings = other1079.warnings; - __isset = other1079.__isset; +WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1088) { + errors = other1088.errors; + warnings = other1088.warnings; + __isset = other1088.__isset; } -WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1080) { - errors = other1080.errors; - warnings = other1080.warnings; - __isset = other1080.__isset; +WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1089) { + errors = other1089.errors; + warnings = other1089.warnings; + __isset = other1089.__isset; return *this; } void WMValidateResourcePlanResponse::printTo(std::ostream& out) const { @@ -27713,13 +28352,13 @@ void swap(WMDropResourcePlanRequest &a, WMDropResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1081) { - resourcePlanName = other1081.resourcePlanName; - __isset = other1081.__isset; +WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1090) { + resourcePlanName = other1090.resourcePlanName; + __isset = other1090.__isset; } -WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1082) { - resourcePlanName = other1082.resourcePlanName; - __isset = other1082.__isset; +WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1091) { + resourcePlanName = other1091.resourcePlanName; + __isset = other1091.__isset; return *this; } void WMDropResourcePlanRequest::printTo(std::ostream& out) const { @@ -27778,11 +28417,11 @@ void swap(WMDropResourcePlanResponse &a, WMDropResourcePlanResponse &b) { (void) b; } -WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1083) { - (void) other1083; +WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1092) { + (void) other1092; } -WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1084) { - (void) other1084; +WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1093) { + (void) other1093; return *this; } void WMDropResourcePlanResponse::printTo(std::ostream& out) const { @@ -27863,13 +28502,13 @@ void swap(WMCreateTriggerRequest &a, WMCreateTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1085) { - trigger = other1085.trigger; - __isset = other1085.__isset; +WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1094) { + trigger = other1094.trigger; + __isset = other1094.__isset; } -WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1086) { - trigger = other1086.trigger; - __isset = other1086.__isset; +WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1095) { + trigger = other1095.trigger; + __isset = other1095.__isset; return *this; } void WMCreateTriggerRequest::printTo(std::ostream& out) const { @@ -27928,11 +28567,11 @@ void 
swap(WMCreateTriggerResponse &a, WMCreateTriggerResponse &b) { (void) b; } -WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1087) { - (void) other1087; +WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1096) { + (void) other1096; } -WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1088) { - (void) other1088; +WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1097) { + (void) other1097; return *this; } void WMCreateTriggerResponse::printTo(std::ostream& out) const { @@ -28013,13 +28652,13 @@ void swap(WMAlterTriggerRequest &a, WMAlterTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1089) { - trigger = other1089.trigger; - __isset = other1089.__isset; +WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1098) { + trigger = other1098.trigger; + __isset = other1098.__isset; } -WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1090) { - trigger = other1090.trigger; - __isset = other1090.__isset; +WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1099) { + trigger = other1099.trigger; + __isset = other1099.__isset; return *this; } void WMAlterTriggerRequest::printTo(std::ostream& out) const { @@ -28078,11 +28717,11 @@ void swap(WMAlterTriggerResponse &a, WMAlterTriggerResponse &b) { (void) b; } -WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1091) { - (void) other1091; +WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1100) { + (void) other1100; } -WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1092) { - (void) other1092; +WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1101) { + (void) other1101; return *this; } void WMAlterTriggerResponse::printTo(std::ostream& out) const { @@ -28182,15 +28821,15 @@ void swap(WMDropTriggerRequest &a, WMDropTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1093) { - resourcePlanName = other1093.resourcePlanName; - triggerName = other1093.triggerName; - __isset = other1093.__isset; +WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1102) { + resourcePlanName = other1102.resourcePlanName; + triggerName = other1102.triggerName; + __isset = other1102.__isset; } -WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1094) { - resourcePlanName = other1094.resourcePlanName; - triggerName = other1094.triggerName; - __isset = other1094.__isset; +WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1103) { + resourcePlanName = other1103.resourcePlanName; + triggerName = other1103.triggerName; + __isset = other1103.__isset; return *this; } void WMDropTriggerRequest::printTo(std::ostream& out) const { @@ -28250,11 +28889,11 @@ void swap(WMDropTriggerResponse &a, WMDropTriggerResponse &b) { (void) b; } -WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1095) { - (void) other1095; +WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1104) { + (void) other1104; } -WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& 
other1096) { - (void) other1096; +WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1105) { + (void) other1105; return *this; } void WMDropTriggerResponse::printTo(std::ostream& out) const { @@ -28335,13 +28974,13 @@ void swap(WMGetTriggersForResourePlanRequest &a, WMGetTriggersForResourePlanRequ swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1097) { - resourcePlanName = other1097.resourcePlanName; - __isset = other1097.__isset; +WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1106) { + resourcePlanName = other1106.resourcePlanName; + __isset = other1106.__isset; } -WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1098) { - resourcePlanName = other1098.resourcePlanName; - __isset = other1098.__isset; +WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1107) { + resourcePlanName = other1107.resourcePlanName; + __isset = other1107.__isset; return *this; } void WMGetTriggersForResourePlanRequest::printTo(std::ostream& out) const { @@ -28386,14 +29025,14 @@ uint32_t WMGetTriggersForResourePlanResponse::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size1099; - ::apache::thrift::protocol::TType _etype1102; - xfer += iprot->readListBegin(_etype1102, _size1099); - this->triggers.resize(_size1099); - uint32_t _i1103; - for (_i1103 = 0; _i1103 < _size1099; ++_i1103) + uint32_t _size1108; + ::apache::thrift::protocol::TType _etype1111; + xfer += iprot->readListBegin(_etype1111, _size1108); + this->triggers.resize(_size1108); + uint32_t _i1112; + for (_i1112 = 0; _i1112 < _size1108; ++_i1112) { - xfer += this->triggers[_i1103].read(iprot); + xfer += this->triggers[_i1112].read(iprot); } xfer += iprot->readListEnd(); } @@ -28423,10 +29062,10 @@ uint32_t WMGetTriggersForResourePlanResponse::write(::apache::thrift::protocol:: xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->triggers.size())); - std::vector<WMTrigger> ::const_iterator _iter1104; - for (_iter1104 = this->triggers.begin(); _iter1104 != this->triggers.end(); ++_iter1104) + std::vector<WMTrigger> ::const_iterator _iter1113; + for (_iter1113 = this->triggers.begin(); _iter1113 != this->triggers.end(); ++_iter1113) { - xfer += (*_iter1104).write(oprot); + xfer += (*_iter1113).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28443,13 +29082,13 @@ void swap(WMGetTriggersForResourePlanResponse &a, WMGetTriggersForResourePlanRes swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1105) { - triggers = other1105.triggers; - __isset = other1105.__isset; +WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1114) { + triggers = other1114.triggers; + __isset = other1114.__isset; } -WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1106) { - triggers = other1106.triggers; - __isset = other1106.__isset; +WMGetTriggersForResourePlanResponse&
WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1115) { + triggers = other1115.triggers; + __isset = other1115.__isset; return *this; } void WMGetTriggersForResourePlanResponse::printTo(std::ostream& out) const { @@ -28531,13 +29170,13 @@ void swap(WMCreatePoolRequest &a, WMCreatePoolRequest &b) { swap(a.__isset, b.__isset); } -WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1107) { - pool = other1107.pool; - __isset = other1107.__isset; +WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1116) { + pool = other1116.pool; + __isset = other1116.__isset; } -WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1108) { - pool = other1108.pool; - __isset = other1108.__isset; +WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1117) { + pool = other1117.pool; + __isset = other1117.__isset; return *this; } void WMCreatePoolRequest::printTo(std::ostream& out) const { @@ -28596,11 +29235,11 @@ void swap(WMCreatePoolResponse &a, WMCreatePoolResponse &b) { (void) b; } -WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1109) { - (void) other1109; +WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1118) { + (void) other1118; } -WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1110) { - (void) other1110; +WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1119) { + (void) other1119; return *this; } void WMCreatePoolResponse::printTo(std::ostream& out) const { @@ -28700,15 +29339,15 @@ void swap(WMAlterPoolRequest &a, WMAlterPoolRequest &b) { swap(a.__isset, b.__isset); } -WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1111) { - pool = other1111.pool; - poolPath = other1111.poolPath; - __isset = other1111.__isset; +WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1120) { + pool = other1120.pool; + poolPath = other1120.poolPath; + __isset = other1120.__isset; } -WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1112) { - pool = other1112.pool; - poolPath = other1112.poolPath; - __isset = other1112.__isset; +WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1121) { + pool = other1121.pool; + poolPath = other1121.poolPath; + __isset = other1121.__isset; return *this; } void WMAlterPoolRequest::printTo(std::ostream& out) const { @@ -28768,11 +29407,11 @@ void swap(WMAlterPoolResponse &a, WMAlterPoolResponse &b) { (void) b; } -WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1113) { - (void) other1113; +WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1122) { + (void) other1122; } -WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1114) { - (void) other1114; +WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1123) { + (void) other1123; return *this; } void WMAlterPoolResponse::printTo(std::ostream& out) const { @@ -28872,15 +29511,15 @@ void swap(WMDropPoolRequest &a, WMDropPoolRequest &b) { swap(a.__isset, b.__isset); } -WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1115) { - resourcePlanName = other1115.resourcePlanName; - poolPath = other1115.poolPath; - __isset = other1115.__isset; +WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1124) { + resourcePlanName = 
other1124.resourcePlanName; + poolPath = other1124.poolPath; + __isset = other1124.__isset; } -WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1116) { - resourcePlanName = other1116.resourcePlanName; - poolPath = other1116.poolPath; - __isset = other1116.__isset; +WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1125) { + resourcePlanName = other1125.resourcePlanName; + poolPath = other1125.poolPath; + __isset = other1125.__isset; return *this; } void WMDropPoolRequest::printTo(std::ostream& out) const { @@ -28940,11 +29579,11 @@ void swap(WMDropPoolResponse &a, WMDropPoolResponse &b) { (void) b; } -WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1117) { - (void) other1117; +WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1126) { + (void) other1126; } -WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1118) { - (void) other1118; +WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1127) { + (void) other1127; return *this; } void WMDropPoolResponse::printTo(std::ostream& out) const { @@ -29044,15 +29683,15 @@ void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b) swap(a.__isset, b.__isset); } -WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1119) { - mapping = other1119.mapping; - update = other1119.update; - __isset = other1119.__isset; +WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1128) { + mapping = other1128.mapping; + update = other1128.update; + __isset = other1128.__isset; } -WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1120) { - mapping = other1120.mapping; - update = other1120.update; - __isset = other1120.__isset; +WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1129) { + mapping = other1129.mapping; + update = other1129.update; + __isset = other1129.__isset; return *this; } void WMCreateOrUpdateMappingRequest::printTo(std::ostream& out) const { @@ -29112,11 +29751,11 @@ void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b (void) b; } -WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1121) { - (void) other1121; +WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1130) { + (void) other1130; } -WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1122) { - (void) other1122; +WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1131) { + (void) other1131; return *this; } void WMCreateOrUpdateMappingResponse::printTo(std::ostream& out) const { @@ -29197,13 +29836,13 @@ void swap(WMDropMappingRequest &a, WMDropMappingRequest &b) { swap(a.__isset, b.__isset); } -WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1123) { - mapping = other1123.mapping; - __isset = other1123.__isset; +WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1132) { + mapping = other1132.mapping; + __isset = other1132.__isset; } -WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1124) { - mapping = 
other1124.mapping; - __isset = other1124.__isset; +WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1133) { + mapping = other1133.mapping; + __isset = other1133.__isset; return *this; } void WMDropMappingRequest::printTo(std::ostream& out) const { @@ -29262,11 +29901,11 @@ void swap(WMDropMappingResponse &a, WMDropMappingResponse &b) { (void) b; } -WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1125) { - (void) other1125; +WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1134) { + (void) other1134; } -WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1126) { - (void) other1126; +WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1135) { + (void) other1135; return *this; } void WMDropMappingResponse::printTo(std::ostream& out) const { @@ -29404,19 +30043,19 @@ void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToP swap(a.__isset, b.__isset); } -WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1127) { - resourcePlanName = other1127.resourcePlanName; - triggerName = other1127.triggerName; - poolPath = other1127.poolPath; - drop = other1127.drop; - __isset = other1127.__isset; +WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1136) { + resourcePlanName = other1136.resourcePlanName; + triggerName = other1136.triggerName; + poolPath = other1136.poolPath; + drop = other1136.drop; + __isset = other1136.__isset; } -WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1128) { - resourcePlanName = other1128.resourcePlanName; - triggerName = other1128.triggerName; - poolPath = other1128.poolPath; - drop = other1128.drop; - __isset = other1128.__isset; +WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1137) { + resourcePlanName = other1137.resourcePlanName; + triggerName = other1137.triggerName; + poolPath = other1137.poolPath; + drop = other1137.drop; + __isset = other1137.__isset; return *this; } void WMCreateOrDropTriggerToPoolMappingRequest::printTo(std::ostream& out) const { @@ -29478,11 +30117,11 @@ void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerTo (void) b; } -WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1129) { - (void) other1129; +WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1138) { + (void) other1138; } -WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1130) { - (void) other1130; +WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1139) { + (void) other1139; return *this; } void WMCreateOrDropTriggerToPoolMappingResponse::printTo(std::ostream& out) const { @@ -29557,9 +30196,9 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == 
::apache::thrift::protocol::T_I32) { - int32_t ecast1131; - xfer += iprot->readI32(ecast1131); - this->schemaType = (SchemaType::type)ecast1131; + int32_t ecast1140; + xfer += iprot->readI32(ecast1140); + this->schemaType = (SchemaType::type)ecast1140; this->__isset.schemaType = true; } else { xfer += iprot->skip(ftype); @@ -29591,9 +30230,9 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1132; - xfer += iprot->readI32(ecast1132); - this->compatibility = (SchemaCompatibility::type)ecast1132; + int32_t ecast1141; + xfer += iprot->readI32(ecast1141); + this->compatibility = (SchemaCompatibility::type)ecast1141; this->__isset.compatibility = true; } else { xfer += iprot->skip(ftype); @@ -29601,9 +30240,9 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1133; - xfer += iprot->readI32(ecast1133); - this->validationLevel = (SchemaValidation::type)ecast1133; + int32_t ecast1142; + xfer += iprot->readI32(ecast1142); + this->validationLevel = (SchemaValidation::type)ecast1142; this->__isset.validationLevel = true; } else { xfer += iprot->skip(ftype); @@ -29707,29 +30346,29 @@ void swap(ISchema &a, ISchema &b) { swap(a.__isset, b.__isset); } -ISchema::ISchema(const ISchema& other1134) { - schemaType = other1134.schemaType; - name = other1134.name; - catName = other1134.catName; - dbName = other1134.dbName; - compatibility = other1134.compatibility; - validationLevel = other1134.validationLevel; - canEvolve = other1134.canEvolve; - schemaGroup = other1134.schemaGroup; - description = other1134.description; - __isset = other1134.__isset; -} -ISchema& ISchema::operator=(const ISchema& other1135) { - schemaType = other1135.schemaType; - name = other1135.name; - catName = other1135.catName; - dbName = other1135.dbName; - compatibility = other1135.compatibility; - validationLevel = other1135.validationLevel; - canEvolve = other1135.canEvolve; - schemaGroup = other1135.schemaGroup; - description = other1135.description; - __isset = other1135.__isset; +ISchema::ISchema(const ISchema& other1143) { + schemaType = other1143.schemaType; + name = other1143.name; + catName = other1143.catName; + dbName = other1143.dbName; + compatibility = other1143.compatibility; + validationLevel = other1143.validationLevel; + canEvolve = other1143.canEvolve; + schemaGroup = other1143.schemaGroup; + description = other1143.description; + __isset = other1143.__isset; +} +ISchema& ISchema::operator=(const ISchema& other1144) { + schemaType = other1144.schemaType; + name = other1144.name; + catName = other1144.catName; + dbName = other1144.dbName; + compatibility = other1144.compatibility; + validationLevel = other1144.validationLevel; + canEvolve = other1144.canEvolve; + schemaGroup = other1144.schemaGroup; + description = other1144.description; + __isset = other1144.__isset; return *this; } void ISchema::printTo(std::ostream& out) const { @@ -29851,17 +30490,17 @@ void swap(ISchemaName &a, ISchemaName &b) { swap(a.__isset, b.__isset); } -ISchemaName::ISchemaName(const ISchemaName& other1136) { - catName = other1136.catName; - dbName = other1136.dbName; - schemaName = other1136.schemaName; - __isset = other1136.__isset; +ISchemaName::ISchemaName(const ISchemaName& other1145) { + catName = other1145.catName; + dbName = other1145.dbName; + schemaName = other1145.schemaName; + __isset = other1145.__isset; } -ISchemaName& 
ISchemaName::operator=(const ISchemaName& other1137) { - catName = other1137.catName; - dbName = other1137.dbName; - schemaName = other1137.schemaName; - __isset = other1137.__isset; +ISchemaName& ISchemaName::operator=(const ISchemaName& other1146) { + catName = other1146.catName; + dbName = other1146.dbName; + schemaName = other1146.schemaName; + __isset = other1146.__isset; return *this; } void ISchemaName::printTo(std::ostream& out) const { @@ -29960,15 +30599,15 @@ void swap(AlterISchemaRequest &a, AlterISchemaRequest &b) { swap(a.__isset, b.__isset); } -AlterISchemaRequest::AlterISchemaRequest(const AlterISchemaRequest& other1138) { - name = other1138.name; - newSchema = other1138.newSchema; - __isset = other1138.__isset; +AlterISchemaRequest::AlterISchemaRequest(const AlterISchemaRequest& other1147) { + name = other1147.name; + newSchema = other1147.newSchema; + __isset = other1147.__isset; } -AlterISchemaRequest& AlterISchemaRequest::operator=(const AlterISchemaRequest& other1139) { - name = other1139.name; - newSchema = other1139.newSchema; - __isset = other1139.__isset; +AlterISchemaRequest& AlterISchemaRequest::operator=(const AlterISchemaRequest& other1148) { + name = other1148.name; + newSchema = other1148.newSchema; + __isset = other1148.__isset; return *this; } void AlterISchemaRequest::printTo(std::ostream& out) const { @@ -30079,14 +30718,14 @@ uint32_t SchemaVersion::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->cols.clear(); - uint32_t _size1140; - ::apache::thrift::protocol::TType _etype1143; - xfer += iprot->readListBegin(_etype1143, _size1140); - this->cols.resize(_size1140); - uint32_t _i1144; - for (_i1144 = 0; _i1144 < _size1140; ++_i1144) + uint32_t _size1149; + ::apache::thrift::protocol::TType _etype1152; + xfer += iprot->readListBegin(_etype1152, _size1149); + this->cols.resize(_size1149); + uint32_t _i1153; + for (_i1153 = 0; _i1153 < _size1149; ++_i1153) { - xfer += this->cols[_i1144].read(iprot); + xfer += this->cols[_i1153].read(iprot); } xfer += iprot->readListEnd(); } @@ -30097,9 +30736,9 @@ uint32_t SchemaVersion::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1145; - xfer += iprot->readI32(ecast1145); - this->state = (SchemaVersionState::type)ecast1145; + int32_t ecast1154; + xfer += iprot->readI32(ecast1154); + this->state = (SchemaVersionState::type)ecast1154; this->__isset.state = true; } else { xfer += iprot->skip(ftype); @@ -30177,10 +30816,10 @@ uint32_t SchemaVersion::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->cols.size())); - std::vector<FieldSchema> ::const_iterator _iter1146; - for (_iter1146 = this->cols.begin(); _iter1146 != this->cols.end(); ++_iter1146) + std::vector<FieldSchema> ::const_iterator _iter1155; + for (_iter1155 = this->cols.begin(); _iter1155 != this->cols.end(); ++_iter1155) { - xfer += (*_iter1146).write(oprot); + xfer += (*_iter1155).write(oprot); } xfer += oprot->writeListEnd(); } @@ -30236,31 +30875,31 @@ void swap(SchemaVersion &a, SchemaVersion &b) { swap(a.__isset, b.__isset); } -SchemaVersion::SchemaVersion(const SchemaVersion& other1147) { - schema = other1147.schema; - version = other1147.version; - createdAt = other1147.createdAt; - cols = other1147.cols; - state = other1147.state; - description =
other1147.description; - schemaText = other1147.schemaText; - fingerprint = other1147.fingerprint; - name = other1147.name; - serDe = other1147.serDe; - __isset = other1147.__isset; -} -SchemaVersion& SchemaVersion::operator=(const SchemaVersion& other1148) { - schema = other1148.schema; - version = other1148.version; - createdAt = other1148.createdAt; - cols = other1148.cols; - state = other1148.state; - description = other1148.description; - schemaText = other1148.schemaText; - fingerprint = other1148.fingerprint; - name = other1148.name; - serDe = other1148.serDe; - __isset = other1148.__isset; +SchemaVersion::SchemaVersion(const SchemaVersion& other1156) { + schema = other1156.schema; + version = other1156.version; + createdAt = other1156.createdAt; + cols = other1156.cols; + state = other1156.state; + description = other1156.description; + schemaText = other1156.schemaText; + fingerprint = other1156.fingerprint; + name = other1156.name; + serDe = other1156.serDe; + __isset = other1156.__isset; +} +SchemaVersion& SchemaVersion::operator=(const SchemaVersion& other1157) { + schema = other1157.schema; + version = other1157.version; + createdAt = other1157.createdAt; + cols = other1157.cols; + state = other1157.state; + description = other1157.description; + schemaText = other1157.schemaText; + fingerprint = other1157.fingerprint; + name = other1157.name; + serDe = other1157.serDe; + __isset = other1157.__isset; return *this; } void SchemaVersion::printTo(std::ostream& out) const { @@ -30366,15 +31005,15 @@ void swap(SchemaVersionDescriptor &a, SchemaVersionDescriptor &b) { swap(a.__isset, b.__isset); } -SchemaVersionDescriptor::SchemaVersionDescriptor(const SchemaVersionDescriptor& other1149) { - schema = other1149.schema; - version = other1149.version; - __isset = other1149.__isset; +SchemaVersionDescriptor::SchemaVersionDescriptor(const SchemaVersionDescriptor& other1158) { + schema = other1158.schema; + version = other1158.version; + __isset = other1158.__isset; } -SchemaVersionDescriptor& SchemaVersionDescriptor::operator=(const SchemaVersionDescriptor& other1150) { - schema = other1150.schema; - version = other1150.version; - __isset = other1150.__isset; +SchemaVersionDescriptor& SchemaVersionDescriptor::operator=(const SchemaVersionDescriptor& other1159) { + schema = other1159.schema; + version = other1159.version; + __isset = other1159.__isset; return *this; } void SchemaVersionDescriptor::printTo(std::ostream& out) const { @@ -30495,17 +31134,17 @@ void swap(FindSchemasByColsRqst &a, FindSchemasByColsRqst &b) { swap(a.__isset, b.__isset); } -FindSchemasByColsRqst::FindSchemasByColsRqst(const FindSchemasByColsRqst& other1151) { - colName = other1151.colName; - colNamespace = other1151.colNamespace; - type = other1151.type; - __isset = other1151.__isset; +FindSchemasByColsRqst::FindSchemasByColsRqst(const FindSchemasByColsRqst& other1160) { + colName = other1160.colName; + colNamespace = other1160.colNamespace; + type = other1160.type; + __isset = other1160.__isset; } -FindSchemasByColsRqst& FindSchemasByColsRqst::operator=(const FindSchemasByColsRqst& other1152) { - colName = other1152.colName; - colNamespace = other1152.colNamespace; - type = other1152.type; - __isset = other1152.__isset; +FindSchemasByColsRqst& FindSchemasByColsRqst::operator=(const FindSchemasByColsRqst& other1161) { + colName = other1161.colName; + colNamespace = other1161.colNamespace; + type = other1161.type; + __isset = other1161.__isset; return *this; } void FindSchemasByColsRqst::printTo(std::ostream& 
out) const {
@@ -30551,14 +31190,14 @@ uint32_t FindSchemasByColsResp::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->schemaVersions.clear(); - uint32_t _size1153; - ::apache::thrift::protocol::TType _etype1156; - xfer += iprot->readListBegin(_etype1156, _size1153); - this->schemaVersions.resize(_size1153); - uint32_t _i1157; - for (_i1157 = 0; _i1157 < _size1153; ++_i1157) + uint32_t _size1162; + ::apache::thrift::protocol::TType _etype1165; + xfer += iprot->readListBegin(_etype1165, _size1162); + this->schemaVersions.resize(_size1162); + uint32_t _i1166; + for (_i1166 = 0; _i1166 < _size1162; ++_i1166) { - xfer += this->schemaVersions[_i1157].read(iprot); + xfer += this->schemaVersions[_i1166].read(iprot); } xfer += iprot->readListEnd(); }
@@ -30587,10 +31226,10 @@ uint32_t FindSchemasByColsResp::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("schemaVersions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->schemaVersions.size())); - std::vector<SchemaVersion> ::const_iterator _iter1158; - for (_iter1158 = this->schemaVersions.begin(); _iter1158 != this->schemaVersions.end(); ++_iter1158) + std::vector<SchemaVersion> ::const_iterator _iter1167; + for (_iter1167 = this->schemaVersions.begin(); _iter1167 != this->schemaVersions.end(); ++_iter1167) { - xfer += (*_iter1158).write(oprot); + xfer += (*_iter1167).write(oprot); } xfer += oprot->writeListEnd(); }
@@ -30607,13 +31246,13 @@ void swap(FindSchemasByColsResp &a, FindSchemasByColsResp &b) { swap(a.__isset, b.__isset); } -FindSchemasByColsResp::FindSchemasByColsResp(const FindSchemasByColsResp& other1159) { - schemaVersions = other1159.schemaVersions; - __isset = other1159.__isset; +FindSchemasByColsResp::FindSchemasByColsResp(const FindSchemasByColsResp& other1168) { + schemaVersions = other1168.schemaVersions; + __isset = other1168.__isset; } -FindSchemasByColsResp& FindSchemasByColsResp::operator=(const FindSchemasByColsResp& other1160) { - schemaVersions = other1160.schemaVersions; - __isset = other1160.__isset; +FindSchemasByColsResp& FindSchemasByColsResp::operator=(const FindSchemasByColsResp& other1169) { + schemaVersions = other1169.schemaVersions; + __isset = other1169.__isset; return *this; } void FindSchemasByColsResp::printTo(std::ostream& out) const {
@@ -30710,15 +31349,15 @@ void swap(MapSchemaVersionToSerdeRequest &a, MapSchemaVersionToSerdeRequest &b) swap(a.__isset, b.__isset); } -MapSchemaVersionToSerdeRequest::MapSchemaVersionToSerdeRequest(const MapSchemaVersionToSerdeRequest& other1161) { - schemaVersion = other1161.schemaVersion; - serdeName = other1161.serdeName; - __isset = other1161.__isset; +MapSchemaVersionToSerdeRequest::MapSchemaVersionToSerdeRequest(const MapSchemaVersionToSerdeRequest& other1170) { + schemaVersion = other1170.schemaVersion; + serdeName = other1170.serdeName; + __isset = other1170.__isset; } -MapSchemaVersionToSerdeRequest& MapSchemaVersionToSerdeRequest::operator=(const MapSchemaVersionToSerdeRequest& other1162) { - schemaVersion = other1162.schemaVersion; - serdeName = other1162.serdeName; - __isset = other1162.__isset; +MapSchemaVersionToSerdeRequest& MapSchemaVersionToSerdeRequest::operator=(const MapSchemaVersionToSerdeRequest& other1171) { + schemaVersion = other1171.schemaVersion; + serdeName = other1171.serdeName; + __isset = other1171.__isset; return *this; } void MapSchemaVersionToSerdeRequest::printTo(std::ostream& out) const { @@ 
-30773,9 +31412,9 @@ uint32_t SetSchemaVersionStateRequest::read(::apache::thrift::protocol::TProtoco break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1163; - xfer += iprot->readI32(ecast1163); - this->state = (SchemaVersionState::type)ecast1163; + int32_t ecast1172; + xfer += iprot->readI32(ecast1172); + this->state = (SchemaVersionState::type)ecast1172; this->__isset.state = true; } else { xfer += iprot->skip(ftype); @@ -30818,15 +31457,15 @@ void swap(SetSchemaVersionStateRequest &a, SetSchemaVersionStateRequest &b) { swap(a.__isset, b.__isset); } -SetSchemaVersionStateRequest::SetSchemaVersionStateRequest(const SetSchemaVersionStateRequest& other1164) { - schemaVersion = other1164.schemaVersion; - state = other1164.state; - __isset = other1164.__isset; +SetSchemaVersionStateRequest::SetSchemaVersionStateRequest(const SetSchemaVersionStateRequest& other1173) { + schemaVersion = other1173.schemaVersion; + state = other1173.state; + __isset = other1173.__isset; } -SetSchemaVersionStateRequest& SetSchemaVersionStateRequest::operator=(const SetSchemaVersionStateRequest& other1165) { - schemaVersion = other1165.schemaVersion; - state = other1165.state; - __isset = other1165.__isset; +SetSchemaVersionStateRequest& SetSchemaVersionStateRequest::operator=(const SetSchemaVersionStateRequest& other1174) { + schemaVersion = other1174.schemaVersion; + state = other1174.state; + __isset = other1174.__isset; return *this; } void SetSchemaVersionStateRequest::printTo(std::ostream& out) const { @@ -30907,13 +31546,13 @@ void swap(GetSerdeRequest &a, GetSerdeRequest &b) { swap(a.__isset, b.__isset); } -GetSerdeRequest::GetSerdeRequest(const GetSerdeRequest& other1166) { - serdeName = other1166.serdeName; - __isset = other1166.__isset; +GetSerdeRequest::GetSerdeRequest(const GetSerdeRequest& other1175) { + serdeName = other1175.serdeName; + __isset = other1175.__isset; } -GetSerdeRequest& GetSerdeRequest::operator=(const GetSerdeRequest& other1167) { - serdeName = other1167.serdeName; - __isset = other1167.__isset; +GetSerdeRequest& GetSerdeRequest::operator=(const GetSerdeRequest& other1176) { + serdeName = other1176.serdeName; + __isset = other1176.__isset; return *this; } void GetSerdeRequest::printTo(std::ostream& out) const { @@ -31035,17 +31674,17 @@ void swap(RuntimeStat &a, RuntimeStat &b) { swap(a.__isset, b.__isset); } -RuntimeStat::RuntimeStat(const RuntimeStat& other1168) { - createTime = other1168.createTime; - weight = other1168.weight; - payload = other1168.payload; - __isset = other1168.__isset; +RuntimeStat::RuntimeStat(const RuntimeStat& other1177) { + createTime = other1177.createTime; + weight = other1177.weight; + payload = other1177.payload; + __isset = other1177.__isset; } -RuntimeStat& RuntimeStat::operator=(const RuntimeStat& other1169) { - createTime = other1169.createTime; - weight = other1169.weight; - payload = other1169.payload; - __isset = other1169.__isset; +RuntimeStat& RuntimeStat::operator=(const RuntimeStat& other1178) { + createTime = other1178.createTime; + weight = other1178.weight; + payload = other1178.payload; + __isset = other1178.__isset; return *this; } void RuntimeStat::printTo(std::ostream& out) const { @@ -31149,13 +31788,13 @@ void swap(GetRuntimeStatsRequest &a, GetRuntimeStatsRequest &b) { swap(a.maxCreateTime, b.maxCreateTime); } -GetRuntimeStatsRequest::GetRuntimeStatsRequest(const GetRuntimeStatsRequest& other1170) { - maxWeight = other1170.maxWeight; - maxCreateTime = other1170.maxCreateTime; 
+GetRuntimeStatsRequest::GetRuntimeStatsRequest(const GetRuntimeStatsRequest& other1179) { + maxWeight = other1179.maxWeight; + maxCreateTime = other1179.maxCreateTime; } -GetRuntimeStatsRequest& GetRuntimeStatsRequest::operator=(const GetRuntimeStatsRequest& other1171) { - maxWeight = other1171.maxWeight; - maxCreateTime = other1171.maxCreateTime; +GetRuntimeStatsRequest& GetRuntimeStatsRequest::operator=(const GetRuntimeStatsRequest& other1180) { + maxWeight = other1180.maxWeight; + maxCreateTime = other1180.maxCreateTime; return *this; } void GetRuntimeStatsRequest::printTo(std::ostream& out) const {
@@ -31167,6 +31806,290 @@ void GetRuntimeStatsRequest::printTo(std::ostream& out) const { } +AlterPartitionsRequest::~AlterPartitionsRequest() throw() { +} + + +void AlterPartitionsRequest::__set_dbName(const std::string& val) { + this->dbName = val; +} + +void AlterPartitionsRequest::__set_tableName(const std::string& val) { + this->tableName = val; +} + +void AlterPartitionsRequest::__set_partitions(const std::vector<Partition> & val) { + this->partitions = val; +} + +void AlterPartitionsRequest::__set_environmentContext(const EnvironmentContext& val) { + this->environmentContext = val; +} + +void AlterPartitionsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void AlterPartitionsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + +uint32_t AlterPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_dbName = false; + bool isset_tableName = false; + bool isset_partitions = false; + bool isset_environmentContext = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + isset_tableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->partitions.clear(); + uint32_t _size1181; + ::apache::thrift::protocol::TType _etype1184; + xfer += iprot->readListBegin(_etype1184, _size1181); + this->partitions.resize(_size1181); + uint32_t _i1185; + for (_i1185 = 0; _i1185 < _size1181; ++_i1185) + { + xfer += this->partitions[_i1185].read(iprot); + } + xfer += iprot->readListEnd(); + } + isset_partitions = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->environmentContext.read(iprot); + isset_environmentContext = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_partitions) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_environmentContext) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t AlterPartitionsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AlterPartitionsRequest"); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size())); + std::vector<Partition> ::const_iterator _iter1186; + for (_iter1186 = this->partitions.begin(); _iter1186 != this->partitions.end(); ++_iter1186) + { + xfer += (*_iter1186).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environmentContext", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->environmentContext.write(oprot); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 5); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AlterPartitionsRequest &a, AlterPartitionsRequest &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tableName, b.tableName); + swap(a.partitions, b.partitions); + swap(a.environmentContext, b.environmentContext); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); + swap(a.__isset, b.__isset); +} + +AlterPartitionsRequest::AlterPartitionsRequest(const AlterPartitionsRequest& other1187) { + dbName = other1187.dbName; + tableName = other1187.tableName; + partitions = other1187.partitions; + environmentContext = other1187.environmentContext; + txnId = other1187.txnId; + validWriteIdList = other1187.validWriteIdList; + __isset = other1187.__isset; +} +AlterPartitionsRequest& AlterPartitionsRequest::operator=(const AlterPartitionsRequest& other1188) { + dbName = other1188.dbName; + tableName = other1188.tableName; + partitions = other1188.partitions; + environmentContext = other1188.environmentContext; + txnId = other1188.txnId; + validWriteIdList = other1188.validWriteIdList; + __isset = other1188.__isset; + return *this; +} +void AlterPartitionsRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AlterPartitionsRequest("; + out << "dbName=" << to_string(dbName); + out << ", " << "tableName=" << to_string(tableName); + out << ", " << "partitions=" << to_string(partitions); + out << ", " << "environmentContext=" << to_string(environmentContext); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); + out << ")"; +} + + +AlterPartitionsResponse::~AlterPartitionsResponse() throw() { +} + + +uint32_t AlterPartitionsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t AlterPartitionsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AlterPartitionsResponse"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AlterPartitionsResponse &a, AlterPartitionsResponse &b) { + using ::std::swap; + (void) a; + (void) b; +} + +AlterPartitionsResponse::AlterPartitionsResponse(const AlterPartitionsResponse& other1189) { + (void) other1189; +} +AlterPartitionsResponse& AlterPartitionsResponse::operator=(const AlterPartitionsResponse& other1190) { + (void) other1190; + return *this; +} +void AlterPartitionsResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AlterPartitionsResponse("; + out << ")"; +} + +
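The hunk above is the C++ implementation of the new alter_partitions pair: AlterPartitionsRequest carries four required fields (dbName, tableName, partitions, environmentContext) plus optional txnId and validWriteIdList, while AlterPartitionsResponse is an empty acknowledgement. A minimal caller-side sketch follows; the database, table, and write-id-list values are hypothetical, and the validWriteIdList string format is an assumption modeled on Hive's ValidReaderWriteIdList serialization:

    // Illustrative only; not part of the generated patch.
    AlterPartitionsRequest req;
    req.__set_dbName("default");                        // required (field 1)
    req.__set_tableName("acid_tbl");                    // required (field 2)
    req.__set_partitions(std::vector<Partition>());     // required (field 3)
    req.__set_environmentContext(EnvironmentContext()); // required (field 4)
    req.__set_txnId(42);                                // optional (field 5); flips __isset.txnId
    req.__set_validWriteIdList("default.acid_tbl:5::"); // optional (field 6); hypothetical format

Note that _AlterPartitionsRequest__isset defaults txnId to true, so write() transmits the -1LL "no transaction" sentinel even when the caller never touches the field, whereas validWriteIdList is skipped until explicitly set.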
 MetaException::~MetaException() throw() { }
@@ -31236,13 +32159,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other1172) : TException() { - message = other1172.message; - __isset = other1172.__isset; +MetaException::MetaException(const MetaException& other1191) : TException() { + message = other1191.message; + __isset = other1191.__isset; } -MetaException& MetaException::operator=(const MetaException& other1173) { - message = other1173.message; - __isset = other1173.__isset; +MetaException& MetaException::operator=(const MetaException& other1192) { + message = other1192.message; + __isset = other1192.__isset; return *this; } void MetaException::printTo(std::ostream& out) const {
@@ -31333,13 +32256,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other1174) : TException() { - message = other1174.message; - __isset = other1174.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other1193) : TException() { + message = other1193.message; + __isset = other1193.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1175) { - message = other1175.message; - __isset = other1175.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1194) { + message = other1194.message; + __isset = other1194.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const {
@@ -31430,13 +32353,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other1176) : TException() { - message = other1176.message; - __isset = other1176.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other1195) : TException() { + message = other1195.message; + __isset = other1195.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1177) { - message = other1177.message; - __isset = other1177.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1196) { + message = other1196.message; + __isset = other1196.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const {
@@ -31527,13 +32450,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1178) : TException() { - message = other1178.message; - __isset = other1178.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1197) : TException() { + message = other1197.message; + __isset = other1197.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1179) { - message = other1179.message; - __isset = other1179.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1198) { + message = other1198.message; + __isset = other1198.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const {
@@ -31624,13 +32547,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1180) : TException() { - message = other1180.message; - __isset = other1180.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1199) : TException() { + message = other1199.message; + __isset = other1199.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1181) { - message = other1181.message; - __isset = other1181.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1200) { + message = other1200.message; + __isset = other1200.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const {
@@ -31721,13 +32644,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1182) : TException() { - message = other1182.message; - __isset = other1182.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1201) : TException() { + message = other1201.message; + __isset = other1201.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1183) { - message = other1183.message; - __isset = other1183.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1202) { + message = other1202.message; + __isset = other1202.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) 
const { @@ -31818,13 +32741,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1184) : TException() { - message = other1184.message; - __isset = other1184.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1203) : TException() { + message = other1203.message; + __isset = other1203.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1185) { - message = other1185.message; - __isset = other1185.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1204) { + message = other1204.message; + __isset = other1204.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -31915,13 +32838,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1186) : TException() { - message = other1186.message; - __isset = other1186.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1205) : TException() { + message = other1205.message; + __isset = other1205.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1187) { - message = other1187.message; - __isset = other1187.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1206) { + message = other1206.message; + __isset = other1206.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -32012,13 +32935,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1188) : TException() { - message = other1188.message; - __isset = other1188.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1207) : TException() { + message = other1207.message; + __isset = other1207.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1189) { - message = other1189.message; - __isset = other1189.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1208) { + message = other1208.message; + __isset = other1208.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -32109,13 +33032,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1190) : TException() { - message = other1190.message; - __isset = other1190.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1209) : TException() { + message = other1209.message; + __isset = other1209.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1191) { - message = other1191.message; - __isset = other1191.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1210) { + message = other1210.message; + __isset = other1210.__isset; return *this; } void 
ConfigValSecurityException::printTo(std::ostream& out) const { @@ -32206,13 +33129,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other1192) : TException() { - message = other1192.message; - __isset = other1192.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other1211) : TException() { + message = other1211.message; + __isset = other1211.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1193) { - message = other1193.message; - __isset = other1193.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1212) { + message = other1212.message; + __isset = other1212.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -32303,13 +33226,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1194) : TException() { - message = other1194.message; - __isset = other1194.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1213) : TException() { + message = other1213.message; + __isset = other1213.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1195) { - message = other1195.message; - __isset = other1195.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1214) { + message = other1214.message; + __isset = other1214.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -32400,13 +33323,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1196) : TException() { - message = other1196.message; - __isset = other1196.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1215) : TException() { + message = other1215.message; + __isset = other1215.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1197) { - message = other1197.message; - __isset = other1197.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1216) { + message = other1216.message; + __isset = other1216.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -32497,13 +33420,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other1198) : TException() { - message = other1198.message; - __isset = other1198.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other1217) : TException() { + message = other1217.message; + __isset = other1217.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1199) { - message = other1199.message; - __isset = other1199.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1218) { + message = other1218.message; + __isset = other1218.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -32594,13 +33517,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1200) : TException() { - message = 
other1200.message; - __isset = other1200.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1219) : TException() { + message = other1219.message; + __isset = other1219.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1201) { - message = other1201.message; - __isset = other1201.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1220) { + message = other1220.message; + __isset = other1220.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const {
diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 5c6495e43f..68e34d57bb 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -185,6 +185,16 @@ struct SchemaVersionState { extern const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES; +struct IsolationLevelCompliance { + enum type { + YES = 1, + NO = 2, + UNKNOWN = 3 + }; +}; + +extern const std::map<int, const char*> _IsolationLevelCompliance_VALUES_TO_NAMES; + struct FunctionType { enum type { JAVA = 1
@@ -667,6 +677,10 @@ class RuntimeStat; class GetRuntimeStatsRequest; +class AlterPartitionsRequest; + +class AlterPartitionsResponse; + class MetaException; class UnknownTableException;
@@ -3101,7 +3115,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) } typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true), txnId(true), validWriteIdList(false), isStatsCompliant(false) {} bool tableName :1; bool dbName :1; bool owner :1;
@@ -3120,6 +3134,9 @@ typedef struct _Table__isset { bool creationMetadata :1; bool catName :1; bool ownerType :1; + bool txnId :1; + bool validWriteIdList :1; + bool isStatsCompliant :1; } _Table__isset; class Table {
@@ -3127,7 +3144,7 @@ class Table { Table(const Table&); Table& operator=(const Table&); - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1) { + Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { ownerType = (PrincipalType::type)1; }
@@ -3151,6 +3168,9 @@ class Table { CreationMetadata creationMetadata; std::string catName; PrincipalType::type ownerType; + int64_t txnId; + std::string validWriteIdList; + IsolationLevelCompliance::type isStatsCompliant; _Table__isset __isset;
@@ -3190,6 +3210,12 @@ class Table { void __set_ownerType(const PrincipalType::type val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + + void __set_isStatsCompliant(const IsolationLevelCompliance::type val); + bool operator == (const Table & rhs) const { if (!(tableName == rhs.tableName))
@@ -3240,6 +3266,18 @@ class Table { return false; else if (__isset.ownerType && !(ownerType == rhs.ownerType)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const Table &rhs) const {
@@ -3263,7 +3301,7 @@ inline std::ostream& operator<<(std::ostream& out, const Table& obj) } typedef struct _Partition__isset { - _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false) {} + _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false), txnId(true), validWriteIdList(false), isStatsCompliant(false) {} bool values :1; bool dbName :1; bool tableName :1;
@@ -3273,6 +3311,9 @@ typedef struct _Partition__isset { bool parameters :1; bool privileges :1; bool catName :1; + bool txnId :1; + bool validWriteIdList :1; + bool isStatsCompliant :1; } _Partition__isset; class Partition {
@@ -3280,7 +3321,7 @@ class Partition { Partition(const Partition&); Partition& operator=(const Partition&); - Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName() { + Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName(), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~Partition() throw();
@@ -3293,6 +3334,9 @@ class Partition { std::map<std::string, std::string> parameters; PrincipalPrivilegeSet privileges; std::string catName; + int64_t txnId; + std::string validWriteIdList; + IsolationLevelCompliance::type isStatsCompliant; _Partition__isset __isset;
@@ -3314,6 +3358,12 @@ class Partition { void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + + void __set_isStatsCompliant(const IsolationLevelCompliance::type val); + bool operator == (const Partition & rhs) const { if (!(values == rhs.values))
@@ -3338,6 +3388,18 @@ class Partition { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const Partition &rhs) const {
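Table and Partition (and, below, PartitionSpec plus the statistics structs) all gain the same trio: txnId and validWriteIdList on the write/request side, and an IsolationLevelCompliance verdict named isStatsCompliant on the read side. The default-constructed value is (IsolationLevelCompliance::type)0, which lies outside the declared YES=1/NO=2/UNKNOWN=3 range, so consumers should consult the __isset bit before trusting the field. A hedged reader-side sketch, where part is a Partition obtained from a txn-aware metastore call (the surrounding fetch is assumed, not shown):

    // Illustrative only: treat cached stats as usable only on an explicit YES.
    bool statsUsable = part.__isset.isStatsCompliant
        && part.isStatsCompliant == IsolationLevelCompliance::YES;
    if (!statsUsable) {
      // NO, UNKNOWN, or unset: fall back to recomputing or ignoring column stats.
    }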
@@ -3537,13 +3599,16 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionListComposingS } typedef struct _PartitionSpec__isset { - _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false) {} + _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false), txnId(true), validWriteIdList(false), isStatsCompliant(false) {} bool dbName :1; bool tableName :1; bool rootPath :1; bool sharedSDPartitionSpec :1; bool partitionList :1; bool catName :1; + bool txnId :1; + bool validWriteIdList :1; + bool isStatsCompliant :1; } _PartitionSpec__isset; class PartitionSpec {
@@ -3551,7 +3616,7 @@ class PartitionSpec { PartitionSpec(const PartitionSpec&); PartitionSpec& operator=(const PartitionSpec&); - PartitionSpec() : dbName(), tableName(), rootPath(), catName() { + PartitionSpec() : dbName(), tableName(), rootPath(), catName(), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~PartitionSpec() throw();
@@ -3561,6 +3626,9 @@ class PartitionSpec { PartitionSpecWithSharedSD sharedSDPartitionSpec; PartitionListComposingSpec partitionList; std::string catName; + int64_t txnId; + std::string validWriteIdList; + IsolationLevelCompliance::type isStatsCompliant; _PartitionSpec__isset __isset;
@@ -3576,6 +3644,12 @@ class PartitionSpec { void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + + void __set_isStatsCompliant(const IsolationLevelCompliance::type val); + bool operator == (const PartitionSpec & rhs) const { if (!(dbName == rhs.dbName))
@@ -3596,6 +3670,18 @@ class PartitionSpec { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const PartitionSpec &rhs) const {
@@ -4404,29 +4490,58 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatisticsDesc& o return out; } +typedef struct _ColumnStatistics__isset { + _ColumnStatistics__isset() : txnId(true), validWriteIdList(false), isStatsCompliant(false) {} + bool txnId :1; + bool validWriteIdList :1; + bool isStatsCompliant :1; +} _ColumnStatistics__isset; class ColumnStatistics { public: ColumnStatistics(const ColumnStatistics&); ColumnStatistics& operator=(const ColumnStatistics&); - ColumnStatistics() { + ColumnStatistics() : txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~ColumnStatistics() throw(); ColumnStatisticsDesc statsDesc; std::vector<ColumnStatisticsObj> statsObj; + int64_t txnId; + std::string validWriteIdList; + IsolationLevelCompliance::type isStatsCompliant; + + _ColumnStatistics__isset __isset; void __set_statsDesc(const ColumnStatisticsDesc& val); void __set_statsObj(const std::vector<ColumnStatisticsObj> & val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + + void __set_isStatsCompliant(const IsolationLevelCompliance::type val); + bool operator == (const ColumnStatistics & rhs) const { if (!(statsDesc == rhs.statsDesc)) return false; if (!(statsObj == rhs.statsObj)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const ColumnStatistics &rhs) const {
@@ -4449,29 +4564,42 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatistics& obj) return out; } +typedef struct _AggrStats__isset { + _AggrStats__isset() : isStatsCompliant(false) {} + bool isStatsCompliant :1; +} _AggrStats__isset; class AggrStats { public: AggrStats(const AggrStats&); AggrStats& operator=(const AggrStats&); - AggrStats() : partsFound(0) { + AggrStats() : partsFound(0), isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~AggrStats() throw(); std::vector<ColumnStatisticsObj> colStats; int64_t partsFound; + IsolationLevelCompliance::type isStatsCompliant; + + _AggrStats__isset __isset; void __set_colStats(const std::vector<ColumnStatisticsObj> & val); void __set_partsFound(const int64_t val); + void __set_isStatsCompliant(const IsolationLevelCompliance::type val); + bool operator == (const AggrStats & rhs) const { if (!(colStats == rhs.colStats)) return false; if (!(partsFound == rhs.partsFound)) return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const AggrStats &rhs) const {
@@ -4495,8 +4623,10 @@ inline std::ostream& operator<<(std::ostream& out, const AggrStats& obj) } typedef struct _SetPartitionsStatsRequest__isset { - _SetPartitionsStatsRequest__isset() : needMerge(false) {} + _SetPartitionsStatsRequest__isset() : needMerge(false), txnId(true), validWriteIdList(false) {} bool needMerge :1; + bool txnId :1; + bool validWriteIdList :1; } _SetPartitionsStatsRequest__isset; class SetPartitionsStatsRequest {
@@ -4504,12 +4634,14 @@ class SetPartitionsStatsRequest { SetPartitionsStatsRequest(const SetPartitionsStatsRequest&); SetPartitionsStatsRequest& operator=(const SetPartitionsStatsRequest&); - SetPartitionsStatsRequest() : needMerge(0) { + SetPartitionsStatsRequest() : needMerge(0), txnId(-1LL), validWriteIdList() { } virtual ~SetPartitionsStatsRequest() throw(); std::vector<ColumnStatistics> colStats; bool needMerge; + int64_t txnId; + std::string validWriteIdList; _SetPartitionsStatsRequest__isset __isset;
@@ -4517,6 +4649,10 @@ class SetPartitionsStatsRequest { void __set_needMerge(const bool val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator == (const SetPartitionsStatsRequest & rhs) const { if (!(colStats == rhs.colStats))
@@ -4525,6 +4661,14 @@ class SetPartitionsStatsRequest { return false; else if (__isset.needMerge && !(needMerge == rhs.needMerge)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const SetPartitionsStatsRequest &rhs) const {
@@ -5642,24 +5786,37 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsByExprRequest return out; } +typedef struct _TableStatsResult__isset { + _TableStatsResult__isset() : isStatsCompliant(false) {} + bool isStatsCompliant :1; +} _TableStatsResult__isset; class TableStatsResult { public: TableStatsResult(const TableStatsResult&); TableStatsResult& operator=(const TableStatsResult&); - TableStatsResult() { + TableStatsResult() : isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~TableStatsResult() throw(); std::vector<ColumnStatisticsObj> tableStats; + IsolationLevelCompliance::type isStatsCompliant; + + _TableStatsResult__isset __isset; void __set_tableStats(const std::vector<ColumnStatisticsObj> & val); + void __set_isStatsCompliant(const IsolationLevelCompliance::type val); + bool operator == (const TableStatsResult & rhs) const { if (!(tableStats == rhs.tableStats)) return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const TableStatsResult &rhs) const {
@@ -5682,24 +5839,37 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsResult& obj) return out; } +typedef struct _PartitionsStatsResult__isset { + _PartitionsStatsResult__isset() : isStatsCompliant(false) {} + bool isStatsCompliant :1; +} _PartitionsStatsResult__isset; class PartitionsStatsResult { public: PartitionsStatsResult(const PartitionsStatsResult&); PartitionsStatsResult& operator=(const PartitionsStatsResult&); - PartitionsStatsResult() { + PartitionsStatsResult() : isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~PartitionsStatsResult() throw(); std::map<std::string, std::vector<ColumnStatisticsObj> > partStats; + IsolationLevelCompliance::type isStatsCompliant; + + _PartitionsStatsResult__isset __isset; void __set_partStats(const std::map<std::string, std::vector<ColumnStatisticsObj> > & val); + void __set_isStatsCompliant(const IsolationLevelCompliance::type val); + bool operator == (const PartitionsStatsResult & rhs) const { if (!(partStats == rhs.partStats)) return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const PartitionsStatsResult &rhs) const {
@@ -5723,8 +5893,10 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsStatsResult& } typedef struct _TableStatsRequest__isset { - _TableStatsRequest__isset() : catName(false) {} + _TableStatsRequest__isset() : catName(false), txnId(true), validWriteIdList(false) {} bool catName :1; + bool txnId :1; + bool validWriteIdList :1; } _TableStatsRequest__isset; class TableStatsRequest {
@@ -5732,7 +5904,7 @@ class TableStatsRequest { TableStatsRequest(const TableStatsRequest&); TableStatsRequest& operator=(const TableStatsRequest&); - TableStatsRequest() : dbName(), tblName(), catName() { + TableStatsRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() { } virtual ~TableStatsRequest() throw();
@@ -5740,6 +5912,8 @@ class TableStatsRequest { std::string tblName; std::vector<std::string> colNames; std::string catName; + int64_t txnId; + std::string validWriteIdList; _TableStatsRequest__isset __isset;
@@ -5751,6 +5925,10 @@ class TableStatsRequest { void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator == (const TableStatsRequest & rhs) const { if (!(dbName == rhs.dbName))
@@ -5763,6 +5941,14 @@ class TableStatsRequest { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const TableStatsRequest &rhs) const {
@@ -5786,8 +5972,10 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsRequest& obj) } typedef struct _PartitionsStatsRequest__isset { - _PartitionsStatsRequest__isset() : catName(false) {} + _PartitionsStatsRequest__isset() : catName(false), txnId(true), validWriteIdList(false) {} bool catName :1; + bool txnId :1; + bool validWriteIdList :1; } _PartitionsStatsRequest__isset; class PartitionsStatsRequest {
@@ -5795,7 +5983,7 @@ class PartitionsStatsRequest { PartitionsStatsRequest(const PartitionsStatsRequest&); PartitionsStatsRequest& operator=(const PartitionsStatsRequest&); - PartitionsStatsRequest() : dbName(), tblName(), catName() { + PartitionsStatsRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() { } virtual ~PartitionsStatsRequest() throw();
@@ -5804,6 +5992,8 @@ class PartitionsStatsRequest { std::vector<std::string> colNames; std::vector<std::string> partNames; std::string catName; + int64_t txnId; + std::string validWriteIdList; _PartitionsStatsRequest__isset __isset;
@@ -5817,6 +6007,10 @@ class PartitionsStatsRequest { void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator == (const PartitionsStatsRequest & rhs) const { if (!(dbName == rhs.dbName))
@@ -5831,6 +6025,14 @@ class PartitionsStatsRequest { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const PartitionsStatsRequest &rhs) const {
@@ -5854,8 +6056,9 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsStatsRequest& } typedef struct _AddPartitionsResult__isset { - _AddPartitionsResult__isset() : partitions(false) {} + _AddPartitionsResult__isset() : partitions(false), isStatsCompliant(false) {} bool partitions :1; + bool isStatsCompliant :1; } _AddPartitionsResult__isset; class AddPartitionsResult {
@@ -5863,22 +6066,29 @@ class AddPartitionsResult { AddPartitionsResult(const AddPartitionsResult&); AddPartitionsResult& operator=(const AddPartitionsResult&); - AddPartitionsResult() { + AddPartitionsResult() : isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~AddPartitionsResult() throw(); std::vector<Partition> partitions; + IsolationLevelCompliance::type isStatsCompliant; _AddPartitionsResult__isset __isset; void __set_partitions(const std::vector<Partition> & val); + void __set_isStatsCompliant(const IsolationLevelCompliance::type val); + bool operator == (const AddPartitionsResult & rhs) const { if (__isset.partitions != rhs.__isset.partitions) return false; else if (__isset.partitions && !(partitions == rhs.partitions)) return false; + if (__isset.isStatsCompliant != 
rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const AddPartitionsResult &rhs) const { @@ -5902,9 +6112,11 @@ inline std::ostream& operator<<(std::ostream& out, const AddPartitionsResult& ob } typedef struct _AddPartitionsRequest__isset { - _AddPartitionsRequest__isset() : needResult(true), catName(false) {} + _AddPartitionsRequest__isset() : needResult(true), catName(false), txnId(true), validWriteIdList(false) {} bool needResult :1; bool catName :1; + bool txnId :1; + bool validWriteIdList :1; } _AddPartitionsRequest__isset; class AddPartitionsRequest { @@ -5912,7 +6124,7 @@ class AddPartitionsRequest { AddPartitionsRequest(const AddPartitionsRequest&); AddPartitionsRequest& operator=(const AddPartitionsRequest&); - AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName() { + AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName(), txnId(-1LL), validWriteIdList() { } virtual ~AddPartitionsRequest() throw(); @@ -5922,6 +6134,8 @@ class AddPartitionsRequest { bool ifNotExists; bool needResult; std::string catName; + int64_t txnId; + std::string validWriteIdList; _AddPartitionsRequest__isset __isset; @@ -5937,6 +6151,10 @@ class AddPartitionsRequest { void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator == (const AddPartitionsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5955,6 +6173,14 @@ class AddPartitionsRequest { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const AddPartitionsRequest &rhs) const { @@ -9873,9 +10099,11 @@ inline std::ostream& operator<<(std::ostream& out, const ClientCapabilities& obj } typedef struct _GetTableRequest__isset { - _GetTableRequest__isset() : capabilities(false), catName(false) {} + _GetTableRequest__isset() : capabilities(false), catName(false), txnId(true), validWriteIdList(false) {} bool capabilities :1; bool catName :1; + bool txnId :1; + bool validWriteIdList :1; } _GetTableRequest__isset; class GetTableRequest { @@ -9883,7 +10111,7 @@ class GetTableRequest { GetTableRequest(const GetTableRequest&); GetTableRequest& operator=(const GetTableRequest&); - GetTableRequest() : dbName(), tblName(), catName() { + GetTableRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() { } virtual ~GetTableRequest() throw(); @@ -9891,6 +10119,8 @@ class GetTableRequest { std::string tblName; ClientCapabilities capabilities; std::string catName; + int64_t txnId; + std::string validWriteIdList; _GetTableRequest__isset __isset; @@ -9902,6 +10132,10 @@ class GetTableRequest { void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator == (const GetTableRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -9916,6 +10150,14 @@ class GetTableRequest { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return 
false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const GetTableRequest &rhs) const {
@@ -9938,24 +10180,37 @@ inline std::ostream& operator<<(std::ostream& out, const GetTableRequest& obj) return out; } +typedef struct _GetTableResult__isset { + _GetTableResult__isset() : isStatsCompliant(false) {} + bool isStatsCompliant :1; +} _GetTableResult__isset; class GetTableResult { public: GetTableResult(const GetTableResult&); GetTableResult& operator=(const GetTableResult&); - GetTableResult() { + GetTableResult() : isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~GetTableResult() throw(); Table table; + IsolationLevelCompliance::type isStatsCompliant; + + _GetTableResult__isset __isset; void __set_table(const Table& val); + void __set_isStatsCompliant(const IsolationLevelCompliance::type val); + bool operator == (const GetTableResult & rhs) const { if (!(table == rhs.table)) return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const GetTableResult &rhs) const {
@@ -13186,6 +13441,117 @@ inline std::ostream& operator<<(std::ostream& out, const GetRuntimeStatsRequest& return out; } +typedef struct _AlterPartitionsRequest__isset { + _AlterPartitionsRequest__isset() : txnId(true), validWriteIdList(false) {} + bool txnId :1; + bool validWriteIdList :1; +} _AlterPartitionsRequest__isset; + +class AlterPartitionsRequest { + public: + + AlterPartitionsRequest(const AlterPartitionsRequest&); + AlterPartitionsRequest& operator=(const AlterPartitionsRequest&); + AlterPartitionsRequest() : dbName(), tableName(), txnId(-1LL), validWriteIdList() { + } + + virtual ~AlterPartitionsRequest() throw(); + std::string dbName; + std::string tableName; + std::vector<Partition> partitions; + EnvironmentContext environmentContext; + int64_t txnId; + std::string validWriteIdList; + + _AlterPartitionsRequest__isset __isset; + + void __set_dbName(const std::string& val); + + void __set_tableName(const std::string& val); + + void __set_partitions(const std::vector<Partition> & val); + + void __set_environmentContext(const EnvironmentContext& val); + + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + + bool operator == (const AlterPartitionsRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tableName == rhs.tableName)) + return false; + if (!(partitions == rhs.partitions)) + return false; + if (!(environmentContext == rhs.environmentContext)) + return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; + return true; + } + bool operator != (const AlterPartitionsRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const AlterPartitionsRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; +
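Since AlterPartitionsRequest exposes the usual generated read()/write() pair, it can be round-tripped through any Thrift protocol. The sketch below assumes the Thrift 0.9.x-era C++ runtime that this boost::shared_ptr-style generated code targets; the header paths and the TMemoryBuffer/TBinaryProtocol constructors come from that runtime, not from this patch, and req is the request built earlier:

    #include <thrift/protocol/TBinaryProtocol.h>
    #include <thrift/transport/TBufferTransports.h>

    using apache::thrift::protocol::TBinaryProtocol;
    using apache::thrift::transport::TMemoryBuffer;

    boost::shared_ptr<TMemoryBuffer> buffer(new TMemoryBuffer());
    TBinaryProtocol protocol(buffer);
    req.write(&protocol);      // fields 1-4 always; 5-6 only when their __isset bits are set

    AlterPartitionsRequest decoded;
    decoded.read(&protocol);   // throws TProtocolException(INVALID_DATA) if any
                               // of the four required fields is absent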
&a, AlterPartitionsRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const AlterPartitionsRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class AlterPartitionsResponse { + public: + + AlterPartitionsResponse(const AlterPartitionsResponse&); + AlterPartitionsResponse& operator=(const AlterPartitionsResponse&); + AlterPartitionsResponse() { + } + + virtual ~AlterPartitionsResponse() throw(); + + bool operator == (const AlterPartitionsResponse & /* rhs */) const + { + return true; + } + bool operator != (const AlterPartitionsResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const AlterPartitionsResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(AlterPartitionsResponse &a, AlterPartitionsResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const AlterPartitionsResponse& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _MetaException__isset { _MetaException__isset() : message(false) {} bool message :1; diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index dd3a127013..56e50433a0 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -44,6 +44,8 @@ private static final org.apache.thrift.protocol.TField IF_NOT_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifNotExists", org.apache.thrift.protocol.TType.BOOL, (short)4); private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)5); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +59,8 @@ private boolean ifNotExists; // required private boolean needResult; // optional private String catName; // optional + private long txnId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
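The hunks above extend AddPartitionsRequest with two optional fields: txnId (field 7, generated default -1) and validWriteIdList (field 8). A minimal sketch of a caller populating them, using only the no-arg constructor and the Thrift-generated setters that appear later in this file; the literal values are illustrative, and the exact ValidWriteIdList string format is defined elsewhere in Hive, not in this diff:

    AddPartitionsRequest req = new AddPartitionsRequest();
    req.setDbName("default");                  // required fields, populated exactly as before this patch
    req.setTblName("tbl");
    req.setParts(new ArrayList<Partition>());
    req.setIfNotExists(true);
    req.setTxnId(42L);                         // new optional field 7; remains -1/unset for old callers
    req.setValidWriteIdList(writeIdListStr);   // new optional field 8; caller-supplied snapshot string (illustrative)

Because both fields are marked OPTIONAL in the field metadata, older clients simply leave them unset and the wire format is unchanged for them.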
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +69,9 @@ PARTS((short)3, "parts"), IF_NOT_EXISTS((short)4, "ifNotExists"), NEED_RESULT((short)5, "needResult"), - CAT_NAME((short)6, "catName"); + CAT_NAME((short)6, "catName"), + TXN_ID((short)7, "txnId"), + VALID_WRITE_ID_LIST((short)8, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -92,6 +98,10 @@ public static _Fields findByThriftId(int fieldId) { return NEED_RESULT; case 6: // CAT_NAME return CAT_NAME; + case 7: // TXN_ID + return TXN_ID; + case 8: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -134,8 +144,9 @@ public String getFieldName() { // isset id assignments private static final int __IFNOTEXISTS_ISSET_ID = 0; private static final int __NEEDRESULT_ISSET_ID = 1; + private static final int __TXNID_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -152,6 +163,10 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsRequest.class, metaDataMap); } @@ -159,6 +174,8 @@ public String getFieldName() { public AddPartitionsRequest() { this.needResult = true; + this.txnId = -1L; + } public AddPartitionsRequest( @@ -198,6 +215,10 @@ public AddPartitionsRequest(AddPartitionsRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public AddPartitionsRequest deepCopy() { @@ -214,6 +235,9 @@ public void clear() { this.needResult = true; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; } public String getDbName() { @@ -367,6 +391,51 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void 
setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -417,6 +486,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -440,6 +525,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -463,6 +554,10 @@ public boolean isSet(_Fields field) { return isSetNeedResult(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -534,6 +629,24 @@ public boolean equals(AddPartitionsRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -571,6 +684,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -642,6 +765,26 @@ public int compareTo(AddPartitionsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) 
{ + return lastComparison; + } + } return 0; } @@ -705,6 +848,22 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -825,6 +984,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 8: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -875,6 +1050,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -909,13 +1096,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques if (struct.isSetCatName()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetTxnId()) { + optionals.set(2); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetNeedResult()) { oprot.writeBool(struct.needResult); } if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -939,7 +1138,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.setPartsIsSet(true); struct.ifNotExists = iprot.readBool(); struct.setIfNotExistsIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.needResult = iprot.readBool(); struct.setNeedResultIsSet(true); @@ -948,6 +1147,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(2)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java index fe41b8c711..03d1fc4f7c 100644 --- 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java @@ -39,6 +39,7 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsResult"); private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,10 +48,16 @@ } private List partitions; // optional + private IsolationLevelCompliance isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - PARTITIONS((short)1, "partitions"); + PARTITIONS((short)1, "partitions"), + /** + * + * @see IsolationLevelCompliance + */ + IS_STATS_COMPLIANT((short)2, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -67,6 +74,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // PARTITIONS return PARTITIONS; + case 2: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -107,13 +116,15 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.PARTITIONS}; + private static final _Fields optionals[] = {_Fields.PARTITIONS,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsResult.class, metaDataMap); } @@ -132,6 +143,9 @@ public AddPartitionsResult(AddPartitionsResult other) { } this.partitions = __this__partitions; } + if (other.isSetIsStatsCompliant()) { + this.isStatsCompliant = other.isStatsCompliant; + } } public AddPartitionsResult deepCopy() { @@ -141,6 +155,7 @@ public AddPartitionsResult deepCopy() { @Override public void clear() { this.partitions = null; + this.isStatsCompliant = null; } public int getPartitionsSize() { @@ -181,6 +196,37 @@ public void setPartitionsIsSet(boolean value) { } } + /** + * + * @see IsolationLevelCompliance + */ + public IsolationLevelCompliance getIsStatsCompliant() { + return this.isStatsCompliant; + } + + /** + * + * @see IsolationLevelCompliance + */ + public void 
setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + } + + public void unsetIsStatsCompliant() { + this.isStatsCompliant = null; + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return this.isStatsCompliant != null; + } + + public void setIsStatsCompliantIsSet(boolean value) { + if (!value) { + this.isStatsCompliant = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case PARTITIONS: @@ -191,6 +237,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((IsolationLevelCompliance)value); + } + break; + } } @@ -199,6 +253,9 @@ public Object getFieldValue(_Fields field) { case PARTITIONS: return getPartitions(); + case IS_STATS_COMPLIANT: + return getIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -212,6 +269,8 @@ public boolean isSet(_Fields field) { switch (field) { case PARTITIONS: return isSetPartitions(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -238,6 +297,15 @@ public boolean equals(AddPartitionsResult that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (!this.isStatsCompliant.equals(that.isStatsCompliant)) + return false; + } + return true; } @@ -250,6 +318,11 @@ public int hashCode() { if (present_partitions) list.add(partitions); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant.getValue()); + return list.hashCode(); } @@ -271,6 +344,16 @@ public int compareTo(AddPartitionsResult other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -300,6 +383,16 @@ public String toString() { } first = false; } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + if (this.isStatsCompliant == null) { + sb.append("null"); + } else { + sb.append(this.isStatsCompliant); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -362,6 +455,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -389,6 +490,13 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResul oprot.writeFieldEnd(); } } + if (struct.isStatsCompliant != null) { + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeI32(struct.isStatsCompliant.getValue()); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -410,7 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult if (struct.isSetPartitions()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetIsStatsCompliant()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); @@ -420,12 +531,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult } } } + if (struct.isSetIsStatsCompliant()) { + oprot.writeI32(struct.isStatsCompliant.getValue()); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); @@ -440,6 +554,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult } struct.setPartitionsIsSet(true); } + if (incoming.get(1)) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java index fff212dfd4..fea95c33ae 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java @@ -40,6 +40,7 @@ private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1); private static final org.apache.thrift.protocol.TField PARTS_FOUND_FIELD_DESC = new org.apache.thrift.protocol.TField("partsFound", org.apache.thrift.protocol.TType.I64, (short)2); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,11 +50,17 @@ private List colStats; // required private long partsFound; // required + private IsolationLevelCompliance isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
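AggrStats likewise gains an optional isStatsCompliant enum (field 3, of type IsolationLevelCompliance). Since an older server never sets it, consumers should read it defensively; a sketch, with the acquisition of the struct elided because the client call site is outside this diff:

    AggrStats stats = new AggrStats();         // in practice returned by a metastore stats call (elided)
    IsolationLevelCompliance compliance =
        stats.isSetIsStatsCompliant() ? stats.getIsStatsCompliant() : null;
    // null signals "unknown": the server predates this field or did not report compliance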
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { COL_STATS((short)1, "colStats"), - PARTS_FOUND((short)2, "partsFound"); + PARTS_FOUND((short)2, "partsFound"), + /** + * + * @see IsolationLevelCompliance + */ + IS_STATS_COMPLIANT((short)3, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -72,6 +79,8 @@ public static _Fields findByThriftId(int fieldId) { return COL_STATS; case 2: // PARTS_FOUND return PARTS_FOUND; + case 3: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -114,6 +123,7 @@ public String getFieldName() { // isset id assignments private static final int __PARTSFOUND_ISSET_ID = 0; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -122,6 +132,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); tmpMap.put(_Fields.PARTS_FOUND, new org.apache.thrift.meta_data.FieldMetaData("partsFound", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AggrStats.class, metaDataMap); } @@ -152,6 +164,9 @@ public AggrStats(AggrStats other) { this.colStats = __this__colStats; } this.partsFound = other.partsFound; + if (other.isSetIsStatsCompliant()) { + this.isStatsCompliant = other.isStatsCompliant; + } } public AggrStats deepCopy() { @@ -163,6 +178,7 @@ public void clear() { this.colStats = null; setPartsFoundIsSet(false); this.partsFound = 0; + this.isStatsCompliant = null; } public int getColStatsSize() { @@ -225,6 +241,37 @@ public void setPartsFoundIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARTSFOUND_ISSET_ID, value); } + /** + * + * @see IsolationLevelCompliance + */ + public IsolationLevelCompliance getIsStatsCompliant() { + return this.isStatsCompliant; + } + + /** + * + * @see IsolationLevelCompliance + */ + public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + } + + public void unsetIsStatsCompliant() { + this.isStatsCompliant = null; + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return this.isStatsCompliant != null; + } + + public void setIsStatsCompliantIsSet(boolean value) { + if (!value) { + this.isStatsCompliant = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case COL_STATS: @@ -243,6 +290,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((IsolationLevelCompliance)value); + } + break; + } } @@ -254,6 +309,9 @@ public Object getFieldValue(_Fields field) { 
case PARTS_FOUND: return getPartsFound(); + case IS_STATS_COMPLIANT: + return getIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -269,6 +327,8 @@ public boolean isSet(_Fields field) { return isSetColStats(); case PARTS_FOUND: return isSetPartsFound(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -304,6 +364,15 @@ public boolean equals(AggrStats that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (!this.isStatsCompliant.equals(that.isStatsCompliant)) + return false; + } + return true; } @@ -321,6 +390,11 @@ public int hashCode() { if (present_partsFound) list.add(partsFound); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant.getValue()); + return list.hashCode(); } @@ -352,6 +426,16 @@ public int compareTo(AggrStats other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -383,6 +467,16 @@ public String toString() { sb.append("partsFound:"); sb.append(this.partsFound); first = false; + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + if (this.isStatsCompliant == null) { + sb.append("null"); + } else { + sb.append(this.isStatsCompliant); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -463,6 +557,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AggrStats struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -491,6 +593,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AggrStats struct) oprot.writeFieldBegin(PARTS_FOUND_FIELD_DESC); oprot.writeI64(struct.partsFound); oprot.writeFieldEnd(); + if (struct.isStatsCompliant != null) { + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeI32(struct.isStatsCompliant.getValue()); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -516,6 +625,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) t } } oprot.writeI64(struct.partsFound); + BitSet optionals = new BitSet(); + if (struct.isSetIsStatsCompliant()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetIsStatsCompliant()) { + oprot.writeI32(struct.isStatsCompliant.getValue()); + } } @Override @@ -535,6 +652,11 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) th struct.setColStatsIsSet(true); struct.partsFound = iprot.readI64(); struct.setPartsFoundIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java new file mode 100644 index 0000000000..8d4102fc4a --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java @@ -0,0 +1,966 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterPartitionsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterPartitionsRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)4); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, 
(short)6); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AlterPartitionsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AlterPartitionsRequestTupleSchemeFactory()); + } + + private String dbName; // required + private String tableName; // required + private List partitions; // required + private EnvironmentContext environmentContext; // required + private long txnId; // optional + private String validWriteIdList; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TABLE_NAME((short)2, "tableName"), + PARTITIONS((short)3, "partitions"), + ENVIRONMENT_CONTEXT((short)4, "environmentContext"), + TXN_ID((short)5, "txnId"), + VALID_WRITE_ID_LIST((short)6, "validWriteIdList"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TABLE_NAME + return TABLE_NAME; + case 3: // PARTITIONS + return PARTITIONS; + case 4: // ENVIRONMENT_CONTEXT + return ENVIRONMENT_CONTEXT; + case 5: // TXN_ID + return TXN_ID; + case 6: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __TXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); + tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environmentContext", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterPartitionsRequest.class, metaDataMap); + } + + public AlterPartitionsRequest() { + this.txnId = -1L; + + } + + public AlterPartitionsRequest( + String dbName, + String tableName, + List partitions, + EnvironmentContext environmentContext) + { + this(); + this.dbName = dbName; + this.tableName = tableName; + this.partitions = partitions; + this.environmentContext = environmentContext; + } + + /** + * Performs a deep copy on other. 
+ */ + public AlterPartitionsRequest(AlterPartitionsRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + if (other.isSetPartitions()) { + List __this__partitions = new ArrayList(other.partitions.size()); + for (Partition other_element : other.partitions) { + __this__partitions.add(new Partition(other_element)); + } + this.partitions = __this__partitions; + } + if (other.isSetEnvironmentContext()) { + this.environmentContext = new EnvironmentContext(other.environmentContext); + } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } + } + + public AlterPartitionsRequest deepCopy() { + return new AlterPartitionsRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tableName = null; + this.partitions = null; + this.environmentContext = null; + this.txnId = -1L; + + this.validWriteIdList = null; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTableName() { + return this.tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public int getPartitionsSize() { + return (this.partitions == null) ? 0 : this.partitions.size(); + } + + public java.util.Iterator getPartitionsIterator() { + return (this.partitions == null) ? 
null : this.partitions.iterator(); + } + + public void addToPartitions(Partition elem) { + if (this.partitions == null) { + this.partitions = new ArrayList(); + } + this.partitions.add(elem); + } + + public List getPartitions() { + return this.partitions; + } + + public void setPartitions(List partitions) { + this.partitions = partitions; + } + + public void unsetPartitions() { + this.partitions = null; + } + + /** Returns true if field partitions is set (has been assigned a value) and false otherwise */ + public boolean isSetPartitions() { + return this.partitions != null; + } + + public void setPartitionsIsSet(boolean value) { + if (!value) { + this.partitions = null; + } + } + + public EnvironmentContext getEnvironmentContext() { + return this.environmentContext; + } + + public void setEnvironmentContext(EnvironmentContext environmentContext) { + this.environmentContext = environmentContext; + } + + public void unsetEnvironmentContext() { + this.environmentContext = null; + } + + /** Returns true if field environmentContext is set (has been assigned a value) and false otherwise */ + public boolean isSetEnvironmentContext() { + return this.environmentContext != null; + } + + public void setEnvironmentContextIsSet(boolean value) { + if (!value) { + this.environmentContext = null; + } + } + + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((String)value); + } + break; + + case PARTITIONS: + if (value == null) { + unsetPartitions(); + } else { + setPartitions((List)value); + } + break; + + case ENVIRONMENT_CONTEXT: + if (value == null) { + unsetEnvironmentContext(); + } else { + setEnvironmentContext((EnvironmentContext)value); + } + break; + + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TABLE_NAME: + return getTableName(); + + case PARTITIONS: + return getPartitions(); + + case ENVIRONMENT_CONTEXT: + return 
getEnvironmentContext(); + + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TABLE_NAME: + return isSetTableName(); + case PARTITIONS: + return isSetPartitions(); + case ENVIRONMENT_CONTEXT: + return isSetEnvironmentContext(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AlterPartitionsRequest) + return this.equals((AlterPartitionsRequest)that); + return false; + } + + public boolean equals(AlterPartitionsRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + boolean this_present_partitions = true && this.isSetPartitions(); + boolean that_present_partitions = true && that.isSetPartitions(); + if (this_present_partitions || that_present_partitions) { + if (!(this_present_partitions && that_present_partitions)) + return false; + if (!this.partitions.equals(that.partitions)) + return false; + } + + boolean this_present_environmentContext = true && this.isSetEnvironmentContext(); + boolean that_present_environmentContext = true && that.isSetEnvironmentContext(); + if (this_present_environmentContext || that_present_environmentContext) { + if (!(this_present_environmentContext && that_present_environmentContext)) + return false; + if (!this.environmentContext.equals(that.environmentContext)) + return false; + } + + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + boolean present_partitions = true && 
(isSetPartitions()); + list.add(present_partitions); + if (present_partitions) + list.add(partitions); + + boolean present_environmentContext = true && (isSetEnvironmentContext()); + list.add(present_environmentContext); + if (present_environmentContext) + list.add(environmentContext); + + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + + return list.hashCode(); + } + + @Override + public int compareTo(AlterPartitionsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPartitions()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEnvironmentContext()).compareTo(other.isSetEnvironmentContext()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEnvironmentContext()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environmentContext, other.environmentContext); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("AlterPartitionsRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = 
false; + if (!first) sb.append(", "); + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + sb.append(this.tableName); + } + first = false; + if (!first) sb.append(", "); + sb.append("partitions:"); + if (this.partitions == null) { + sb.append("null"); + } else { + sb.append(this.partitions); + } + first = false; + if (!first) sb.append(", "); + sb.append("environmentContext:"); + if (this.environmentContext == null) { + sb.append("null"); + } else { + sb.append(this.environmentContext); + } + first = false; + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTableName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString()); + } + + if (!isSetPartitions()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'partitions' is unset! Struct:" + toString()); + } + + if (!isSetEnvironmentContext()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'environmentContext' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + if (environmentContext != null) { + environmentContext.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
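Taken together, the generated class above wraps the alter_partitions arguments into a single request struct with four required fields and two optional ones. A sketch of constructing it via the generated four-argument constructor shown earlier in this file (names illustrative); validate(), shown just above, rejects any request missing a required field:

    List<Partition> parts = new ArrayList<Partition>();
    String writeIds = "";                      // caller-supplied ValidWriteIdList string (illustrative)
    AlterPartitionsRequest req =
        new AlterPartitionsRequest("default", "tbl", parts, new EnvironmentContext());
    req.setTxnId(42L);                         // optional; generated default is -1
    req.setValidWriteIdList(writeIds);         // optional; unset fields are skipped on the wire
    req.validate();                            // throws TProtocolException if a required field is unset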
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AlterPartitionsRequestStandardSchemeFactory implements SchemeFactory { + public AlterPartitionsRequestStandardScheme getScheme() { + return new AlterPartitionsRequestStandardScheme(); + } + } + + private static class AlterPartitionsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // PARTITIONS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list928 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list928.size); + Partition _elem929; + for (int _i930 = 0; _i930 < _list928.size; ++_i930) + { + _elem929 = new Partition(); + _elem929.read(iprot); + struct.partitions.add(_elem929); + } + iprot.readListEnd(); + } + struct.setPartitionsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // ENVIRONMENT_CONTEXT + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.environmentContext = new EnvironmentContext(); + struct.environmentContext.read(iprot); + struct.setEnvironmentContextIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + if (struct.partitions != null) { + 
oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); + for (Partition _iter931 : struct.partitions) + { + _iter931.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.environmentContext != null) { + oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC); + struct.environmentContext.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AlterPartitionsRequestTupleSchemeFactory implements SchemeFactory { + public AlterPartitionsRequestTupleScheme getScheme() { + return new AlterPartitionsRequestTupleScheme(); + } + } + + private static class AlterPartitionsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tableName); + { + oprot.writeI32(struct.partitions.size()); + for (Partition _iter932 : struct.partitions) + { + _iter932.write(oprot); + } + } + struct.environmentContext.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetTxnId()) { + optionals.set(0); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + { + org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list933.size); + Partition _elem934; + for (int _i935 = 0; _i935 < _list933.size; ++_i935) + { + _elem934 = new Partition(); + _elem934.read(iprot); + struct.partitions.add(_elem934); + } + } + struct.setPartitionsIsSet(true); + struct.environmentContext = new EnvironmentContext(); + struct.environmentContext.read(iprot); + struct.setEnvironmentContextIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(1)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java new file mode 100644 index 0000000000..8e03462ddb --- /dev/null +++ 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterPartitionsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterPartitionsResponse"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AlterPartitionsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AlterPartitionsResponseTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterPartitionsResponse.class, metaDataMap); + } + + public AlterPartitionsResponse() { + } + + /** + * Performs a deep copy on other. + */ + public AlterPartitionsResponse(AlterPartitionsResponse other) { + } + + public AlterPartitionsResponse deepCopy() { + return new AlterPartitionsResponse(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AlterPartitionsResponse) + return this.equals((AlterPartitionsResponse)that); + return false; + } + + public boolean equals(AlterPartitionsResponse that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(AlterPartitionsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("AlterPartitionsResponse("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch 
(org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AlterPartitionsResponseStandardSchemeFactory implements SchemeFactory { + public AlterPartitionsResponseStandardScheme getScheme() { + return new AlterPartitionsResponseStandardScheme(); + } + } + + private static class AlterPartitionsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AlterPartitionsResponseTupleSchemeFactory implements SchemeFactory { + public AlterPartitionsResponseTupleScheme getScheme() { + return new AlterPartitionsResponseTupleScheme(); + } + } + + private static class AlterPartitionsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java index 6ce7214c9d..9fd43cc32e 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java @@ -40,6 +40,9 @@ private static final org.apache.thrift.protocol.TField STATS_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("statsDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("statsObj", org.apache.thrift.protocol.TType.LIST, (short)2); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,11 +52,21 @@ private ColumnStatisticsDesc statsDesc; // required private List statsObj; // required + private long txnId; // 
optional + private String validWriteIdList; // optional + private IsolationLevelCompliance isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { STATS_DESC((short)1, "statsDesc"), - STATS_OBJ((short)2, "statsObj"); + STATS_OBJ((short)2, "statsObj"), + TXN_ID((short)3, "txnId"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"), + /** + * + * @see IsolationLevelCompliance + */ + IS_STATS_COMPLIANT((short)5, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -72,6 +85,12 @@ public static _Fields findByThriftId(int fieldId) { return STATS_DESC; case 2: // STATS_OBJ return STATS_OBJ; + case 3: // TXN_ID + return TXN_ID; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; + case 5: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -112,6 +131,9 @@ public String getFieldName() { } // isset id assignments + private static final int __TXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -120,11 +142,19 @@ public String getFieldName() { tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("statsObj", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatistics.class, metaDataMap); } public ColumnStatistics() { + this.txnId = -1L; + } public ColumnStatistics( @@ -140,6 +170,7 @@ public ColumnStatistics( * Performs a deep copy on other. 
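
ColumnStatistics picks up the same transactional envelope as the request objects in this patch: txnId and validWriteIdList describe the snapshot the statistics were written or read under, and isStatsCompliant (the YES/NO/UNKNOWN IsolationLevelCompliance enum introduced later in the diff) reports whether the returned statistics are known to be valid for that snapshot. A consumer would presumably gate on it along these lines (illustrative sketch, not code from this patch):

  // Treat stats as authoritative only when the metastore vouches for them.
  static boolean statsUsable(ColumnStatistics stats) {
    return stats.isSetIsStatsCompliant()
        && stats.getIsStatsCompliant() == IsolationLevelCompliance.YES;
  }
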
*/ public ColumnStatistics(ColumnStatistics other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetStatsDesc()) { this.statsDesc = new ColumnStatisticsDesc(other.statsDesc); } @@ -150,6 +181,13 @@ public ColumnStatistics(ColumnStatistics other) { } this.statsObj = __this__statsObj; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } + if (other.isSetIsStatsCompliant()) { + this.isStatsCompliant = other.isStatsCompliant; + } } public ColumnStatistics deepCopy() { @@ -160,6 +198,10 @@ public ColumnStatistics deepCopy() { public void clear() { this.statsDesc = null; this.statsObj = null; + this.txnId = -1L; + + this.validWriteIdList = null; + this.isStatsCompliant = null; } public ColumnStatisticsDesc getStatsDesc() { @@ -223,6 +265,82 @@ public void setStatsObjIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + + /** + * + * @see IsolationLevelCompliance + */ + public IsolationLevelCompliance getIsStatsCompliant() { + return this.isStatsCompliant; + } + + /** + * + * @see IsolationLevelCompliance + */ + public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + } + + public void unsetIsStatsCompliant() { + this.isStatsCompliant = null; + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return this.isStatsCompliant != null; + } + + public void setIsStatsCompliantIsSet(boolean value) { + if (!value) { + this.isStatsCompliant = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case STATS_DESC: @@ -241,6 +359,30 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((IsolationLevelCompliance)value); + } + break; + } } @@ -252,6 +394,15 @@ public Object getFieldValue(_Fields field) { case STATS_OBJ: return getStatsObj(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + + case 
IS_STATS_COMPLIANT: + return getIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -267,6 +418,12 @@ public boolean isSet(_Fields field) { return isSetStatsDesc(); case STATS_OBJ: return isSetStatsObj(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -302,6 +459,33 @@ public boolean equals(ColumnStatistics that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (!this.isStatsCompliant.equals(that.isStatsCompliant)) + return false; + } + return true; } @@ -319,6 +503,21 @@ public int hashCode() { if (present_statsObj) list.add(statsObj); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant.getValue()); + return list.hashCode(); } @@ -350,6 +549,36 @@ public int compareTo(ColumnStatistics other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -385,6 +614,32 @@ public String toString() { sb.append(this.statsObj); } first = false; + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + 
if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + if (this.isStatsCompliant == null) { + sb.append("null"); + } else { + sb.append(this.isStatsCompliant); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -415,6 +670,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -467,6 +724,30 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatistics st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -497,6 +778,25 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatistics s } oprot.writeFieldEnd(); } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } + if (struct.isStatsCompliant != null) { + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeI32(struct.isStatsCompliant.getValue()); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -522,6 +822,26 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics st _iter272.write(oprot); } } + BitSet optionals = new BitSet(); + if (struct.isSetTxnId()) { + optionals.set(0); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(1); + } + if (struct.isSetIsStatsCompliant()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } + if (struct.isSetIsStatsCompliant()) { + 
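
The tuple scheme encodes optionals positionally: a leading BitSet announces which of the three optional fields follow, then the bare values are written in field order, enums as their I32 ordinal. The matching read side, shown just below, can be paraphrased through the public setters, which raise the isset bits as a side effect (sketch only):

  BitSet incoming = iprot.readBitSet(3);  // one bit per optional, in declaration order
  if (incoming.get(0)) {
    struct.setTxnId(iprot.readI64());
  }
  if (incoming.get(1)) {
    struct.setValidWriteIdList(iprot.readString());
  }
  if (incoming.get(2)) {
    struct.setIsStatsCompliant(IsolationLevelCompliance.findByValue(iprot.readI32()));
  }
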
oprot.writeI32(struct.isStatsCompliant.getValue()); + } } @Override @@ -542,6 +862,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics str } } struct.setStatsObjIsSet(true); + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(1)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } + if (incoming.get(2)) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java index 3c88d8fc6d..821049e11f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java @@ -42,6 +42,8 @@ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +55,17 @@ private String tblName; // required private ClientCapabilities capabilities; // optional private String catName; // optional + private long txnId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
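
GetTableRequest now lets a reader pin the snapshot it is reading under: besides dbName/tblName/capabilities/catName, a client can attach its transaction id and serialized valid-write-id list. A caller-side sketch using only the setters added in this diff (the surrounding variable names are assumptions):

  GetTableRequest req = new GetTableRequest();
  req.setDbName("default");
  req.setTblName("acid_tbl");
  req.setTxnId(currentTxnId);  // long id of the open transaction
  // validWriteIds is an org.apache.hadoop.hive.common.ValidWriteIdList for this table
  req.setValidWriteIdList(validWriteIds.writeToString());

When the two fields are left unset, the request serializes exactly as before the patch, since both are optional and txnId only hits the wire once setTxnId() has flipped its isset bit.
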
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), CAPABILITIES((short)3, "capabilities"), - CAT_NAME((short)4, "catName"); + CAT_NAME((short)4, "catName"), + TXN_ID((short)5, "txnId"), + VALID_WRITE_ID_LIST((short)6, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -82,6 +88,10 @@ public static _Fields findByThriftId(int fieldId) { return CAPABILITIES; case 4: // CAT_NAME return CAT_NAME; + case 5: // TXN_ID + return TXN_ID; + case 6: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -122,7 +132,9 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME}; + private static final int __TXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -134,11 +146,17 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap); } public GetTableRequest() { + this.txnId = -1L; + } public GetTableRequest( @@ -154,6 +172,7 @@ public GetTableRequest( * Performs a deep copy on other. 
*/ public GetTableRequest(GetTableRequest other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -166,6 +185,10 @@ public GetTableRequest(GetTableRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public GetTableRequest deepCopy() { @@ -178,6 +201,9 @@ public void clear() { this.tblName = null; this.capabilities = null; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; } public String getDbName() { @@ -272,6 +298,51 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -306,6 +377,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -323,6 +410,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -342,6 +435,10 @@ public boolean isSet(_Fields field) { return isSetCapabilities(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -395,6 +492,24 @@ public boolean equals(GetTableRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + 
return false; + } + return true; } @@ -422,6 +537,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -473,6 +598,26 @@ public int compareTo(GetTableRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -528,6 +673,22 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -558,6 +719,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
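
One wire-compatibility point worth flagging in the tuple scheme below: writeBitSet/readBitSet widen from 2 to 4 slots to cover the two new optionals. The tuple encoding carries no field ids, so reader and writer must be generated from the same IDL; it is the field-tagged standard scheme that tolerates version skew:

  // Scheme selection in generated code (sketch):
  //   TBinaryProtocol / TCompactProtocol -> StandardScheme (field ids on the wire, skew-tolerant)
  //   TTupleProtocol                     -> TupleScheme    (positional + bitset, both sides must match)
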
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -615,6 +778,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableRequest str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -652,6 +831,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableRequest st oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -678,13 +869,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest str if (struct.isSetCatName()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetTxnId()) { + optionals.set(2); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetCapabilities()) { struct.capabilities.write(oprot); } if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -694,7 +897,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru struct.setDbNameIsSet(true); struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.capabilities = new ClientCapabilities(); struct.capabilities.read(iprot); @@ -704,6 +907,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(2)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java index 968e250f0b..80aff928ac 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java @@ -39,6 +39,7 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("GetTableResult"); private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,10 +48,16 @@ } private Table table; // required + private IsolationLevelCompliance isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE((short)1, "table"); + TABLE((short)1, "table"), + /** + * + * @see IsolationLevelCompliance + */ + IS_STATS_COMPLIANT((short)2, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -67,6 +74,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // TABLE return TABLE; + case 2: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -107,11 +116,14 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableResult.class, metaDataMap); } @@ -133,6 +145,9 @@ public GetTableResult(GetTableResult other) { if (other.isSetTable()) { this.table = new Table(other.table); } + if (other.isSetIsStatsCompliant()) { + this.isStatsCompliant = other.isStatsCompliant; + } } public GetTableResult deepCopy() { @@ -142,6 +157,7 @@ public GetTableResult deepCopy() { @Override public void clear() { this.table = null; + this.isStatsCompliant = null; } public Table getTable() { @@ -167,6 +183,37 @@ public void setTableIsSet(boolean value) { } } + /** + * + * @see IsolationLevelCompliance + */ + public IsolationLevelCompliance getIsStatsCompliant() { + return this.isStatsCompliant; + } + + /** + * + * @see IsolationLevelCompliance + */ + public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + } + + public void unsetIsStatsCompliant() { + this.isStatsCompliant = null; + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return this.isStatsCompliant != null; + } + + public void setIsStatsCompliantIsSet(boolean value) { + if (!value) { + this.isStatsCompliant = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch 
(field) { case TABLE: @@ -177,6 +224,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((IsolationLevelCompliance)value); + } + break; + } } @@ -185,6 +240,9 @@ public Object getFieldValue(_Fields field) { case TABLE: return getTable(); + case IS_STATS_COMPLIANT: + return getIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -198,6 +256,8 @@ public boolean isSet(_Fields field) { switch (field) { case TABLE: return isSetTable(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -224,6 +284,15 @@ public boolean equals(GetTableResult that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (!this.isStatsCompliant.equals(that.isStatsCompliant)) + return false; + } + return true; } @@ -236,6 +305,11 @@ public int hashCode() { if (present_table) list.add(table); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant.getValue()); + return list.hashCode(); } @@ -257,6 +331,16 @@ public int compareTo(GetTableResult other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -284,6 +368,16 @@ public String toString() { sb.append(this.table); } first = false; + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + if (this.isStatsCompliant == null) { + sb.append("null"); + } else { + sb.append(this.isStatsCompliant); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -343,6 +437,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableResult stru org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -361,6 +463,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableResult str struct.table.write(oprot); oprot.writeFieldEnd(); } + if (struct.isStatsCompliant != null) { + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeI32(struct.isStatsCompliant.getValue()); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -379,6 +488,14 @@ public GetTableResultTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, GetTableResult struct) throws org.apache.thrift.TException { TTupleProtocol oprot 
= (TTupleProtocol) prot;
       struct.table.write(oprot);
+      BitSet optionals = new BitSet();
+      if (struct.isSetIsStatsCompliant()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetIsStatsCompliant()) {
+        oprot.writeI32(struct.isStatsCompliant.getValue());
+      }
     }

     @Override
@@ -387,6 +504,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableResult struc
       struct.table = new Table();
       struct.table.read(iprot);
       struct.setTableIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+        struct.setIsStatsCompliantIsSet(true);
+      }
     }
   }

diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java
new file mode 100644
index 0000000000..cb2559f439
--- /dev/null
+++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum IsolationLevelCompliance implements org.apache.thrift.TEnum {
+  YES(1),
+  NO(2),
+  UNKNOWN(3);
+
+  private final int value;
+
+  private IsolationLevelCompliance(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static IsolationLevelCompliance findByValue(int value) {
+    switch (value) {
+      case 1:
+        return YES;
+      case 2:
+        return NO;
+      case 3:
+        return UNKNOWN;
+      default:
+        return null;
+    }
+  }
+}

diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
index 51f809a0f8..5b40d2f55a 100644
--- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
+++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
@@ -47,6 +47,9 @@
   private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)7);
   private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)8);
   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9);
+  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)10);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)11);
+  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)12);

   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -63,6 +66,9 @@
   private Map<String,String> parameters; // required
   private PrincipalPrivilegeSet privileges; // optional
   private String catName; // optional
+  private long txnId; // optional
+  private String validWriteIdList; // optional
+  private IsolationLevelCompliance isStatsCompliant; // optional

   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
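
findByValue, just above, returns null for any ordinal this build does not recognize, which is Thrift's forward-compatibility escape hatch for enums; a defensive caller maps that to UNKNOWN (illustrative, with wireValue assumed to be the raw I32 from the protocol):

  IsolationLevelCompliance c = IsolationLevelCompliance.findByValue(wireValue);
  if (c == null) {
    c = IsolationLevelCompliance.UNKNOWN;  // value from a newer schema than this client
  }

Partition, whose diff starts above, then threads the same optional trio (txnId, validWriteIdList, isStatsCompliant) through fields 10 to 12.
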
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -74,7 +80,14 @@ SD((short)6, "sd"), PARAMETERS((short)7, "parameters"), PRIVILEGES((short)8, "privileges"), - CAT_NAME((short)9, "catName"); + CAT_NAME((short)9, "catName"), + TXN_ID((short)10, "txnId"), + VALID_WRITE_ID_LIST((short)11, "validWriteIdList"), + /** + * + * @see IsolationLevelCompliance + */ + IS_STATS_COMPLIANT((short)12, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -107,6 +120,12 @@ public static _Fields findByThriftId(int fieldId) { return PRIVILEGES; case 9: // CAT_NAME return CAT_NAME; + case 10: // TXN_ID + return TXN_ID; + case 11: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; + case 12: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -149,8 +168,9 @@ public String getFieldName() { // isset id assignments private static final int __CREATETIME_ISSET_ID = 0; private static final int __LASTACCESSTIME_ISSET_ID = 1; + private static final int __TXNID_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -175,11 +195,19 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Partition.class, metaDataMap); } public Partition() { + this.txnId = -1L; + } public Partition( @@ -233,6 +261,13 @@ public Partition(Partition other) { if (other.isSetCatName()) { this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.catName); } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } + if (other.isSetIsStatsCompliant()) { + this.isStatsCompliant = other.isStatsCompliant; + } } public Partition deepCopy() { @@ -252,6 +287,10 @@ public void clear() { this.parameters = null; this.privileges = null; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; + this.isStatsCompliant = null; } public int 
getValuesSize() { @@ -485,6 +524,82 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + + /** + * + * @see IsolationLevelCompliance + */ + public IsolationLevelCompliance getIsStatsCompliant() { + return this.isStatsCompliant; + } + + /** + * + * @see IsolationLevelCompliance + */ + public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + } + + public void unsetIsStatsCompliant() { + this.isStatsCompliant = null; + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return this.isStatsCompliant != null; + } + + public void setIsStatsCompliantIsSet(boolean value) { + if (!value) { + this.isStatsCompliant = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case VALUES: @@ -559,6 +674,30 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((IsolationLevelCompliance)value); + } + break; + } } @@ -591,6 +730,15 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + + case IS_STATS_COMPLIANT: + return getIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -620,6 +768,12 @@ public boolean isSet(_Fields field) { return isSetPrivileges(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -718,6 +872,33 @@ public boolean equals(Partition that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean 
this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (!this.isStatsCompliant.equals(that.isStatsCompliant)) + return false; + } + return true; } @@ -770,6 +951,21 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant.getValue()); + return list.hashCode(); } @@ -871,6 +1067,36 @@ public int compareTo(Partition other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -958,6 +1184,32 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + if (this.isStatsCompliant == null) { + sb.append("null"); + } else { + sb.append(this.isStatsCompliant); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1105,6 +1357,30 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 10: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + 
struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 11: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 12: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1178,6 +1454,25 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct) oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } + if (struct.isStatsCompliant != null) { + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeI32(struct.isStatsCompliant.getValue()); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1223,7 +1518,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetCatName()) { optionals.set(8); } - oprot.writeBitSet(optionals, 9); + if (struct.isSetTxnId()) { + optionals.set(9); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(10); + } + if (struct.isSetIsStatsCompliant()) { + optionals.set(11); + } + oprot.writeBitSet(optionals, 12); if (struct.isSetValues()) { { oprot.writeI32(struct.values.size()); @@ -1264,12 +1568,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeI32(struct.isStatsCompliant.getValue()); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(9); + BitSet incoming = iprot.readBitSet(12); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -1328,6 +1641,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) th struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString()); struct.setCatNameIsSet(true); } + if (incoming.get(9)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(10)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } + if (incoming.get(11)) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + 
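Taken together, the regenerated Partition accessors give the three new fields the usual Thrift optional-field semantics, and the field ids (10, 11, 12) suggest the underlying IDL change was simply appending something like `optional i64 txnId=-1`, `optional string validWriteIdList`, and `optional IsolationLevelCompliance isStatsCompliant` to the Partition struct (the .thrift file itself is not part of this hunk, so that is an inference). A minimal usage sketch against only the generated API shown above:

    import org.apache.hadoop.hive.metastore.api.Partition;

    public class TxnFieldsDemo {
      public static void main(String[] args) {
        Partition p = new Partition();
        // The no-arg constructor assigns the -1L sentinel but does not flip the
        // isset bit, so a fresh object still reports txnId as unset.
        System.out.println(p.isSetTxnId());   // false
        System.out.println(p.getTxnId());     // -1
        p.setTxnId(42L);                      // stores the value and sets the bit
        p.setValidWriteIdList("db.tbl:...");  // illustrative value, format not shown in the patch
        System.out.println(p.isSetTxnId());   // true
        p.unsetTxnId();                       // clears only the bit...
        System.out.println(p.getTxnId());     // ...42 remains in the field's storage
      }
    }
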
struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java index 247fdaa5ac..bc625b04a6 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java @@ -44,6 +44,9 @@ private static final org.apache.thrift.protocol.TField SHARED_SDPARTITION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sharedSDPartitionSpec", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final org.apache.thrift.protocol.TField PARTITION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionList", org.apache.thrift.protocol.TType.STRUCT, (short)5); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +60,9 @@ private PartitionSpecWithSharedSD sharedSDPartitionSpec; // optional private PartitionListComposingSpec partitionList; // optional private String catName; // optional + private long txnId; // optional + private String validWriteIdList; // optional + private IsolationLevelCompliance isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +71,14 @@ ROOT_PATH((short)3, "rootPath"), SHARED_SDPARTITION_SPEC((short)4, "sharedSDPartitionSpec"), PARTITION_LIST((short)5, "partitionList"), - CAT_NAME((short)6, "catName"); + CAT_NAME((short)6, "catName"), + TXN_ID((short)7, "txnId"), + VALID_WRITE_ID_LIST((short)8, "validWriteIdList"), + /** + * + * @see IsolationLevelCompliance + */ + IS_STATS_COMPLIANT((short)9, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -92,6 +105,12 @@ public static _Fields findByThriftId(int fieldId) { return PARTITION_LIST; case 6: // CAT_NAME return CAT_NAME; + case 7: // TXN_ID + return TXN_ID; + case 8: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; + case 9: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -132,7 +151,9 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME}; + private static final int __TXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -148,11 +169,19 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionListComposingSpec.class))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionSpec.class, metaDataMap); } public PartitionSpec() { + this.txnId = -1L; + } public PartitionSpec( @@ -170,6 +199,7 @@ public PartitionSpec( * Performs a deep copy on other. 
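PartitionSpec previously had no primitive fields, so this change also introduces its first `__isset_bitfield` (with `__TXNID_ISSET_ID = 0`). The generated code delegates the bit twiddling to org.apache.thrift.EncodingUtils; the following self-contained mirror of those helpers is shown only to make the mechanics concrete, and is my re-implementation, not the library source:

    public final class IssetBits {
      // Mirrors EncodingUtils.setBit: record whether primitive field #position was assigned.
      public static byte setBit(byte bitfield, int position, boolean value) {
        return value ? (byte) (bitfield | (1 << position)) : clearBit(bitfield, position);
      }
      // Mirrors EncodingUtils.clearBit: what unsetTxnId() relies on.
      public static byte clearBit(byte bitfield, int position) {
        return (byte) (bitfield & ~(1 << position));
      }
      // Mirrors EncodingUtils.testBit: what isSetTxnId() relies on.
      public static boolean testBit(byte bitfield, int position) {
        return (bitfield & (1 << position)) != 0;
      }
      public static void main(String[] args) {
        byte bits = 0;
        bits = setBit(bits, 0, true);           // __TXNID_ISSET_ID == 0 here
        System.out.println(testBit(bits, 0));   // true
        bits = clearBit(bits, 0);
        System.out.println(testBit(bits, 0));   // false
      }
    }

A single byte covers up to eight such flags, which is still enough everywhere in this patch; Table, the largest consumer, now uses six of them (`__TXNID_ISSET_ID = 5`).
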
*/ public PartitionSpec(PartitionSpec other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -188,6 +218,13 @@ public PartitionSpec(PartitionSpec other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } + if (other.isSetIsStatsCompliant()) { + this.isStatsCompliant = other.isStatsCompliant; + } } public PartitionSpec deepCopy() { @@ -202,6 +239,10 @@ public void clear() { this.sharedSDPartitionSpec = null; this.partitionList = null; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; + this.isStatsCompliant = null; } public String getDbName() { @@ -342,6 +383,82 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + + /** + * + * @see IsolationLevelCompliance + */ + public IsolationLevelCompliance getIsStatsCompliant() { + return this.isStatsCompliant; + } + + /** + * + * @see IsolationLevelCompliance + */ + public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + } + + public void unsetIsStatsCompliant() { + this.isStatsCompliant = null; + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return this.isStatsCompliant != null; + } + + public void setIsStatsCompliantIsSet(boolean value) { + if (!value) { + this.isStatsCompliant = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -392,6 +509,30 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((IsolationLevelCompliance)value); + } + break; + } } @@ -415,6 +556,15 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + + case IS_STATS_COMPLIANT: + return getIsStatsCompliant(); 
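One subtlety of the enum-typed field above: the generated reader stores whatever `IsolationLevelCompliance.findByValue()` returns, and Thrift-generated `findByValue(int)` yields null for ordinals the local class does not define. Because `isSetIsStatsCompliant()` is null-based, and `setIsStatsCompliantIsSet(true)` is effectively a no-op for object fields (it only nulls the field when passed false), a value written by a peer with a newer enum quietly reads back as unset rather than failing. A sketch, using 9999 as a stand-in for a hypothetical future ordinal:

    import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;

    public class UnknownEnumDemo {
      public static void main(String[] args) {
        IsolationLevelCompliance c = IsolationLevelCompliance.findByValue(9999);
        System.out.println(c);   // null -> the bean will report !isSetIsStatsCompliant()
      }
    }
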
+ } throw new IllegalStateException(); } @@ -438,6 +588,12 @@ public boolean isSet(_Fields field) { return isSetPartitionList(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -509,6 +665,33 @@ public boolean equals(PartitionSpec that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (!this.isStatsCompliant.equals(that.isStatsCompliant)) + return false; + } + return true; } @@ -546,6 +729,21 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant.getValue()); + return list.hashCode(); } @@ -617,6 +815,36 @@ public int compareTo(PartitionSpec other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -690,6 +918,32 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + 
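The equals()/hashCode() clauses above give each new optional field "both unset, or both set and equal" semantics, and hashCode() folds in the enum's wire value (`getValue()`) rather than the constant itself, so the hash stays stable even if the enum constants are ever reordered in source. A small check against that contract, assuming the regenerated beans are on the classpath:

    import org.apache.hadoop.hive.metastore.api.PartitionSpec;

    public class OptionalEqualsDemo {
      public static void main(String[] args) {
        PartitionSpec a = new PartitionSpec();
        PartitionSpec b = new PartitionSpec();
        System.out.println(a.equals(b));   // true: txnId unset on both sides
        a.setTxnId(7L);
        System.out.println(a.equals(b));   // false: set on one side only
        b.setTxnId(7L);
        System.out.println(a.equals(b));   // true again: set and equal
      }
    }
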
sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + if (this.isStatsCompliant == null) { + sb.append("null"); + } else { + sb.append(this.isStatsCompliant); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -715,6 +969,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -789,6 +1045,30 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpec struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 8: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 9: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -838,6 +1118,25 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpec stru oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } + if (struct.isStatsCompliant != null) { + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeI32(struct.isStatsCompliant.getValue()); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -874,7 +1173,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetCatName()) { optionals.set(5); } - oprot.writeBitSet(optionals, 6); + if (struct.isSetTxnId()) { + optionals.set(6); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(7); + } + if (struct.isSetIsStatsCompliant()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -893,12 +1201,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if 
(struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeI32(struct.isStatsCompliant.getValue()); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(6); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -925,6 +1242,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(6)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(7)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } + if (incoming.get(8)) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index 91cf567e74..a298b89925 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -43,6 +43,8 @@ private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +57,8 @@ private List colNames; // required private List partNames; // required private String catName; // optional + private long txnId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
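For the standard (binary) scheme, appending optional fields this way is forward compatible: a reader generated from the old IDL has no case for field ids 7-9, falls into the `default:` branch, and discards them with TProtocolUtil.skip, while writers only emit the fields when set. A minimal round trip of that skip behavior at the raw protocol level, assuming libthrift's TMemoryBuffer scratch transport (struct and field names here are illustrative):

    import org.apache.thrift.protocol.*;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class SkipDemo {
      public static void main(String[] args) throws Exception {
        TMemoryBuffer buf = new TMemoryBuffer(128);
        TBinaryProtocol out = new TBinaryProtocol(buf);
        // A "new" writer emits an optional field the old schema never defined.
        out.writeStructBegin(new TStruct("PartitionsStatsRequest"));
        out.writeFieldBegin(new TField("txnId", TType.I64, (short) 6));
        out.writeI64(42L);
        out.writeFieldEnd();
        out.writeFieldStop();
        out.writeStructEnd();
        // An "old" reader sees field id 6, has no case for it, and skips it.
        TBinaryProtocol in = new TBinaryProtocol(buf);
        in.readStructBegin();
        TField f = in.readFieldBegin();
        TProtocolUtil.skip(in, f.type);          // the generated reader's default branch
        in.readFieldEnd();
        f = in.readFieldBegin();
        System.out.println(f.type == TType.STOP); // true: nothing left but the stop byte
        in.readStructEnd();
      }
    }
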
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +66,9 @@ TBL_NAME((short)2, "tblName"), COL_NAMES((short)3, "colNames"), PART_NAMES((short)4, "partNames"), - CAT_NAME((short)5, "catName"); + CAT_NAME((short)5, "catName"), + TXN_ID((short)6, "txnId"), + VALID_WRITE_ID_LIST((short)7, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -87,6 +93,10 @@ public static _Fields findByThriftId(int fieldId) { return PART_NAMES; case 5: // CAT_NAME return CAT_NAME; + case 6: // TXN_ID + return TXN_ID; + case 7: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -127,7 +137,9 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.CAT_NAME}; + private static final int __TXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -143,11 +155,17 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap); } public PartitionsStatsRequest() { + this.txnId = -1L; + } public PartitionsStatsRequest( @@ -167,6 +185,7 @@ public PartitionsStatsRequest( * Performs a deep copy on other. 
*/ public PartitionsStatsRequest(PartitionsStatsRequest other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -184,6 +203,10 @@ public PartitionsStatsRequest(PartitionsStatsRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public PartitionsStatsRequest deepCopy() { @@ -197,6 +220,9 @@ public void clear() { this.colNames = null; this.partNames = null; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; } public String getDbName() { @@ -344,6 +370,51 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -386,6 +457,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -406,6 +493,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -427,6 +520,10 @@ public boolean isSet(_Fields field) { return isSetPartNames(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -489,6 +586,24 @@ public boolean equals(PartitionsStatsRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if 
(!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -521,6 +636,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -582,6 +707,26 @@ public int compareTo(PartitionsStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -643,6 +788,22 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -678,6 +839,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
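That generated remark is accurate: java.io deserialization allocates the object without invoking the bean's own constructors, so the `txnId = -1L` default from the no-arg constructor is never applied on this path; the state is instead reconstituted by re-reading the bean's compact-protocol payload, and the bitfield must be cleared by hand first. The same shape reduced to a hypothetical minimal class (names illustrative, not from the patch):

    import java.io.*;

    public class CustomSerialized implements Serializable {
      private byte __isset_bitfield = 0;
      private long txnId = -1L;                 // constructor-path default only

      private void writeObject(ObjectOutputStream out) throws IOException {
        out.writeLong(txnId);                   // stand-in for the Thrift payload
      }
      private void readObject(ObjectInputStream in) throws IOException {
        __isset_bitfield = 0;                   // field initializers did not run
        txnId = in.readLong();
      }
      public static void main(String[] args) throws Exception {
        CustomSerialized c = new CustomSerialized();
        c.txnId = 42L;
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ObjectOutputStream oos = new ObjectOutputStream(bos);
        oos.writeObject(c);
        oos.flush();
        CustomSerialized back = (CustomSerialized) new ObjectInputStream(
            new ByteArrayInputStream(bos.toByteArray())).readObject();
        System.out.println(back.txnId);         // 42, restored by readObject, not a constructor
      }
    }
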
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -762,6 +925,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -816,6 +995,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -853,10 +1044,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ if (struct.isSetCatName()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetTxnId()) { + optionals.set(1); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -888,11 +1091,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque } } struct.setPartNamesIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(1)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java index 4caec8fa7e..2414399c3e 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java @@ -39,6 +39,7 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult"); private static final org.apache.thrift.protocol.TField PART_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("partStats", org.apache.thrift.protocol.TType.MAP, (short)1); + private static final org.apache.thrift.protocol.TField 
IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -47,10 +48,16 @@
   }
 
   private Map<String,List<ColumnStatisticsObj>> partStats; // required
+  private IsolationLevelCompliance isStatsCompliant; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    PART_STATS((short)1, "partStats");
+    PART_STATS((short)1, "partStats"),
+    /**
+     * 
+     * @see IsolationLevelCompliance
+     */
+    IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -67,6 +74,8 @@ public static _Fields findByThriftId(int fieldId) {
       switch(fieldId) {
         case 1: // PART_STATS
           return PART_STATS;
+        case 2: // IS_STATS_COMPLIANT
+          return IS_STATS_COMPLIANT;
         default:
           return null;
       }
@@ -107,6 +116,7 @@ public String getFieldName() {
   }
 
   // isset id assignments
+  private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -115,6 +125,8 @@ public String getFieldName() {
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
             new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
                 new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))));
+    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsResult.class, metaDataMap);
   }
@@ -151,6 +163,9 @@ public PartitionsStatsResult(PartitionsStatsResult other) {
       }
       this.partStats = __this__partStats;
     }
+    if (other.isSetIsStatsCompliant()) {
+      this.isStatsCompliant = other.isStatsCompliant;
+    }
   }
 
   public PartitionsStatsResult deepCopy() {
@@ -160,6 +175,7 @@ public PartitionsStatsResult deepCopy() {
   @Override
   public void clear() {
     this.partStats = null;
+    this.isStatsCompliant = null;
   }
 
   public int getPartStatsSize() {
@@ -196,6 +212,37 @@ public void setPartStatsIsSet(boolean value) {
     }
   }
 
+  /**
+   * 
+   * @see IsolationLevelCompliance
+   */
+  public IsolationLevelCompliance getIsStatsCompliant() {
+    return this.isStatsCompliant;
+  }
+
+  /**
+   * 
+   * @see IsolationLevelCompliance
+   */
+  public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+    this.isStatsCompliant = isStatsCompliant;
+  }
+
+  public void unsetIsStatsCompliant() {
+    this.isStatsCompliant = null;
+  }
+
+  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+  public boolean isSetIsStatsCompliant() {
+    return this.isStatsCompliant != null;
+  }
+
+  public void setIsStatsCompliantIsSet(boolean value) {
+    if (!value) {
+      this.isStatsCompliant = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case PART_STATS:
@@ -206,6 +253,14 @@
       }
       break;
 
+    case IS_STATS_COMPLIANT:
+      if (value == null) {
+        unsetIsStatsCompliant();
+      } else {
+        setIsStatsCompliant((IsolationLevelCompliance)value);
+      }
+      break;
+
     }
   }
 
@@ -214,6 +269,9 @@ public Object getFieldValue(_Fields field) {
     case PART_STATS:
       return getPartStats();
 
+    case IS_STATS_COMPLIANT:
+      return getIsStatsCompliant();
+
     }
     throw new IllegalStateException();
   }
@@ -227,6 +285,8 @@ public boolean isSet(_Fields field) {
     switch (field) {
     case PART_STATS:
       return isSetPartStats();
+    case IS_STATS_COMPLIANT:
+      return isSetIsStatsCompliant();
     }
     throw new IllegalStateException();
   }
@@ -253,6 +313,15 @@ public boolean equals(PartitionsStatsResult that) {
         return false;
     }
 
+    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+        return false;
+      if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+        return false;
+    }
+
     return true;
   }
 
@@ -265,6 +334,11 @@ public int hashCode() {
     if (present_partStats)
       list.add(partStats);
 
+    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+    list.add(present_isStatsCompliant);
+    if (present_isStatsCompliant)
+      list.add(isStatsCompliant.getValue());
+
     return list.hashCode();
   }
 
@@ -286,6 +360,16 @@ public int compareTo(PartitionsStatsResult other) {
        return lastComparison;
      }
    }
+    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIsStatsCompliant()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -313,6 +397,16 @@ public String toString() {
       sb.append(this.partStats);
     }
     first = false;
+    if (isSetIsStatsCompliant()) {
+      if (!first) sb.append(", ");
+      sb.append("isStatsCompliant:");
+      if (this.isStatsCompliant == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.isStatsCompliant);
+      }
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -391,6 +485,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResu
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 2: // IS_STATS_COMPLIANT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+              struct.setIsStatsCompliantIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -424,6 +526,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRes
         }
         oprot.writeFieldEnd();
       }
+      if (struct.isStatsCompliant != null) {
+        if (struct.isSetIsStatsCompliant()) {
+          oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+          oprot.writeI32(struct.isStatsCompliant.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -455,6 +564,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu
           }
         }
       }
+      BitSet optionals = new BitSet();
+      if (struct.isSetIsStatsCompliant()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetIsStatsCompliant()) {
oprot.writeI32(struct.isStatsCompliant.getValue()); + } } @Override @@ -483,6 +600,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResul } } struct.setPartStatsIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java index a0ae84e760..8f460129f5 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -40,6 +40,8 @@ private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1); private static final org.apache.thrift.protocol.TField NEED_MERGE_FIELD_DESC = new org.apache.thrift.protocol.TField("needMerge", org.apache.thrift.protocol.TType.BOOL, (short)2); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,11 +51,15 @@ private List colStats; // required private boolean needMerge; // optional + private long txnId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
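One compatibility note on the PartitionsStatsResult hunk above: unlike Partition or Table, this struct previously had no optional fields at all, so its tuple-scheme encoding gains a trailing bitset that simply did not exist before (`writeBitSet(optionals, 1)` / `readBitSet(1)`). The other structs merely widen an existing bitset (for example Partition goes from 9 to 12 bits), which keeps the old positions stable; in both cases, though, TTupleProtocol is documented as requiring that all peers share the exact same struct definition, so old and new tuple-protocol peers cannot be mixed. A minimal round trip of the bitset itself, assuming libthrift's TMemoryBuffer scratch transport:

    import java.util.BitSet;
    import org.apache.thrift.protocol.TTupleProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class TupleBitsetDemo {
      public static void main(String[] args) throws Exception {
        TMemoryBuffer buf = new TMemoryBuffer(8);
        TTupleProtocol proto = new TTupleProtocol(buf);
        BitSet optionals = new BitSet();
        optionals.set(0);                     // e.g. isStatsCompliant present
        proto.writeBitSet(optionals, 1);      // one byte covers up to 8 optionals
        BitSet incoming = proto.readBitSet(1);
        System.out.println(incoming.get(0));  // true
      }
    }
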
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { COL_STATS((short)1, "colStats"), - NEED_MERGE((short)2, "needMerge"); + NEED_MERGE((short)2, "needMerge"), + TXN_ID((short)3, "txnId"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -72,6 +78,10 @@ public static _Fields findByThriftId(int fieldId) { return COL_STATS; case 2: // NEED_MERGE return NEED_MERGE; + case 3: // TXN_ID + return TXN_ID; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -113,8 +123,9 @@ public String getFieldName() { // isset id assignments private static final int __NEEDMERGE_ISSET_ID = 0; + private static final int __TXNID_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NEED_MERGE}; + private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -123,11 +134,17 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class)))); tmpMap.put(_Fields.NEED_MERGE, new org.apache.thrift.meta_data.FieldMetaData("needMerge", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetPartitionsStatsRequest.class, metaDataMap); } public SetPartitionsStatsRequest() { + this.txnId = -1L; + } public SetPartitionsStatsRequest( @@ -150,6 +167,10 @@ public SetPartitionsStatsRequest(SetPartitionsStatsRequest other) { this.colStats = __this__colStats; } this.needMerge = other.needMerge; + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public SetPartitionsStatsRequest deepCopy() { @@ -161,6 +182,9 @@ public void clear() { this.colStats = null; setNeedMergeIsSet(false); this.needMerge = false; + this.txnId = -1L; + + this.validWriteIdList = null; } public int getColStatsSize() { @@ -223,6 +247,51 @@ public void setNeedMergeIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDMERGE_ISSET_ID, value); } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = 
EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case COL_STATS: @@ -241,6 +310,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -252,6 +337,12 @@ public Object getFieldValue(_Fields field) { case NEED_MERGE: return isNeedMerge(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -267,6 +358,10 @@ public boolean isSet(_Fields field) { return isSetColStats(); case NEED_MERGE: return isSetNeedMerge(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -302,6 +397,24 @@ public boolean equals(SetPartitionsStatsRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -319,6 +432,16 @@ public int hashCode() { if (present_needMerge) list.add(needMerge); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -350,6 +473,26 @@ public int compareTo(SetPartitionsStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } 
+ } return 0; } @@ -383,6 +526,22 @@ public String toString() { sb.append(this.needMerge); first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -459,6 +618,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SetPartitionsStatsR org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -489,6 +664,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SetPartitionsStats oprot.writeBool(struct.needMerge); oprot.writeFieldEnd(); } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -517,10 +704,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR if (struct.isSetNeedMerge()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetTxnId()) { + optionals.set(1); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetNeedMerge()) { oprot.writeBool(struct.needMerge); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -538,11 +737,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRe } } struct.setColStatsIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.needMerge = iprot.readBool(); struct.setNeedMergeIsSet(true); } + if (incoming.get(1)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index 38d4f64f64..d9f17cc53a 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -56,6 +56,9 @@ private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new 
org.apache.thrift.protocol.TField("creationMetadata", org.apache.thrift.protocol.TType.STRUCT, (short)16); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)17); private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)18); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)19); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)20); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)21); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -81,6 +84,9 @@ private CreationMetadata creationMetadata; // optional private String catName; // optional private PrincipalType ownerType; // optional + private long txnId; // optional + private String validWriteIdList; // optional + private IsolationLevelCompliance isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -105,7 +111,14 @@ * * @see PrincipalType */ - OWNER_TYPE((short)18, "ownerType"); + OWNER_TYPE((short)18, "ownerType"), + TXN_ID((short)19, "txnId"), + VALID_WRITE_ID_LIST((short)20, "validWriteIdList"), + /** + * + * @see IsolationLevelCompliance + */ + IS_STATS_COMPLIANT((short)21, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -156,6 +169,12 @@ public static _Fields findByThriftId(int fieldId) { return CAT_NAME; case 18: // OWNER_TYPE return OWNER_TYPE; + case 19: // TXN_ID + return TXN_ID; + case 20: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; + case 21: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -201,8 +220,9 @@ public String getFieldName() { private static final int __RETENTION_ISSET_ID = 2; private static final int __TEMPORARY_ISSET_ID = 3; private static final int __REWRITEENABLED_ISSET_ID = 4; + private static final int __TXNID_ISSET_ID = 5; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -245,6 +265,12 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.OWNER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("ownerType", org.apache.thrift.TFieldRequirementType.OPTIONAL, new 
org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap); } @@ -254,6 +280,8 @@ public Table() { this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER; + this.txnId = -1L; + } public Table( @@ -342,6 +370,13 @@ public Table(Table other) { if (other.isSetOwnerType()) { this.ownerType = other.ownerType; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } + if (other.isSetIsStatsCompliant()) { + this.isStatsCompliant = other.isStatsCompliant; + } } public Table deepCopy() { @@ -374,6 +409,10 @@ public void clear() { this.catName = null; this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER; + this.txnId = -1L; + + this.validWriteIdList = null; + this.isStatsCompliant = null; } public String getTableName() { @@ -819,6 +858,82 @@ public void setOwnerTypeIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + + /** + * + * @see IsolationLevelCompliance + */ + public IsolationLevelCompliance getIsStatsCompliant() { + return this.isStatsCompliant; + } + + /** + * + * @see IsolationLevelCompliance + */ + public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + } + + public void unsetIsStatsCompliant() { + this.isStatsCompliant = null; + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() 
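Note: the default constructor seeds txnId with -1L, which reads as a "no transaction" sentinel (an inference from the default value, not documented in this hunk), while isSetTxnId() stays false until the setter flips the bit. A small behavioral sketch:

    Table t = new Table();
    assert t.getTxnId() == -1L && !t.isSetTxnId();  // default value, but not "set"
    t.setTxnId(42L);                                // stores the value, sets the bit
    assert t.isSetTxnId();
    t.unsetTxnId();                                 // clears only the bit; the stale
                                                    // value remains readable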
{ + return this.isStatsCompliant != null; + } + + public void setIsStatsCompliantIsSet(boolean value) { + if (!value) { + this.isStatsCompliant = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_NAME: @@ -965,6 +1080,30 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((IsolationLevelCompliance)value); + } + break; + } } @@ -1024,6 +1163,15 @@ public Object getFieldValue(_Fields field) { case OWNER_TYPE: return getOwnerType(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + + case IS_STATS_COMPLIANT: + return getIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -1071,6 +1219,12 @@ public boolean isSet(_Fields field) { return isSetCatName(); case OWNER_TYPE: return isSetOwnerType(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -1250,6 +1404,33 @@ public boolean equals(Table that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (!this.isStatsCompliant.equals(that.isStatsCompliant)) + return false; + } + return true; } @@ -1347,6 +1528,21 @@ public int hashCode() { if (present_ownerType) list.add(ownerType.getValue()); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant.getValue()); + return list.hashCode(); } @@ -1538,6 +1734,36 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return 
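Note: setFieldValue/getFieldValue give enum-keyed reflective access to the same fields; primitives cross the boundary boxed, and passing null is defined to mean "unset". Illustrative usage:

    Table t = new Table();
    t.setFieldValue(Table._Fields.TXN_ID, 42L);                // autoboxed to Long
    t.setFieldValue(Table._Fields.VALID_WRITE_ID_LIST, null);  // == unsetValidWriteIdList()
    long txnId = (Long) t.getFieldValue(Table._Fields.TXN_ID);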
lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1693,6 +1919,32 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + if (this.isStatsCompliant == null) { + sb.append("null"); + } else { + sb.append(this.isStatsCompliant); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1914,6 +2166,30 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 19: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 20: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 21: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -2034,6 +2310,25 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } + if (struct.isStatsCompliant != null) { + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeI32(struct.isStatsCompliant.getValue()); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -2106,7 +2401,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetOwnerType()) { optionals.set(17); } - 
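Note: the standard (field-tagged) scheme checks the declared wire type before reading each new field and skips on mismatch, which is the forward-compatibility half of the contract: a 3.x reader can ignore fields 19-21 written by a 4.x peer. The per-field pattern, as generated:

    case 19: // TXN_ID
      if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
        struct.txnId = iprot.readI64();
        struct.setTxnIdIsSet(true);
      } else {
        org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
      }
      break;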
oprot.writeBitSet(optionals, 18); + if (struct.isSetTxnId()) { + optionals.set(18); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(19); + } + if (struct.isSetIsStatsCompliant()) { + optionals.set(20); + } + oprot.writeBitSet(optionals, 21); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -2174,12 +2478,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetOwnerType()) { oprot.writeI32(struct.ownerType.getValue()); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeI32(struct.isStatsCompliant.getValue()); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(18); + BitSet incoming = iprot.readBitSet(21); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -2276,6 +2589,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32()); struct.setOwnerTypeIsSet(true); } + if (incoming.get(18)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(19)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } + if (incoming.get(20)) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index a663a64c67..c9b70a4456 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -42,6 +42,8 @@ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +55,17 @@ private String tblName; // required private List colNames; // required private String catName; // optional + private long txnId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for 
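Note: the tuple scheme packs its flag vector into whole bytes, ceil(n/8) of them, so growing Table's bitmap from 18 to 21 bits happens to keep the same three-byte wire size; even so, tuple encoding carries no field ids, so both peers must be built from the same generated code for bits 18-20 to mean txnId, validWriteIdList and isStatsCompliant.

    int bytesFor18 = (18 + 7) / 8;  // 3 bytes
    int bytesFor21 = (21 + 7) / 8;  // 3 bytes -- same size, by arithmetic luck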
finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), COL_NAMES((short)3, "colNames"), - CAT_NAME((short)4, "catName"); + CAT_NAME((short)4, "catName"), + TXN_ID((short)5, "txnId"), + VALID_WRITE_ID_LIST((short)6, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -82,6 +88,10 @@ public static _Fields findByThriftId(int fieldId) { return COL_NAMES; case 4: // CAT_NAME return CAT_NAME; + case 5: // TXN_ID + return TXN_ID; + case 6: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -122,7 +132,9 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.CAT_NAME}; + private static final int __TXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -135,11 +147,17 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsRequest.class, metaDataMap); } public TableStatsRequest() { + this.txnId = -1L; + } public TableStatsRequest( @@ -157,6 +175,7 @@ public TableStatsRequest( * Performs a deep copy on other. 
*/ public TableStatsRequest(TableStatsRequest other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -170,6 +189,10 @@ public TableStatsRequest(TableStatsRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public TableStatsRequest deepCopy() { @@ -182,6 +205,9 @@ public void clear() { this.tblName = null; this.colNames = null; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; } public String getDbName() { @@ -291,6 +317,51 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -325,6 +396,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -342,6 +429,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -361,6 +454,10 @@ public boolean isSet(_Fields field) { return isSetColNames(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -414,6 +511,24 @@ public boolean equals(TableStatsRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if 
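Note: the point of threading txnId and validWriteIdList through TableStatsRequest is that the metastore can check whether stored column statistics are valid for the caller's snapshot and flag the answer via TableStatsResult's optional isStatsCompliant. Illustrative caller, assuming the pre-existing required-field constructor (dbName, tblName, colNames):

    TableStatsRequest req = new TableStatsRequest(dbName, tblName, colNames);
    req.setTxnId(currentTxnId);              // caller's open transaction
    req.setValidWriteIdList(writeIdListStr); // caller's snapshot of valid write ids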
(!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -441,6 +556,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -492,6 +617,26 @@ public int compareTo(TableStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -545,6 +690,22 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -576,6 +737,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -642,6 +805,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -684,6 +863,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -714,10 +905,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s if (struct.isSetCatName()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetTxnId()) { + optionals.set(1); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -738,11 +941,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st } } struct.setColNamesIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(1)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java index dff7d5c204..4864f68133 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java @@ -39,6 +39,7 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsResult"); private static final org.apache.thrift.protocol.TField TABLE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableStats", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new 
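Note: the __isset_bitfield = 0 line exists because java.io deserialization bypasses the default constructor; the flags are then rebuilt as the embedded compact-protocol read() restores each field. The generated shape:

    private void readObject(java.io.ObjectInputStream in)
        throws java.io.IOException, ClassNotFoundException {
      try {
        __isset_bitfield = 0;  // constructor was skipped; start from "nothing set"
        read(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }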
org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,10 +48,16 @@ } private List tableStats; // required + private IsolationLevelCompliance isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_STATS((short)1, "tableStats"); + TABLE_STATS((short)1, "tableStats"), + /** + * + * @see IsolationLevelCompliance + */ + IS_STATS_COMPLIANT((short)2, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -67,6 +74,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // TABLE_STATS return TABLE_STATS; + case 2: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -107,12 +116,15 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.TABLE_STATS, new org.apache.thrift.meta_data.FieldMetaData("tableStats", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsResult.class, metaDataMap); } @@ -138,6 +150,9 @@ public TableStatsResult(TableStatsResult other) { } this.tableStats = __this__tableStats; } + if (other.isSetIsStatsCompliant()) { + this.isStatsCompliant = other.isStatsCompliant; + } } public TableStatsResult deepCopy() { @@ -147,6 +162,7 @@ public TableStatsResult deepCopy() { @Override public void clear() { this.tableStats = null; + this.isStatsCompliant = null; } public int getTableStatsSize() { @@ -187,6 +203,37 @@ public void setTableStatsIsSet(boolean value) { } } + /** + * + * @see IsolationLevelCompliance + */ + public IsolationLevelCompliance getIsStatsCompliant() { + return this.isStatsCompliant; + } + + /** + * + * @see IsolationLevelCompliance + */ + public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + } + + public void unsetIsStatsCompliant() { + this.isStatsCompliant = null; + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return this.isStatsCompliant != null; + } + + public void setIsStatsCompliantIsSet(boolean value) { + if (!value) { + this.isStatsCompliant = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_STATS: @@ -197,6 +244,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + 
unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((IsolationLevelCompliance)value); + } + break; + } } @@ -205,6 +260,9 @@ public Object getFieldValue(_Fields field) { case TABLE_STATS: return getTableStats(); + case IS_STATS_COMPLIANT: + return getIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -218,6 +276,8 @@ public boolean isSet(_Fields field) { switch (field) { case TABLE_STATS: return isSetTableStats(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -244,6 +304,15 @@ public boolean equals(TableStatsResult that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (!this.isStatsCompliant.equals(that.isStatsCompliant)) + return false; + } + return true; } @@ -256,6 +325,11 @@ public int hashCode() { if (present_tableStats) list.add(tableStats); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant.getValue()); + return list.hashCode(); } @@ -277,6 +351,16 @@ public int compareTo(TableStatsResult other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -304,6 +388,16 @@ public String toString() { sb.append(this.tableStats); } first = false; + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + if (this.isStatsCompliant == null) { + sb.append("null"); + } else { + sb.append(this.isStatsCompliant); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -370,6 +464,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -395,6 +497,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsResult s } oprot.writeFieldEnd(); } + if (struct.isStatsCompliant != null) { + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeI32(struct.isStatsCompliant.getValue()); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -419,6 +528,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st _iter428.write(oprot); } } + BitSet optionals = new BitSet(); + if (struct.isSetIsStatsCompliant()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetIsStatsCompliant()) { + 
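Note: enums cross the wire as plain i32. findByValue returns null for an ordinal this side does not know, and since presence for isStatsCompliant is null-based, an unrecognized value from a newer peer simply reads back as unset rather than failing:

    case 2: // IS_STATS_COMPLIANT
      if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
        // null when the i32 is a constant added after this code was generated
        struct.isStatsCompliant = IsolationLevelCompliance.findByValue(iprot.readI32());
        struct.setIsStatsCompliantIsSet(true);
      } else {
        org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
      }
      break;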
oprot.writeI32(struct.isStatsCompliant.getValue()); + } } @Override @@ -436,6 +553,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsResult str } } struct.setTableStatsIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 672ebf96d4..c6ce900969 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -208,7 +208,7 @@ public void alter_partitions(String db_name, String tbl_name, List new_parts) throws InvalidOperationException, MetaException, org.apache.thrift.TException; - public void alter_partitions_with_environment_context(String db_name, String tbl_name, List new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public AlterPartitionsResponse alter_partitions_with_environment_context(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException; public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException; @@ -626,7 +626,7 @@ public void alter_partitions(String db_name, String tbl_name, List new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void alter_partitions_with_environment_context(String db_name, String tbl_name, List new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void alter_partitions_with_environment_context(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -3401,33 +3401,33 @@ public void recv_alter_partitions() throws InvalidOperationException, MetaExcept return; } - public void alter_partitions_with_environment_context(String db_name, String tbl_name, List new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException + public AlterPartitionsResponse alter_partitions_with_environment_context(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException { - send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); - recv_alter_partitions_with_environment_context(); + send_alter_partitions_with_environment_context(req); + return recv_alter_partitions_with_environment_context(); } - public void send_alter_partitions_with_environment_context(String db_name, String tbl_name, List new_parts, EnvironmentContext environment_context) throws 
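Note: alter_partitions_with_environment_context switches from four positional arguments to a single AlterPartitionsRequest, and from void to AlterPartitionsResponse; this is the usual Thrift evolution move, since once an RPC takes a struct, later fields (such as txnId and writeIdList) can be added without another signature break. Illustrative client call; the setter names are guesses, as AlterPartitionsRequest's fields are not shown in this hunk:

    AlterPartitionsRequest req = new AlterPartitionsRequest();
    req.setDbName(dbName);            // hypothetical field names
    req.setTableName(tblName);
    req.setPartitions(newParts);
    req.setEnvironmentContext(envCtx);
    AlterPartitionsResponse resp = client.alter_partitions_with_environment_context(req);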
org.apache.thrift.TException + public void send_alter_partitions_with_environment_context(AlterPartitionsRequest req) throws org.apache.thrift.TException { alter_partitions_with_environment_context_args args = new alter_partitions_with_environment_context_args(); - args.setDb_name(db_name); - args.setTbl_name(tbl_name); - args.setNew_parts(new_parts); - args.setEnvironment_context(environment_context); + args.setReq(req); sendBase("alter_partitions_with_environment_context", args); } - public void recv_alter_partitions_with_environment_context() throws InvalidOperationException, MetaException, org.apache.thrift.TException + public AlterPartitionsResponse recv_alter_partitions_with_environment_context() throws InvalidOperationException, MetaException, org.apache.thrift.TException { alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result(); receiveBase(result, "alter_partitions_with_environment_context"); + if (result.isSetSuccess()) { + return result.success; + } if (result.o1 != null) { throw result.o1; } if (result.o2 != null) { throw result.o2; } - return; + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context failed: unknown result"); } public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException @@ -9869,44 +9869,35 @@ public void getResult() throws InvalidOperationException, MetaException, org.apa } } - public void alter_partitions_with_environment_context(String db_name, String tbl_name, List new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void alter_partitions_with_environment_context(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - alter_partitions_with_environment_context_call method_call = new alter_partitions_with_environment_context_call(db_name, tbl_name, new_parts, environment_context, resultHandler, this, ___protocolFactory, ___transport); + alter_partitions_with_environment_context_call method_call = new alter_partitions_with_environment_context_call(req, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_call extends org.apache.thrift.async.TAsyncMethodCall { - private String db_name; - private String tbl_name; - private List new_parts; - private EnvironmentContext environment_context; - public alter_partitions_with_environment_context_call(String db_name, String tbl_name, List new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private AlterPartitionsRequest req; + public alter_partitions_with_environment_context_call(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, 
org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); - this.db_name = db_name; - this.tbl_name = tbl_name; - this.new_parts = new_parts; - this.environment_context = environment_context; + this.req = req; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_partitions_with_environment_context", org.apache.thrift.protocol.TMessageType.CALL, 0)); alter_partitions_with_environment_context_args args = new alter_partitions_with_environment_context_args(); - args.setDb_name(db_name); - args.setTbl_name(tbl_name); - args.setNew_parts(new_parts); - args.setEnvironment_context(environment_context); + args.setReq(req); args.write(prot); prot.writeMessageEnd(); } - public void getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException { + public AlterPartitionsResponse getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException { if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { throw new IllegalStateException("Method call not finished!"); } org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - (new Client(prot)).recv_alter_partitions_with_environment_context(); + return (new Client(prot)).recv_alter_partitions_with_environment_context(); } } @@ -16414,7 +16405,7 @@ protected boolean isOneway() { public alter_partitions_with_environment_context_result getResult(I iface, alter_partitions_with_environment_context_args args) throws org.apache.thrift.TException { alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result(); try { - iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context); + result.success = iface.alter_partitions_with_environment_context(args.req); } catch (InvalidOperationException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -24951,7 +24942,7 @@ public void start(I iface, alter_partitions_args args, org.apache.thrift.async.A } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context extends org.apache.thrift.AsyncProcessFunction { + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context extends org.apache.thrift.AsyncProcessFunction { public alter_partitions_with_environment_context() { super("alter_partitions_with_environment_context"); } @@ -24960,11 +24951,12 @@ public alter_partitions_with_environment_context_args getEmptyArgsInstance() { return new alter_partitions_with_environment_context_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Void o) { + return new 
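Note: with a struct return type, the recv_ helper must now distinguish "no result" from "void": a reply carrying neither success nor a declared exception becomes a MISSING_RESULT protocol error instead of a silent return, exactly as generated above:

    if (result.isSetSuccess()) return result.success;
    if (result.o1 != null) throw result.o1;  // InvalidOperationException
    if (result.o2 != null) throw result.o2;  // MetaException
    throw new org.apache.thrift.TApplicationException(
        org.apache.thrift.TApplicationException.MISSING_RESULT,
        "alter_partitions_with_environment_context failed: unknown result");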
AsyncMethodCallback() { + public void onComplete(AlterPartitionsResponse o) { alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result(); + result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -25007,8 +24999,8 @@ protected boolean isOneway() { return false; } - public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context,resultHandler); + public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_partitions_with_environment_context(args.req,resultHandler); } } @@ -42252,13 +42244,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list928 = iprot.readListBegin(); - struct.success = new ArrayList(_list928.size); - String _elem929; - for (int _i930 = 0; _i930 < _list928.size; ++_i930) + org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); + struct.success = new ArrayList(_list936.size); + String _elem937; + for (int _i938 = 0; _i938 < _list936.size; ++_i938) { - _elem929 = iprot.readString(); - struct.success.add(_elem929); + _elem937 = iprot.readString(); + struct.success.add(_elem937); } iprot.readListEnd(); } @@ -42293,9 +42285,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter931 : struct.success) + for (String _iter939 : struct.success) { - oprot.writeString(_iter931); + oprot.writeString(_iter939); } oprot.writeListEnd(); } @@ -42334,9 +42326,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter932 : struct.success) + for (String _iter940 : struct.success) { - oprot.writeString(_iter932); + oprot.writeString(_iter940); } } } @@ -42351,13 +42343,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list933.size); - String _elem934; - for (int _i935 = 0; _i935 < _list933.size; ++_i935) + org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list941.size); + String _elem942; + for (int _i943 = 0; _i943 < _list941.size; ++_i943) { - _elem934 = iprot.readString(); - struct.success.add(_elem934); + _elem942 = iprot.readString(); + struct.success.add(_elem942); } } struct.setSuccessIsSet(true); @@ -43011,13 +43003,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); - 
struct.success = new ArrayList(_list936.size); - String _elem937; - for (int _i938 = 0; _i938 < _list936.size; ++_i938) + org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); + struct.success = new ArrayList(_list944.size); + String _elem945; + for (int _i946 = 0; _i946 < _list944.size; ++_i946) { - _elem937 = iprot.readString(); - struct.success.add(_elem937); + _elem945 = iprot.readString(); + struct.success.add(_elem945); } iprot.readListEnd(); } @@ -43052,9 +43044,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter939 : struct.success) + for (String _iter947 : struct.success) { - oprot.writeString(_iter939); + oprot.writeString(_iter947); } oprot.writeListEnd(); } @@ -43093,9 +43085,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter940 : struct.success) + for (String _iter948 : struct.success) { - oprot.writeString(_iter940); + oprot.writeString(_iter948); } } } @@ -43110,13 +43102,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list941.size); - String _elem942; - for (int _i943 = 0; _i943 < _list941.size; ++_i943) + org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list949.size); + String _elem950; + for (int _i951 = 0; _i951 < _list949.size; ++_i951) { - _elem942 = iprot.readString(); - struct.success.add(_elem942); + _elem950 = iprot.readString(); + struct.success.add(_elem950); } } struct.setSuccessIsSet(true); @@ -47723,16 +47715,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map944 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map944.size); - String _key945; - Type _val946; - for (int _i947 = 0; _i947 < _map944.size; ++_i947) + org.apache.thrift.protocol.TMap _map952 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map952.size); + String _key953; + Type _val954; + for (int _i955 = 0; _i955 < _map952.size; ++_i955) { - _key945 = iprot.readString(); - _val946 = new Type(); - _val946.read(iprot); - struct.success.put(_key945, _val946); + _key953 = iprot.readString(); + _val954 = new Type(); + _val954.read(iprot); + struct.success.put(_key953, _val954); } iprot.readMapEnd(); } @@ -47767,10 +47759,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter948 : struct.success.entrySet()) + for (Map.Entry _iter956 : struct.success.entrySet()) { - oprot.writeString(_iter948.getKey()); - _iter948.getValue().write(oprot); + oprot.writeString(_iter956.getKey()); + _iter956.getValue().write(oprot); } 
oprot.writeMapEnd(); } @@ -47809,10 +47801,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter949 : struct.success.entrySet()) + for (Map.Entry _iter957 : struct.success.entrySet()) { - oprot.writeString(_iter949.getKey()); - _iter949.getValue().write(oprot); + oprot.writeString(_iter957.getKey()); + _iter957.getValue().write(oprot); } } } @@ -47827,16 +47819,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map950 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map950.size); - String _key951; - Type _val952; - for (int _i953 = 0; _i953 < _map950.size; ++_i953) + org.apache.thrift.protocol.TMap _map958 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map958.size); + String _key959; + Type _val960; + for (int _i961 = 0; _i961 < _map958.size; ++_i961) { - _key951 = iprot.readString(); - _val952 = new Type(); - _val952.read(iprot); - struct.success.put(_key951, _val952); + _key959 = iprot.readString(); + _val960 = new Type(); + _val960.read(iprot); + struct.success.put(_key959, _val960); } } struct.setSuccessIsSet(true); @@ -48871,14 +48863,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list954 = iprot.readListBegin(); - struct.success = new ArrayList(_list954.size); - FieldSchema _elem955; - for (int _i956 = 0; _i956 < _list954.size; ++_i956) + org.apache.thrift.protocol.TList _list962 = iprot.readListBegin(); + struct.success = new ArrayList(_list962.size); + FieldSchema _elem963; + for (int _i964 = 0; _i964 < _list962.size; ++_i964) { - _elem955 = new FieldSchema(); - _elem955.read(iprot); - struct.success.add(_elem955); + _elem963 = new FieldSchema(); + _elem963.read(iprot); + struct.success.add(_elem963); } iprot.readListEnd(); } @@ -48931,9 +48923,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter957 : struct.success) + for (FieldSchema _iter965 : struct.success) { - _iter957.write(oprot); + _iter965.write(oprot); } oprot.writeListEnd(); } @@ -48988,9 +48980,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter958 : struct.success) + for (FieldSchema _iter966 : struct.success) { - _iter958.write(oprot); + _iter966.write(oprot); } } } @@ -49011,14 +49003,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list959 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list959.size); - FieldSchema _elem960; - for (int _i961 = 0; _i961 < _list959.size; ++_i961) + 
org.apache.thrift.protocol.TList _list967 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list967.size); + FieldSchema _elem968; + for (int _i969 = 0; _i969 < _list967.size; ++_i969) { - _elem960 = new FieldSchema(); - _elem960.read(iprot); - struct.success.add(_elem960); + _elem968 = new FieldSchema(); + _elem968.read(iprot); + struct.success.add(_elem968); } } struct.setSuccessIsSet(true); @@ -50172,14 +50164,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list962 = iprot.readListBegin(); - struct.success = new ArrayList(_list962.size); - FieldSchema _elem963; - for (int _i964 = 0; _i964 < _list962.size; ++_i964) + org.apache.thrift.protocol.TList _list970 = iprot.readListBegin(); + struct.success = new ArrayList(_list970.size); + FieldSchema _elem971; + for (int _i972 = 0; _i972 < _list970.size; ++_i972) { - _elem963 = new FieldSchema(); - _elem963.read(iprot); - struct.success.add(_elem963); + _elem971 = new FieldSchema(); + _elem971.read(iprot); + struct.success.add(_elem971); } iprot.readListEnd(); } @@ -50232,9 +50224,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter965 : struct.success) + for (FieldSchema _iter973 : struct.success) { - _iter965.write(oprot); + _iter973.write(oprot); } oprot.writeListEnd(); } @@ -50289,9 +50281,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter966 : struct.success) + for (FieldSchema _iter974 : struct.success) { - _iter966.write(oprot); + _iter974.write(oprot); } } } @@ -50312,14 +50304,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list967 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list967.size); - FieldSchema _elem968; - for (int _i969 = 0; _i969 < _list967.size; ++_i969) + org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list975.size); + FieldSchema _elem976; + for (int _i977 = 0; _i977 < _list975.size; ++_i977) { - _elem968 = new FieldSchema(); - _elem968.read(iprot); - struct.success.add(_elem968); + _elem976 = new FieldSchema(); + _elem976.read(iprot); + struct.success.add(_elem976); } } struct.setSuccessIsSet(true); @@ -51364,14 +51356,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list970 = iprot.readListBegin(); - struct.success = new ArrayList(_list970.size); - FieldSchema _elem971; - for (int _i972 = 0; _i972 < _list970.size; ++_i972) + org.apache.thrift.protocol.TList _list978 = iprot.readListBegin(); + struct.success = new ArrayList(_list978.size); + FieldSchema _elem979; + for (int _i980 = 0; _i980 < _list978.size; ++_i980) { - _elem971 
= new FieldSchema(); - _elem971.read(iprot); - struct.success.add(_elem971); + _elem979 = new FieldSchema(); + _elem979.read(iprot); + struct.success.add(_elem979); } iprot.readListEnd(); } @@ -51424,9 +51416,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter973 : struct.success) + for (FieldSchema _iter981 : struct.success) { - _iter973.write(oprot); + _iter981.write(oprot); } oprot.writeListEnd(); } @@ -51481,9 +51473,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter974 : struct.success) + for (FieldSchema _iter982 : struct.success) { - _iter974.write(oprot); + _iter982.write(oprot); } } } @@ -51504,14 +51496,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list975.size); - FieldSchema _elem976; - for (int _i977 = 0; _i977 < _list975.size; ++_i977) + org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list983.size); + FieldSchema _elem984; + for (int _i985 = 0; _i985 < _list983.size; ++_i985) { - _elem976 = new FieldSchema(); - _elem976.read(iprot); - struct.success.add(_elem976); + _elem984 = new FieldSchema(); + _elem984.read(iprot); + struct.success.add(_elem984); } } struct.setSuccessIsSet(true); @@ -52665,14 +52657,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list978 = iprot.readListBegin(); - struct.success = new ArrayList(_list978.size); - FieldSchema _elem979; - for (int _i980 = 0; _i980 < _list978.size; ++_i980) + org.apache.thrift.protocol.TList _list986 = iprot.readListBegin(); + struct.success = new ArrayList(_list986.size); + FieldSchema _elem987; + for (int _i988 = 0; _i988 < _list986.size; ++_i988) { - _elem979 = new FieldSchema(); - _elem979.read(iprot); - struct.success.add(_elem979); + _elem987 = new FieldSchema(); + _elem987.read(iprot); + struct.success.add(_elem987); } iprot.readListEnd(); } @@ -52725,9 +52717,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter981 : struct.success) + for (FieldSchema _iter989 : struct.success) { - _iter981.write(oprot); + _iter989.write(oprot); } oprot.writeListEnd(); } @@ -52782,9 +52774,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter982 : struct.success) + for (FieldSchema _iter990 : struct.success) { - _iter982.write(oprot); + _iter990.write(oprot); } } } @@ -52805,14 +52797,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet 
incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list983.size); - FieldSchema _elem984; - for (int _i985 = 0; _i985 < _list983.size; ++_i985) + org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list991.size); + FieldSchema _elem992; + for (int _i993 = 0; _i993 < _list991.size; ++_i993) { - _elem984 = new FieldSchema(); - _elem984.read(iprot); - struct.success.add(_elem984); + _elem992 = new FieldSchema(); + _elem992.read(iprot); + struct.success.add(_elem992); } } struct.setSuccessIsSet(true); @@ -55941,14 +55933,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list986 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list986.size); - SQLPrimaryKey _elem987; - for (int _i988 = 0; _i988 < _list986.size; ++_i988) + org.apache.thrift.protocol.TList _list994 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list994.size); + SQLPrimaryKey _elem995; + for (int _i996 = 0; _i996 < _list994.size; ++_i996) { - _elem987 = new SQLPrimaryKey(); - _elem987.read(iprot); - struct.primaryKeys.add(_elem987); + _elem995 = new SQLPrimaryKey(); + _elem995.read(iprot); + struct.primaryKeys.add(_elem995); } iprot.readListEnd(); } @@ -55960,14 +55952,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list989 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list989.size); - SQLForeignKey _elem990; - for (int _i991 = 0; _i991 < _list989.size; ++_i991) + org.apache.thrift.protocol.TList _list997 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list997.size); + SQLForeignKey _elem998; + for (int _i999 = 0; _i999 < _list997.size; ++_i999) { - _elem990 = new SQLForeignKey(); - _elem990.read(iprot); - struct.foreignKeys.add(_elem990); + _elem998 = new SQLForeignKey(); + _elem998.read(iprot); + struct.foreignKeys.add(_elem998); } iprot.readListEnd(); } @@ -55979,14 +55971,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list992 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list992.size); - SQLUniqueConstraint _elem993; - for (int _i994 = 0; _i994 < _list992.size; ++_i994) + org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list1000.size); + SQLUniqueConstraint _elem1001; + for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002) { - _elem993 = new SQLUniqueConstraint(); - _elem993.read(iprot); - struct.uniqueConstraints.add(_elem993); + _elem1001 = new SQLUniqueConstraint(); + _elem1001.read(iprot); + struct.uniqueConstraints.add(_elem1001); } iprot.readListEnd(); } @@ -55998,14 +55990,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list995 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list995.size); - SQLNotNullConstraint _elem996; - for (int _i997 = 0; _i997 < _list995.size; ++_i997) + org.apache.thrift.protocol.TList _list1003 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list1003.size); + SQLNotNullConstraint _elem1004; + for (int _i1005 = 0; _i1005 < _list1003.size; ++_i1005) { - _elem996 = new SQLNotNullConstraint(); - _elem996.read(iprot); - struct.notNullConstraints.add(_elem996); + _elem1004 = new SQLNotNullConstraint(); + _elem1004.read(iprot); + struct.notNullConstraints.add(_elem1004); } iprot.readListEnd(); } @@ -56017,14 +56009,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list998 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list998.size); - SQLDefaultConstraint _elem999; - for (int _i1000 = 0; _i1000 < _list998.size; ++_i1000) + org.apache.thrift.protocol.TList _list1006 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list1006.size); + SQLDefaultConstraint _elem1007; + for (int _i1008 = 0; _i1008 < _list1006.size; ++_i1008) { - _elem999 = new SQLDefaultConstraint(); - _elem999.read(iprot); - struct.defaultConstraints.add(_elem999); + _elem1007 = new SQLDefaultConstraint(); + _elem1007.read(iprot); + struct.defaultConstraints.add(_elem1007); } iprot.readListEnd(); } @@ -56036,14 +56028,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 7: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1001 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list1001.size); - SQLCheckConstraint _elem1002; - for (int _i1003 = 0; _i1003 < _list1001.size; ++_i1003) + org.apache.thrift.protocol.TList _list1009 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list1009.size); + SQLCheckConstraint _elem1010; + for (int _i1011 = 0; _i1011 < _list1009.size; ++_i1011) { - _elem1002 = new SQLCheckConstraint(); - _elem1002.read(iprot); - struct.checkConstraints.add(_elem1002); + _elem1010 = new SQLCheckConstraint(); + _elem1010.read(iprot); + struct.checkConstraints.add(_elem1010); } iprot.readListEnd(); } @@ -56074,9 +56066,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter1004 : struct.primaryKeys) + for (SQLPrimaryKey _iter1012 : struct.primaryKeys) { - _iter1004.write(oprot); + _iter1012.write(oprot); } oprot.writeListEnd(); } @@ -56086,9 +56078,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter1005 : struct.foreignKeys) + for (SQLForeignKey _iter1013 : struct.foreignKeys) { - _iter1005.write(oprot); + _iter1013.write(oprot); } oprot.writeListEnd(); } @@ -56098,9 +56090,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); 
{ oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter1006 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1014 : struct.uniqueConstraints) { - _iter1006.write(oprot); + _iter1014.write(oprot); } oprot.writeListEnd(); } @@ -56110,9 +56102,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter1007 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1015 : struct.notNullConstraints) { - _iter1007.write(oprot); + _iter1015.write(oprot); } oprot.writeListEnd(); } @@ -56122,9 +56114,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter1008 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1016 : struct.defaultConstraints) { - _iter1008.write(oprot); + _iter1016.write(oprot); } oprot.writeListEnd(); } @@ -56134,9 +56126,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter1009 : struct.checkConstraints) + for (SQLCheckConstraint _iter1017 : struct.checkConstraints) { - _iter1009.write(oprot); + _iter1017.write(oprot); } oprot.writeListEnd(); } @@ -56188,54 +56180,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter1010 : struct.primaryKeys) + for (SQLPrimaryKey _iter1018 : struct.primaryKeys) { - _iter1010.write(oprot); + _iter1018.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter1011 : struct.foreignKeys) + for (SQLForeignKey _iter1019 : struct.foreignKeys) { - _iter1011.write(oprot); + _iter1019.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter1012 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1020 : struct.uniqueConstraints) { - _iter1012.write(oprot); + _iter1020.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter1013 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1021 : struct.notNullConstraints) { - _iter1013.write(oprot); + _iter1021.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter1014 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1022 : struct.defaultConstraints) { - _iter1014.write(oprot); + _iter1022.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter1015 : struct.checkConstraints) + for (SQLCheckConstraint _iter1023 : struct.checkConstraints) { - 
_iter1015.write(oprot); + _iter1023.write(oprot); } } } @@ -56252,84 +56244,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1016 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list1016.size); - SQLPrimaryKey _elem1017; - for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) + org.apache.thrift.protocol.TList _list1024 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list1024.size); + SQLPrimaryKey _elem1025; + for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026) { - _elem1017 = new SQLPrimaryKey(); - _elem1017.read(iprot); - struct.primaryKeys.add(_elem1017); + _elem1025 = new SQLPrimaryKey(); + _elem1025.read(iprot); + struct.primaryKeys.add(_elem1025); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1019 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list1019.size); - SQLForeignKey _elem1020; - for (int _i1021 = 0; _i1021 < _list1019.size; ++_i1021) + org.apache.thrift.protocol.TList _list1027 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list1027.size); + SQLForeignKey _elem1028; + for (int _i1029 = 0; _i1029 < _list1027.size; ++_i1029) { - _elem1020 = new SQLForeignKey(); - _elem1020.read(iprot); - struct.foreignKeys.add(_elem1020); + _elem1028 = new SQLForeignKey(); + _elem1028.read(iprot); + struct.foreignKeys.add(_elem1028); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1022 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list1022.size); - SQLUniqueConstraint _elem1023; - for (int _i1024 = 0; _i1024 < _list1022.size; ++_i1024) + org.apache.thrift.protocol.TList _list1030 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list1030.size); + SQLUniqueConstraint _elem1031; + for (int _i1032 = 0; _i1032 < _list1030.size; ++_i1032) { - _elem1023 = new SQLUniqueConstraint(); - _elem1023.read(iprot); - struct.uniqueConstraints.add(_elem1023); + _elem1031 = new SQLUniqueConstraint(); + _elem1031.read(iprot); + struct.uniqueConstraints.add(_elem1031); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1025 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list1025.size); - SQLNotNullConstraint _elem1026; - for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027) + org.apache.thrift.protocol.TList _list1033 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1033.size); + SQLNotNullConstraint _elem1034; + for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035) { - _elem1026 = new SQLNotNullConstraint(); - _elem1026.read(iprot); - struct.notNullConstraints.add(_elem1026); + _elem1034 = new SQLNotNullConstraint(); + _elem1034.read(iprot); + struct.notNullConstraints.add(_elem1034); } } 
struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1028 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list1028.size); - SQLDefaultConstraint _elem1029; - for (int _i1030 = 0; _i1030 < _list1028.size; ++_i1030) + org.apache.thrift.protocol.TList _list1036 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1036.size); + SQLDefaultConstraint _elem1037; + for (int _i1038 = 0; _i1038 < _list1036.size; ++_i1038) { - _elem1029 = new SQLDefaultConstraint(); - _elem1029.read(iprot); - struct.defaultConstraints.add(_elem1029); + _elem1037 = new SQLDefaultConstraint(); + _elem1037.read(iprot); + struct.defaultConstraints.add(_elem1037); } } struct.setDefaultConstraintsIsSet(true); } if (incoming.get(6)) { { - org.apache.thrift.protocol.TList _list1031 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list1031.size); - SQLCheckConstraint _elem1032; - for (int _i1033 = 0; _i1033 < _list1031.size; ++_i1033) + org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1039.size); + SQLCheckConstraint _elem1040; + for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041) { - _elem1032 = new SQLCheckConstraint(); - _elem1032.read(iprot); - struct.checkConstraints.add(_elem1032); + _elem1040 = new SQLCheckConstraint(); + _elem1040.read(iprot); + struct.checkConstraints.add(_elem1040); } } struct.setCheckConstraintsIsSet(true); @@ -65479,13 +65471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1034 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list1034.size); - String _elem1035; - for (int _i1036 = 0; _i1036 < _list1034.size; ++_i1036) + org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list1042.size); + String _elem1043; + for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044) { - _elem1035 = iprot.readString(); - struct.partNames.add(_elem1035); + _elem1043 = iprot.readString(); + struct.partNames.add(_elem1043); } iprot.readListEnd(); } @@ -65521,9 +65513,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter1037 : struct.partNames) + for (String _iter1045 : struct.partNames) { - oprot.writeString(_iter1037); + oprot.writeString(_iter1045); } oprot.writeListEnd(); } @@ -65566,9 +65558,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter1038 : struct.partNames) + for (String _iter1046 : struct.partNames) { - oprot.writeString(_iter1038); + oprot.writeString(_iter1046); } } } @@ -65588,13 +65580,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1039 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list1039.size); - String _elem1040; - for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041) + org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list1047.size); + String _elem1048; + for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049) { - _elem1040 = iprot.readString(); - struct.partNames.add(_elem1040); + _elem1048 = iprot.readString(); + struct.partNames.add(_elem1048); } } struct.setPartNamesIsSet(true); @@ -66819,13 +66811,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin(); - struct.success = new ArrayList(_list1042.size); - String _elem1043; - for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044) + org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin(); + struct.success = new ArrayList(_list1050.size); + String _elem1051; + for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052) { - _elem1043 = iprot.readString(); - struct.success.add(_elem1043); + _elem1051 = iprot.readString(); + struct.success.add(_elem1051); } iprot.readListEnd(); } @@ -66860,9 +66852,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1045 : struct.success) + for (String _iter1053 : struct.success) { - oprot.writeString(_iter1045); + oprot.writeString(_iter1053); } oprot.writeListEnd(); } @@ -66901,9 +66893,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1046 : struct.success) + for (String _iter1054 : struct.success) { - oprot.writeString(_iter1046); + oprot.writeString(_iter1054); } } } @@ -66918,13 +66910,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1047.size); - String _elem1048; - for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049) + org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1055.size); + String _elem1056; + for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057) { - _elem1048 = iprot.readString(); - struct.success.add(_elem1048); + _elem1056 = iprot.readString(); + struct.success.add(_elem1056); } } struct.setSuccessIsSet(true); @@ -67898,13 +67890,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin(); - struct.success = new ArrayList(_list1050.size); - String _elem1051; - for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052) + org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin(); + 
struct.success = new ArrayList(_list1058.size); + String _elem1059; + for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060) { - _elem1051 = iprot.readString(); - struct.success.add(_elem1051); + _elem1059 = iprot.readString(); + struct.success.add(_elem1059); } iprot.readListEnd(); } @@ -67939,9 +67931,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1053 : struct.success) + for (String _iter1061 : struct.success) { - oprot.writeString(_iter1053); + oprot.writeString(_iter1061); } oprot.writeListEnd(); } @@ -67980,9 +67972,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1054 : struct.success) + for (String _iter1062 : struct.success) { - oprot.writeString(_iter1054); + oprot.writeString(_iter1062); } } } @@ -67997,13 +67989,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1055.size); - String _elem1056; - for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057) + org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1063.size); + String _elem1064; + for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065) { - _elem1056 = iprot.readString(); - struct.success.add(_elem1056); + _elem1064 = iprot.readString(); + struct.success.add(_elem1064); } } struct.setSuccessIsSet(true); @@ -68769,13 +68761,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin(); - struct.success = new ArrayList(_list1058.size); - String _elem1059; - for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060) + org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin(); + struct.success = new ArrayList(_list1066.size); + String _elem1067; + for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068) { - _elem1059 = iprot.readString(); - struct.success.add(_elem1059); + _elem1067 = iprot.readString(); + struct.success.add(_elem1067); } iprot.readListEnd(); } @@ -68810,9 +68802,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1061 : struct.success) + for (String _iter1069 : struct.success) { - oprot.writeString(_iter1061); + oprot.writeString(_iter1069); } oprot.writeListEnd(); } @@ -68851,9 +68843,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1062 : struct.success) + for (String _iter1070 : struct.success) { - oprot.writeString(_iter1062); + oprot.writeString(_iter1070); } } } @@ -68868,13 +68860,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1063.size); - String _elem1064; - for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065) + org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1071.size); + String _elem1072; + for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073) { - _elem1064 = iprot.readString(); - struct.success.add(_elem1064); + _elem1072 = iprot.readString(); + struct.success.add(_elem1072); } } struct.setSuccessIsSet(true); @@ -69379,13 +69371,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list1066.size); - String _elem1067; - for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068) + org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list1074.size); + String _elem1075; + for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076) { - _elem1067 = iprot.readString(); - struct.tbl_types.add(_elem1067); + _elem1075 = iprot.readString(); + struct.tbl_types.add(_elem1075); } iprot.readListEnd(); } @@ -69421,9 +69413,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter1069 : struct.tbl_types) + for (String _iter1077 : struct.tbl_types) { - oprot.writeString(_iter1069); + oprot.writeString(_iter1077); } oprot.writeListEnd(); } @@ -69466,9 +69458,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter1070 : struct.tbl_types) + for (String _iter1078 : struct.tbl_types) { - oprot.writeString(_iter1070); + oprot.writeString(_iter1078); } } } @@ -69488,13 +69480,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list1071.size); - String _elem1072; - for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073) + org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1079.size); + String _elem1080; + for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081) { - _elem1072 = iprot.readString(); - struct.tbl_types.add(_elem1072); + _elem1080 = iprot.readString(); + struct.tbl_types.add(_elem1080); } } struct.setTbl_typesIsSet(true); @@ -69900,14 +69892,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin(); - struct.success = new 
ArrayList(_list1074.size); - TableMeta _elem1075; - for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076) + org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin(); + struct.success = new ArrayList(_list1082.size); + TableMeta _elem1083; + for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084) { - _elem1075 = new TableMeta(); - _elem1075.read(iprot); - struct.success.add(_elem1075); + _elem1083 = new TableMeta(); + _elem1083.read(iprot); + struct.success.add(_elem1083); } iprot.readListEnd(); } @@ -69942,9 +69934,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1077 : struct.success) + for (TableMeta _iter1085 : struct.success) { - _iter1077.write(oprot); + _iter1085.write(oprot); } oprot.writeListEnd(); } @@ -69983,9 +69975,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter1078 : struct.success) + for (TableMeta _iter1086 : struct.success) { - _iter1078.write(oprot); + _iter1086.write(oprot); } } } @@ -70000,14 +69992,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1079.size); - TableMeta _elem1080; - for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081) + org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1087.size); + TableMeta _elem1088; + for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089) { - _elem1080 = new TableMeta(); - _elem1080.read(iprot); - struct.success.add(_elem1080); + _elem1088 = new TableMeta(); + _elem1088.read(iprot); + struct.success.add(_elem1088); } } struct.setSuccessIsSet(true); @@ -70773,13 +70765,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin(); - struct.success = new ArrayList(_list1082.size); - String _elem1083; - for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084) + org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin(); + struct.success = new ArrayList(_list1090.size); + String _elem1091; + for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092) { - _elem1083 = iprot.readString(); - struct.success.add(_elem1083); + _elem1091 = iprot.readString(); + struct.success.add(_elem1091); } iprot.readListEnd(); } @@ -70814,9 +70806,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1085 : struct.success) + for (String _iter1093 : struct.success) { - oprot.writeString(_iter1085); + oprot.writeString(_iter1093); } oprot.writeListEnd(); } @@ -70855,9 +70847,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { 
oprot.writeI32(struct.success.size()); - for (String _iter1086 : struct.success) + for (String _iter1094 : struct.success) { - oprot.writeString(_iter1086); + oprot.writeString(_iter1094); } } } @@ -70872,13 +70864,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1087.size); - String _elem1088; - for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089) + org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1095.size); + String _elem1096; + for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097) { - _elem1088 = iprot.readString(); - struct.success.add(_elem1088); + _elem1096 = iprot.readString(); + struct.success.add(_elem1096); } } struct.setSuccessIsSet(true); @@ -72331,13 +72323,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1090.size); - String _elem1091; - for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092) + org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1098.size); + String _elem1099; + for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100) { - _elem1091 = iprot.readString(); - struct.tbl_names.add(_elem1091); + _elem1099 = iprot.readString(); + struct.tbl_names.add(_elem1099); } iprot.readListEnd(); } @@ -72368,9 +72360,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1093 : struct.tbl_names) + for (String _iter1101 : struct.tbl_names) { - oprot.writeString(_iter1093); + oprot.writeString(_iter1101); } oprot.writeListEnd(); } @@ -72407,9 +72399,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1094 : struct.tbl_names) + for (String _iter1102 : struct.tbl_names) { - oprot.writeString(_iter1094); + oprot.writeString(_iter1102); } } } @@ -72425,13 +72417,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1095.size); - String _elem1096; - for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097) + org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1103.size); + String _elem1104; + for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105) { - _elem1096 = iprot.readString(); - struct.tbl_names.add(_elem1096); + _elem1104 = iprot.readString(); + struct.tbl_names.add(_elem1104); } } struct.setTbl_namesIsSet(true); @@ -72756,14 +72748,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin(); - struct.success = new ArrayList(_list1098.size); - Table _elem1099; - for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100) + org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin(); + struct.success = new ArrayList(_list1106.size); + Table _elem1107; + for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108) { - _elem1099 = new Table(); - _elem1099.read(iprot); - struct.success.add(_elem1099); + _elem1107 = new Table(); + _elem1107.read(iprot); + struct.success.add(_elem1107); } iprot.readListEnd(); }
@@ -72789,9 +72781,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1101 : struct.success) + for (Table _iter1109 : struct.success) { - _iter1101.write(oprot); + _iter1109.write(oprot); } oprot.writeListEnd(); }
@@ -72822,9 +72814,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1102 : struct.success) + for (Table _iter1110 : struct.success) { - _iter1102.write(oprot); + _iter1110.write(oprot); } } }
@@ -72836,14 +72828,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1103.size); - Table _elem1104; - for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105) + org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1111.size); + Table _elem1112; + for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113) { - _elem1104 = new Table(); - _elem1104.read(iprot); - struct.success.add(_elem1104); + _elem1112 = new Table(); + _elem1112.read(iprot); + struct.success.add(_elem1112); } } struct.setSuccessIsSet(true);
@@ -75236,13 +75228,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1106.size); - String _elem1107; - for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108) + org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1114.size); + String _elem1115; + for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116) { - _elem1107 = iprot.readString(); - struct.tbl_names.add(_elem1107); + _elem1115 = iprot.readString(); + struct.tbl_names.add(_elem1115); } iprot.readListEnd(); }
@@ -75273,9 +75265,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1109 : struct.tbl_names) + for (String _iter1117 : struct.tbl_names) { - oprot.writeString(_iter1109); + oprot.writeString(_iter1117); } oprot.writeListEnd(); }
@@ -75312,9 +75304,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1110 : struct.tbl_names) + for (String _iter1118 : struct.tbl_names) { - oprot.writeString(_iter1110); + oprot.writeString(_iter1118); } } }
@@ -75330,13 +75322,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1111.size); - String _elem1112; - for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113) + org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1119.size); + String _elem1120; + for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121) { - _elem1112 = iprot.readString(); - struct.tbl_names.add(_elem1112); + _elem1120 = iprot.readString(); + struct.tbl_names.add(_elem1120); } } struct.setTbl_namesIsSet(true);
@@ -75909,16 +75901,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1114 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1114.size); - String _key1115; - Materialization _val1116; - for (int _i1117 = 0; _i1117 < _map1114.size; ++_i1117) + org.apache.thrift.protocol.TMap _map1122 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1122.size); + String _key1123; + Materialization _val1124; + for (int _i1125 = 0; _i1125 < _map1122.size; ++_i1125) { - _key1115 = iprot.readString(); - _val1116 = new Materialization(); - _val1116.read(iprot); - struct.success.put(_key1115, _val1116); + _key1123 = iprot.readString(); + _val1124 = new Materialization(); +
_val1124.read(iprot); + struct.success.put(_key1123, _val1124); } iprot.readMapEnd(); } @@ -75971,10 +75963,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter1118 : struct.success.entrySet()) + for (Map.Entry _iter1126 : struct.success.entrySet()) { - oprot.writeString(_iter1118.getKey()); - _iter1118.getValue().write(oprot); + oprot.writeString(_iter1126.getKey()); + _iter1126.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -76029,10 +76021,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1119 : struct.success.entrySet()) + for (Map.Entry _iter1127 : struct.success.entrySet()) { - oprot.writeString(_iter1119.getKey()); - _iter1119.getValue().write(oprot); + oprot.writeString(_iter1127.getKey()); + _iter1127.getValue().write(oprot); } } } @@ -76053,16 +76045,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1120 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map1120.size); - String _key1121; - Materialization _val1122; - for (int _i1123 = 0; _i1123 < _map1120.size; ++_i1123) + org.apache.thrift.protocol.TMap _map1128 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1128.size); + String _key1129; + Materialization _val1130; + for (int _i1131 = 0; _i1131 < _map1128.size; ++_i1131) { - _key1121 = iprot.readString(); - _val1122 = new Materialization(); - _val1122.read(iprot); - struct.success.put(_key1121, _val1122); + _key1129 = iprot.readString(); + _val1130 = new Materialization(); + _val1130.read(iprot); + struct.success.put(_key1129, _val1130); } } struct.setSuccessIsSet(true); @@ -78455,13 +78447,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1124 = iprot.readListBegin(); - struct.success = new ArrayList(_list1124.size); - String _elem1125; - for (int _i1126 = 0; _i1126 < _list1124.size; ++_i1126) + org.apache.thrift.protocol.TList _list1132 = iprot.readListBegin(); + struct.success = new ArrayList(_list1132.size); + String _elem1133; + for (int _i1134 = 0; _i1134 < _list1132.size; ++_i1134) { - _elem1125 = iprot.readString(); - struct.success.add(_elem1125); + _elem1133 = iprot.readString(); + struct.success.add(_elem1133); } iprot.readListEnd(); } @@ -78514,9 +78506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1127 : struct.success) + for (String _iter1135 : struct.success) { - oprot.writeString(_iter1127); + oprot.writeString(_iter1135); } oprot.writeListEnd(); } @@ -78571,9 +78563,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1128 : struct.success) + for (String _iter1136 : struct.success) { - oprot.writeString(_iter1128); + oprot.writeString(_iter1136); } } } @@ -78594,13 +78586,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1129 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1129.size); - String _elem1130; - for (int _i1131 = 0; _i1131 < _list1129.size; ++_i1131) + org.apache.thrift.protocol.TList _list1137 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1137.size); + String _elem1138; + for (int _i1139 = 0; _i1139 < _list1137.size; ++_i1139) { - _elem1130 = iprot.readString(); - struct.success.add(_elem1130); + _elem1138 = iprot.readString(); + struct.success.add(_elem1138); } } struct.setSuccessIsSet(true); @@ -84459,14 +84451,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1132 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1132.size); - Partition _elem1133; - for (int _i1134 = 0; _i1134 < _list1132.size; ++_i1134) + org.apache.thrift.protocol.TList _list1140 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1140.size); + Partition _elem1141; + for (int _i1142 = 0; _i1142 < _list1140.size; ++_i1142) { - _elem1133 = new Partition(); - _elem1133.read(iprot); - struct.new_parts.add(_elem1133); + _elem1141 = new Partition(); + _elem1141.read(iprot); + struct.new_parts.add(_elem1141); } iprot.readListEnd(); } @@ -84492,9 +84484,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1135 : struct.new_parts) + for (Partition _iter1143 : struct.new_parts) { - _iter1135.write(oprot); + _iter1143.write(oprot); } oprot.writeListEnd(); } @@ -84525,9 +84517,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1136 : struct.new_parts) + for (Partition _iter1144 : struct.new_parts) { - _iter1136.write(oprot); + _iter1144.write(oprot); } } } @@ -84539,14 +84531,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1137 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1137.size); - Partition _elem1138; - for (int _i1139 = 0; _i1139 < _list1137.size; ++_i1139) + org.apache.thrift.protocol.TList _list1145 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1145.size); + Partition _elem1146; + for (int _i1147 = 0; _i1147 < _list1145.size; ++_i1147) { - _elem1138 = new Partition(); - _elem1138.read(iprot); - 
struct.new_parts.add(_elem1138); + _elem1146 = new Partition(); + _elem1146.read(iprot); + struct.new_parts.add(_elem1146); } } struct.setNew_partsIsSet(true); @@ -85547,14 +85539,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1140 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1140.size); - PartitionSpec _elem1141; - for (int _i1142 = 0; _i1142 < _list1140.size; ++_i1142) + org.apache.thrift.protocol.TList _list1148 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1148.size); + PartitionSpec _elem1149; + for (int _i1150 = 0; _i1150 < _list1148.size; ++_i1150) { - _elem1141 = new PartitionSpec(); - _elem1141.read(iprot); - struct.new_parts.add(_elem1141); + _elem1149 = new PartitionSpec(); + _elem1149.read(iprot); + struct.new_parts.add(_elem1149); } iprot.readListEnd(); } @@ -85580,9 +85572,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1143 : struct.new_parts) + for (PartitionSpec _iter1151 : struct.new_parts) { - _iter1143.write(oprot); + _iter1151.write(oprot); } oprot.writeListEnd(); } @@ -85613,9 +85605,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1144 : struct.new_parts) + for (PartitionSpec _iter1152 : struct.new_parts) { - _iter1144.write(oprot); + _iter1152.write(oprot); } } } @@ -85627,14 +85619,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1145 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1145.size); - PartitionSpec _elem1146; - for (int _i1147 = 0; _i1147 < _list1145.size; ++_i1147) + org.apache.thrift.protocol.TList _list1153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1153.size); + PartitionSpec _elem1154; + for (int _i1155 = 0; _i1155 < _list1153.size; ++_i1155) { - _elem1146 = new PartitionSpec(); - _elem1146.read(iprot); - struct.new_parts.add(_elem1146); + _elem1154 = new PartitionSpec(); + _elem1154.read(iprot); + struct.new_parts.add(_elem1154); } } struct.setNew_partsIsSet(true); @@ -86810,13 +86802,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1148 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1148.size); - String _elem1149; - for (int _i1150 = 0; _i1150 < _list1148.size; ++_i1150) + org.apache.thrift.protocol.TList _list1156 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1156.size); + String _elem1157; + for (int _i1158 = 0; _i1158 < _list1156.size; ++_i1158) { - _elem1149 = iprot.readString(); - struct.part_vals.add(_elem1149); + _elem1157 = iprot.readString(); + struct.part_vals.add(_elem1157); } iprot.readListEnd(); } @@ -86852,9 +86844,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1151 : struct.part_vals) + for (String _iter1159 : struct.part_vals) { - oprot.writeString(_iter1151); + oprot.writeString(_iter1159); } oprot.writeListEnd(); } @@ -86897,9 +86889,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1152 : struct.part_vals) + for (String _iter1160 : struct.part_vals) { - oprot.writeString(_iter1152); + oprot.writeString(_iter1160); } } } @@ -86919,13 +86911,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1153.size); - String _elem1154; - for (int _i1155 = 0; _i1155 < _list1153.size; ++_i1155) + org.apache.thrift.protocol.TList _list1161 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1161.size); + String _elem1162; + for (int _i1163 = 0; _i1163 < _list1161.size; ++_i1163) { - _elem1154 = iprot.readString(); - struct.part_vals.add(_elem1154); + _elem1162 = iprot.readString(); + struct.part_vals.add(_elem1162); } } struct.setPart_valsIsSet(true); @@ -89234,13 +89226,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1156 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1156.size); - String _elem1157; - for (int _i1158 = 0; _i1158 < _list1156.size; ++_i1158) + org.apache.thrift.protocol.TList _list1164 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1164.size); + String _elem1165; + for (int _i1166 = 0; _i1166 < _list1164.size; ++_i1166) { - _elem1157 = iprot.readString(); - struct.part_vals.add(_elem1157); + _elem1165 = iprot.readString(); + struct.part_vals.add(_elem1165); } iprot.readListEnd(); } @@ -89285,9 +89277,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1159 : struct.part_vals) + for (String _iter1167 : struct.part_vals) { - oprot.writeString(_iter1159); + oprot.writeString(_iter1167); } oprot.writeListEnd(); } @@ -89338,9 +89330,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1160 : struct.part_vals) + for (String _iter1168 : struct.part_vals) { - oprot.writeString(_iter1160); + oprot.writeString(_iter1168); } } } @@ -89363,13 +89355,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1161 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1161.size); - String _elem1162; - for (int _i1163 = 0; _i1163 < 
_list1161.size; ++_i1163) + org.apache.thrift.protocol.TList _list1169 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1169.size); + String _elem1170; + for (int _i1171 = 0; _i1171 < _list1169.size; ++_i1171) { - _elem1162 = iprot.readString(); - struct.part_vals.add(_elem1162); + _elem1170 = iprot.readString(); + struct.part_vals.add(_elem1170); } } struct.setPart_valsIsSet(true); @@ -93239,13 +93231,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1164 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1164.size); - String _elem1165; - for (int _i1166 = 0; _i1166 < _list1164.size; ++_i1166) + org.apache.thrift.protocol.TList _list1172 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1172.size); + String _elem1173; + for (int _i1174 = 0; _i1174 < _list1172.size; ++_i1174) { - _elem1165 = iprot.readString(); - struct.part_vals.add(_elem1165); + _elem1173 = iprot.readString(); + struct.part_vals.add(_elem1173); } iprot.readListEnd(); } @@ -93289,9 +93281,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1167 : struct.part_vals) + for (String _iter1175 : struct.part_vals) { - oprot.writeString(_iter1167); + oprot.writeString(_iter1175); } oprot.writeListEnd(); } @@ -93340,9 +93332,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1168 : struct.part_vals) + for (String _iter1176 : struct.part_vals) { - oprot.writeString(_iter1168); + oprot.writeString(_iter1176); } } } @@ -93365,13 +93357,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1169 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1169.size); - String _elem1170; - for (int _i1171 = 0; _i1171 < _list1169.size; ++_i1171) + org.apache.thrift.protocol.TList _list1177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1177.size); + String _elem1178; + for (int _i1179 = 0; _i1179 < _list1177.size; ++_i1179) { - _elem1170 = iprot.readString(); - struct.part_vals.add(_elem1170); + _elem1178 = iprot.readString(); + struct.part_vals.add(_elem1178); } } struct.setPart_valsIsSet(true); @@ -94610,13 +94602,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1172 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1172.size); - String _elem1173; - for (int _i1174 = 0; _i1174 < _list1172.size; ++_i1174) + org.apache.thrift.protocol.TList _list1180 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1180.size); + String _elem1181; + for (int _i1182 = 0; _i1182 < _list1180.size; ++_i1182) { - _elem1173 = iprot.readString(); - struct.part_vals.add(_elem1173); + 
_elem1181 = iprot.readString(); + struct.part_vals.add(_elem1181); } iprot.readListEnd(); } @@ -94669,9 +94661,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1175 : struct.part_vals) + for (String _iter1183 : struct.part_vals) { - oprot.writeString(_iter1175); + oprot.writeString(_iter1183); } oprot.writeListEnd(); } @@ -94728,9 +94720,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1176 : struct.part_vals) + for (String _iter1184 : struct.part_vals) { - oprot.writeString(_iter1176); + oprot.writeString(_iter1184); } } } @@ -94756,13 +94748,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1177.size); - String _elem1178; - for (int _i1179 = 0; _i1179 < _list1177.size; ++_i1179) + org.apache.thrift.protocol.TList _list1185 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1185.size); + String _elem1186; + for (int _i1187 = 0; _i1187 < _list1185.size; ++_i1187) { - _elem1178 = iprot.readString(); - struct.part_vals.add(_elem1178); + _elem1186 = iprot.readString(); + struct.part_vals.add(_elem1186); } } struct.setPart_valsIsSet(true); @@ -99364,13 +99356,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1180 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1180.size); - String _elem1181; - for (int _i1182 = 0; _i1182 < _list1180.size; ++_i1182) + org.apache.thrift.protocol.TList _list1188 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1188.size); + String _elem1189; + for (int _i1190 = 0; _i1190 < _list1188.size; ++_i1190) { - _elem1181 = iprot.readString(); - struct.part_vals.add(_elem1181); + _elem1189 = iprot.readString(); + struct.part_vals.add(_elem1189); } iprot.readListEnd(); } @@ -99406,9 +99398,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1183 : struct.part_vals) + for (String _iter1191 : struct.part_vals) { - oprot.writeString(_iter1183); + oprot.writeString(_iter1191); } oprot.writeListEnd(); } @@ -99451,9 +99443,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1184 : struct.part_vals) + for (String _iter1192 : struct.part_vals) { - oprot.writeString(_iter1184); + oprot.writeString(_iter1192); } } } @@ -99473,13 +99465,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1185 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1185.size); - String _elem1186; - for (int _i1187 = 0; _i1187 < _list1185.size; ++_i1187) + org.apache.thrift.protocol.TList _list1193 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1193.size); + String _elem1194; + for (int _i1195 = 0; _i1195 < _list1193.size; ++_i1195) { - _elem1186 = iprot.readString(); - struct.part_vals.add(_elem1186); + _elem1194 = iprot.readString(); + struct.part_vals.add(_elem1194); } } struct.setPart_valsIsSet(true); @@ -100697,15 +100689,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1188 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1188.size); - String _key1189; - String _val1190; - for (int _i1191 = 0; _i1191 < _map1188.size; ++_i1191) + org.apache.thrift.protocol.TMap _map1196 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1196.size); + String _key1197; + String _val1198; + for (int _i1199 = 0; _i1199 < _map1196.size; ++_i1199) { - _key1189 = iprot.readString(); - _val1190 = iprot.readString(); - struct.partitionSpecs.put(_key1189, _val1190); + _key1197 = iprot.readString(); + _val1198 = iprot.readString(); + struct.partitionSpecs.put(_key1197, _val1198); } iprot.readMapEnd(); } @@ -100763,10 +100755,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1192 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1200 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1192.getKey()); - oprot.writeString(_iter1192.getValue()); + oprot.writeString(_iter1200.getKey()); + oprot.writeString(_iter1200.getValue()); } oprot.writeMapEnd(); } @@ -100829,10 +100821,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1193 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1201 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1193.getKey()); - oprot.writeString(_iter1193.getValue()); + oprot.writeString(_iter1201.getKey()); + oprot.writeString(_iter1201.getValue()); } } } @@ -100856,15 +100848,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1194 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1194.size); - String _key1195; - String _val1196; - for (int _i1197 = 0; _i1197 < _map1194.size; ++_i1197) + org.apache.thrift.protocol.TMap _map1202 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1202.size); + String _key1203; + String _val1204; + for (int _i1205 = 0; _i1205 < _map1202.size; ++_i1205) { - 
_key1195 = iprot.readString(); - _val1196 = iprot.readString(); - struct.partitionSpecs.put(_key1195, _val1196); + _key1203 = iprot.readString(); + _val1204 = iprot.readString(); + struct.partitionSpecs.put(_key1203, _val1204); } } struct.setPartitionSpecsIsSet(true); @@ -102310,15 +102302,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1198 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1198.size); - String _key1199; - String _val1200; - for (int _i1201 = 0; _i1201 < _map1198.size; ++_i1201) + org.apache.thrift.protocol.TMap _map1206 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1206.size); + String _key1207; + String _val1208; + for (int _i1209 = 0; _i1209 < _map1206.size; ++_i1209) { - _key1199 = iprot.readString(); - _val1200 = iprot.readString(); - struct.partitionSpecs.put(_key1199, _val1200); + _key1207 = iprot.readString(); + _val1208 = iprot.readString(); + struct.partitionSpecs.put(_key1207, _val1208); } iprot.readMapEnd(); } @@ -102376,10 +102368,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1202 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1210 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1202.getKey()); - oprot.writeString(_iter1202.getValue()); + oprot.writeString(_iter1210.getKey()); + oprot.writeString(_iter1210.getValue()); } oprot.writeMapEnd(); } @@ -102442,10 +102434,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1203 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1211 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1203.getKey()); - oprot.writeString(_iter1203.getValue()); + oprot.writeString(_iter1211.getKey()); + oprot.writeString(_iter1211.getValue()); } } } @@ -102469,15 +102461,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1204 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1204.size); - String _key1205; - String _val1206; - for (int _i1207 = 0; _i1207 < _map1204.size; ++_i1207) + org.apache.thrift.protocol.TMap _map1212 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1212.size); + String _key1213; + String _val1214; + for (int _i1215 = 0; _i1215 < _map1212.size; ++_i1215) { - _key1205 = iprot.readString(); - _val1206 = iprot.readString(); - struct.partitionSpecs.put(_key1205, _val1206); + _key1213 = iprot.readString(); + _val1214 = iprot.readString(); + struct.partitionSpecs.put(_key1213, _val1214); } } struct.setPartitionSpecsIsSet(true); @@ -103142,14 +103134,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if 
(schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1208 = iprot.readListBegin(); - struct.success = new ArrayList(_list1208.size); - Partition _elem1209; - for (int _i1210 = 0; _i1210 < _list1208.size; ++_i1210) + org.apache.thrift.protocol.TList _list1216 = iprot.readListBegin(); + struct.success = new ArrayList(_list1216.size); + Partition _elem1217; + for (int _i1218 = 0; _i1218 < _list1216.size; ++_i1218) { - _elem1209 = new Partition(); - _elem1209.read(iprot); - struct.success.add(_elem1209); + _elem1217 = new Partition(); + _elem1217.read(iprot); + struct.success.add(_elem1217); } iprot.readListEnd(); } @@ -103211,9 +103203,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1211 : struct.success) + for (Partition _iter1219 : struct.success) { - _iter1211.write(oprot); + _iter1219.write(oprot); } oprot.writeListEnd(); } @@ -103276,9 +103268,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1212 : struct.success) + for (Partition _iter1220 : struct.success) { - _iter1212.write(oprot); + _iter1220.write(oprot); } } } @@ -103302,14 +103294,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1213 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1213.size); - Partition _elem1214; - for (int _i1215 = 0; _i1215 < _list1213.size; ++_i1215) + org.apache.thrift.protocol.TList _list1221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1221.size); + Partition _elem1222; + for (int _i1223 = 0; _i1223 < _list1221.size; ++_i1223) { - _elem1214 = new Partition(); - _elem1214.read(iprot); - struct.success.add(_elem1214); + _elem1222 = new Partition(); + _elem1222.read(iprot); + struct.success.add(_elem1222); } } struct.setSuccessIsSet(true); @@ -104008,13 +104000,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1216 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1216.size); - String _elem1217; - for (int _i1218 = 0; _i1218 < _list1216.size; ++_i1218) + org.apache.thrift.protocol.TList _list1224 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1224.size); + String _elem1225; + for (int _i1226 = 0; _i1226 < _list1224.size; ++_i1226) { - _elem1217 = iprot.readString(); - struct.part_vals.add(_elem1217); + _elem1225 = iprot.readString(); + struct.part_vals.add(_elem1225); } iprot.readListEnd(); } @@ -104034,13 +104026,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1219 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1219.size); - String _elem1220; - for (int _i1221 = 0; _i1221 < _list1219.size; ++_i1221) + 
org.apache.thrift.protocol.TList _list1227 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1227.size); + String _elem1228; + for (int _i1229 = 0; _i1229 < _list1227.size; ++_i1229) { - _elem1220 = iprot.readString(); - struct.group_names.add(_elem1220); + _elem1228 = iprot.readString(); + struct.group_names.add(_elem1228); } iprot.readListEnd(); } @@ -104076,9 +104068,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1222 : struct.part_vals) + for (String _iter1230 : struct.part_vals) { - oprot.writeString(_iter1222); + oprot.writeString(_iter1230); } oprot.writeListEnd(); } @@ -104093,9 +104085,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1223 : struct.group_names) + for (String _iter1231 : struct.group_names) { - oprot.writeString(_iter1223); + oprot.writeString(_iter1231); } oprot.writeListEnd(); } @@ -104144,9 +104136,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1224 : struct.part_vals) + for (String _iter1232 : struct.part_vals) { - oprot.writeString(_iter1224); + oprot.writeString(_iter1232); } } } @@ -104156,9 +104148,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1225 : struct.group_names) + for (String _iter1233 : struct.group_names) { - oprot.writeString(_iter1225); + oprot.writeString(_iter1233); } } } @@ -104178,13 +104170,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1226 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1226.size); - String _elem1227; - for (int _i1228 = 0; _i1228 < _list1226.size; ++_i1228) + org.apache.thrift.protocol.TList _list1234 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1234.size); + String _elem1235; + for (int _i1236 = 0; _i1236 < _list1234.size; ++_i1236) { - _elem1227 = iprot.readString(); - struct.part_vals.add(_elem1227); + _elem1235 = iprot.readString(); + struct.part_vals.add(_elem1235); } } struct.setPart_valsIsSet(true); @@ -104195,13 +104187,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1229 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1229.size); - String _elem1230; - for (int _i1231 = 0; _i1231 < _list1229.size; ++_i1231) + org.apache.thrift.protocol.TList _list1237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1237.size); + String _elem1238; + for (int _i1239 = 0; _i1239 < _list1237.size; ++_i1239) { - _elem1230 = 
iprot.readString(); - struct.group_names.add(_elem1230); + _elem1238 = iprot.readString(); + struct.group_names.add(_elem1238); } } struct.setGroup_namesIsSet(true); @@ -106970,14 +106962,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1232 = iprot.readListBegin(); - struct.success = new ArrayList(_list1232.size); - Partition _elem1233; - for (int _i1234 = 0; _i1234 < _list1232.size; ++_i1234) + org.apache.thrift.protocol.TList _list1240 = iprot.readListBegin(); + struct.success = new ArrayList(_list1240.size); + Partition _elem1241; + for (int _i1242 = 0; _i1242 < _list1240.size; ++_i1242) { - _elem1233 = new Partition(); - _elem1233.read(iprot); - struct.success.add(_elem1233); + _elem1241 = new Partition(); + _elem1241.read(iprot); + struct.success.add(_elem1241); } iprot.readListEnd(); } @@ -107021,9 +107013,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1235 : struct.success) + for (Partition _iter1243 : struct.success) { - _iter1235.write(oprot); + _iter1243.write(oprot); } oprot.writeListEnd(); } @@ -107070,9 +107062,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1236 : struct.success) + for (Partition _iter1244 : struct.success) { - _iter1236.write(oprot); + _iter1244.write(oprot); } } } @@ -107090,14 +107082,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1237.size); - Partition _elem1238; - for (int _i1239 = 0; _i1239 < _list1237.size; ++_i1239) + org.apache.thrift.protocol.TList _list1245 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1245.size); + Partition _elem1246; + for (int _i1247 = 0; _i1247 < _list1245.size; ++_i1247) { - _elem1238 = new Partition(); - _elem1238.read(iprot); - struct.success.add(_elem1238); + _elem1246 = new Partition(); + _elem1246.read(iprot); + struct.success.add(_elem1246); } } struct.setSuccessIsSet(true); @@ -107787,13 +107779,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1240 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1240.size); - String _elem1241; - for (int _i1242 = 0; _i1242 < _list1240.size; ++_i1242) + org.apache.thrift.protocol.TList _list1248 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1248.size); + String _elem1249; + for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) { - _elem1241 = iprot.readString(); - struct.group_names.add(_elem1241); + _elem1249 = iprot.readString(); + struct.group_names.add(_elem1249); } iprot.readListEnd(); } @@ -107837,9 +107829,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit 
oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1243 : struct.group_names) + for (String _iter1251 : struct.group_names) { - oprot.writeString(_iter1243); + oprot.writeString(_iter1251); } oprot.writeListEnd(); } @@ -107894,9 +107886,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1244 : struct.group_names) + for (String _iter1252 : struct.group_names) { - oprot.writeString(_iter1244); + oprot.writeString(_iter1252); } } } @@ -107924,13 +107916,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1245 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1245.size); - String _elem1246; - for (int _i1247 = 0; _i1247 < _list1245.size; ++_i1247) + org.apache.thrift.protocol.TList _list1253 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1253.size); + String _elem1254; + for (int _i1255 = 0; _i1255 < _list1253.size; ++_i1255) { - _elem1246 = iprot.readString(); - struct.group_names.add(_elem1246); + _elem1254 = iprot.readString(); + struct.group_names.add(_elem1254); } } struct.setGroup_namesIsSet(true); @@ -108417,14 +108409,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1248 = iprot.readListBegin(); - struct.success = new ArrayList(_list1248.size); - Partition _elem1249; - for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) + org.apache.thrift.protocol.TList _list1256 = iprot.readListBegin(); + struct.success = new ArrayList(_list1256.size); + Partition _elem1257; + for (int _i1258 = 0; _i1258 < _list1256.size; ++_i1258) { - _elem1249 = new Partition(); - _elem1249.read(iprot); - struct.success.add(_elem1249); + _elem1257 = new Partition(); + _elem1257.read(iprot); + struct.success.add(_elem1257); } iprot.readListEnd(); } @@ -108468,9 +108460,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1251 : struct.success) + for (Partition _iter1259 : struct.success) { - _iter1251.write(oprot); + _iter1259.write(oprot); } oprot.writeListEnd(); } @@ -108517,9 +108509,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1252 : struct.success) + for (Partition _iter1260 : struct.success) { - _iter1252.write(oprot); + _iter1260.write(oprot); } } } @@ -108537,14 +108529,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1253 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1253.size); - Partition _elem1254; - for (int 
_i1255 = 0; _i1255 < _list1253.size; ++_i1255) + org.apache.thrift.protocol.TList _list1261 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1261.size); + Partition _elem1262; + for (int _i1263 = 0; _i1263 < _list1261.size; ++_i1263) { - _elem1254 = new Partition(); - _elem1254.read(iprot); - struct.success.add(_elem1254); + _elem1262 = new Partition(); + _elem1262.read(iprot); + struct.success.add(_elem1262); } } struct.setSuccessIsSet(true); @@ -109607,14 +109599,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1256 = iprot.readListBegin(); - struct.success = new ArrayList(_list1256.size); - PartitionSpec _elem1257; - for (int _i1258 = 0; _i1258 < _list1256.size; ++_i1258) + org.apache.thrift.protocol.TList _list1264 = iprot.readListBegin(); + struct.success = new ArrayList(_list1264.size); + PartitionSpec _elem1265; + for (int _i1266 = 0; _i1266 < _list1264.size; ++_i1266) { - _elem1257 = new PartitionSpec(); - _elem1257.read(iprot); - struct.success.add(_elem1257); + _elem1265 = new PartitionSpec(); + _elem1265.read(iprot); + struct.success.add(_elem1265); } iprot.readListEnd(); } @@ -109658,9 +109650,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1259 : struct.success) + for (PartitionSpec _iter1267 : struct.success) { - _iter1259.write(oprot); + _iter1267.write(oprot); } oprot.writeListEnd(); } @@ -109707,9 +109699,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1260 : struct.success) + for (PartitionSpec _iter1268 : struct.success) { - _iter1260.write(oprot); + _iter1268.write(oprot); } } } @@ -109727,14 +109719,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1261 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1261.size); - PartitionSpec _elem1262; - for (int _i1263 = 0; _i1263 < _list1261.size; ++_i1263) + org.apache.thrift.protocol.TList _list1269 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1269.size); + PartitionSpec _elem1270; + for (int _i1271 = 0; _i1271 < _list1269.size; ++_i1271) { - _elem1262 = new PartitionSpec(); - _elem1262.read(iprot); - struct.success.add(_elem1262); + _elem1270 = new PartitionSpec(); + _elem1270.read(iprot); + struct.success.add(_elem1270); } } struct.setSuccessIsSet(true); @@ -110794,13 +110786,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1264 = iprot.readListBegin(); - struct.success = new ArrayList(_list1264.size); - String _elem1265; - for (int _i1266 = 0; _i1266 < _list1264.size; ++_i1266) + org.apache.thrift.protocol.TList _list1272 = 
iprot.readListBegin(); + struct.success = new ArrayList(_list1272.size); + String _elem1273; + for (int _i1274 = 0; _i1274 < _list1272.size; ++_i1274) { - _elem1265 = iprot.readString(); - struct.success.add(_elem1265); + _elem1273 = iprot.readString(); + struct.success.add(_elem1273); } iprot.readListEnd(); } @@ -110844,9 +110836,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1267 : struct.success) + for (String _iter1275 : struct.success) { - oprot.writeString(_iter1267); + oprot.writeString(_iter1275); } oprot.writeListEnd(); } @@ -110893,9 +110885,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1268 : struct.success) + for (String _iter1276 : struct.success) { - oprot.writeString(_iter1268); + oprot.writeString(_iter1276); } } } @@ -110913,13 +110905,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1269 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1269.size); - String _elem1270; - for (int _i1271 = 0; _i1271 < _list1269.size; ++_i1271) + org.apache.thrift.protocol.TList _list1277 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1277.size); + String _elem1278; + for (int _i1279 = 0; _i1279 < _list1277.size; ++_i1279) { - _elem1270 = iprot.readString(); - struct.success.add(_elem1270); + _elem1278 = iprot.readString(); + struct.success.add(_elem1278); } } struct.setSuccessIsSet(true); @@ -112450,13 +112442,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1272 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1272.size); - String _elem1273; - for (int _i1274 = 0; _i1274 < _list1272.size; ++_i1274) + org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1280.size); + String _elem1281; + for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) { - _elem1273 = iprot.readString(); - struct.part_vals.add(_elem1273); + _elem1281 = iprot.readString(); + struct.part_vals.add(_elem1281); } iprot.readListEnd(); } @@ -112500,9 +112492,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1275 : struct.part_vals) + for (String _iter1283 : struct.part_vals) { - oprot.writeString(_iter1275); + oprot.writeString(_iter1283); } oprot.writeListEnd(); } @@ -112551,9 +112543,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1276 : struct.part_vals) + for (String _iter1284 : struct.part_vals) { - oprot.writeString(_iter1276); + oprot.writeString(_iter1284); } } } @@ 
-112576,13 +112568,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1277 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1277.size); - String _elem1278; - for (int _i1279 = 0; _i1279 < _list1277.size; ++_i1279) + org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1285.size); + String _elem1286; + for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) { - _elem1278 = iprot.readString(); - struct.part_vals.add(_elem1278); + _elem1286 = iprot.readString(); + struct.part_vals.add(_elem1286); } } struct.setPart_valsIsSet(true); @@ -113073,14 +113065,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); - struct.success = new ArrayList(_list1280.size); - Partition _elem1281; - for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) + org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); + struct.success = new ArrayList(_list1288.size); + Partition _elem1289; + for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) { - _elem1281 = new Partition(); - _elem1281.read(iprot); - struct.success.add(_elem1281); + _elem1289 = new Partition(); + _elem1289.read(iprot); + struct.success.add(_elem1289); } iprot.readListEnd(); } @@ -113124,9 +113116,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1283 : struct.success) + for (Partition _iter1291 : struct.success) { - _iter1283.write(oprot); + _iter1291.write(oprot); } oprot.writeListEnd(); } @@ -113173,9 +113165,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1284 : struct.success) + for (Partition _iter1292 : struct.success) { - _iter1284.write(oprot); + _iter1292.write(oprot); } } } @@ -113193,14 +113185,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1285.size); - Partition _elem1286; - for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) + org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1293.size); + Partition _elem1294; + for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) { - _elem1286 = new Partition(); - _elem1286.read(iprot); - struct.success.add(_elem1286); + _elem1294 = new Partition(); + _elem1294.read(iprot); + struct.success.add(_elem1294); } } struct.setSuccessIsSet(true); @@ -113972,13 +113964,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1288.size); - String _elem1289; - for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) + org.apache.thrift.protocol.TList _list1296 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1296.size); + String _elem1297; + for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) { - _elem1289 = iprot.readString(); - struct.part_vals.add(_elem1289); + _elem1297 = iprot.readString(); + struct.part_vals.add(_elem1297); } iprot.readListEnd(); } @@ -114006,13 +113998,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1291 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1291.size); - String _elem1292; - for (int _i1293 = 0; _i1293 < _list1291.size; ++_i1293) + org.apache.thrift.protocol.TList _list1299 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1299.size); + String _elem1300; + for (int _i1301 = 0; _i1301 < _list1299.size; ++_i1301) { - _elem1292 = iprot.readString(); - struct.group_names.add(_elem1292); + _elem1300 = iprot.readString(); + struct.group_names.add(_elem1300); } iprot.readListEnd(); } @@ -114048,9 +114040,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1294 : struct.part_vals) + for (String _iter1302 : struct.part_vals) { - oprot.writeString(_iter1294); + oprot.writeString(_iter1302); } oprot.writeListEnd(); } @@ -114068,9 +114060,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1295 : struct.group_names) + for (String _iter1303 : struct.group_names) { - oprot.writeString(_iter1295); + oprot.writeString(_iter1303); } oprot.writeListEnd(); } @@ -114122,9 +114114,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1296 : struct.part_vals) + for (String _iter1304 : struct.part_vals) { - oprot.writeString(_iter1296); + oprot.writeString(_iter1304); } } } @@ -114137,9 +114129,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1297 : struct.group_names) + for (String _iter1305 : struct.group_names) { - oprot.writeString(_iter1297); + oprot.writeString(_iter1305); } } } @@ -114159,13 +114151,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1298 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1298.size); - String _elem1299; - for (int _i1300 = 0; _i1300 < _list1298.size; ++_i1300) + org.apache.thrift.protocol.TList _list1306 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + 
struct.part_vals = new ArrayList(_list1306.size); + String _elem1307; + for (int _i1308 = 0; _i1308 < _list1306.size; ++_i1308) { - _elem1299 = iprot.readString(); - struct.part_vals.add(_elem1299); + _elem1307 = iprot.readString(); + struct.part_vals.add(_elem1307); } } struct.setPart_valsIsSet(true); @@ -114180,13 +114172,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1301.size); - String _elem1302; - for (int _i1303 = 0; _i1303 < _list1301.size; ++_i1303) + org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1309.size); + String _elem1310; + for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) { - _elem1302 = iprot.readString(); - struct.group_names.add(_elem1302); + _elem1310 = iprot.readString(); + struct.group_names.add(_elem1310); } } struct.setGroup_namesIsSet(true); @@ -114673,14 +114665,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1304 = iprot.readListBegin(); - struct.success = new ArrayList(_list1304.size); - Partition _elem1305; - for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) + org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); + struct.success = new ArrayList(_list1312.size); + Partition _elem1313; + for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) { - _elem1305 = new Partition(); - _elem1305.read(iprot); - struct.success.add(_elem1305); + _elem1313 = new Partition(); + _elem1313.read(iprot); + struct.success.add(_elem1313); } iprot.readListEnd(); } @@ -114724,9 +114716,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1307 : struct.success) + for (Partition _iter1315 : struct.success) { - _iter1307.write(oprot); + _iter1315.write(oprot); } oprot.writeListEnd(); } @@ -114773,9 +114765,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1308 : struct.success) + for (Partition _iter1316 : struct.success) { - _iter1308.write(oprot); + _iter1316.write(oprot); } } } @@ -114793,14 +114785,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1309.size); - Partition _elem1310; - for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) + org.apache.thrift.protocol.TList _list1317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1317.size); + Partition _elem1318; + for (int _i1319 = 0; _i1319 < _list1317.size; ++_i1319) { - _elem1310 = new Partition(); - _elem1310.read(iprot); - 
struct.success.add(_elem1310); + _elem1318 = new Partition(); + _elem1318.read(iprot); + struct.success.add(_elem1318); } } struct.setSuccessIsSet(true); @@ -115393,13 +115385,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1312.size); - String _elem1313; - for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) + org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1320.size); + String _elem1321; + for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) { - _elem1313 = iprot.readString(); - struct.part_vals.add(_elem1313); + _elem1321 = iprot.readString(); + struct.part_vals.add(_elem1321); } iprot.readListEnd(); } @@ -115443,9 +115435,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1315 : struct.part_vals) + for (String _iter1323 : struct.part_vals) { - oprot.writeString(_iter1315); + oprot.writeString(_iter1323); } oprot.writeListEnd(); } @@ -115494,9 +115486,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1316 : struct.part_vals) + for (String _iter1324 : struct.part_vals) { - oprot.writeString(_iter1316); + oprot.writeString(_iter1324); } } } @@ -115519,13 +115511,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1317.size); - String _elem1318; - for (int _i1319 = 0; _i1319 < _list1317.size; ++_i1319) + org.apache.thrift.protocol.TList _list1325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1325.size); + String _elem1326; + for (int _i1327 = 0; _i1327 < _list1325.size; ++_i1327) { - _elem1318 = iprot.readString(); - struct.part_vals.add(_elem1318); + _elem1326 = iprot.readString(); + struct.part_vals.add(_elem1326); } } struct.setPart_valsIsSet(true); @@ -116013,13 +116005,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); - struct.success = new ArrayList(_list1320.size); - String _elem1321; - for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) + org.apache.thrift.protocol.TList _list1328 = iprot.readListBegin(); + struct.success = new ArrayList(_list1328.size); + String _elem1329; + for (int _i1330 = 0; _i1330 < _list1328.size; ++_i1330) { - _elem1321 = iprot.readString(); - struct.success.add(_elem1321); + _elem1329 = iprot.readString(); + struct.success.add(_elem1329); } iprot.readListEnd(); } @@ -116063,9 +116055,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1323 : struct.success) + for (String _iter1331 : struct.success) { - oprot.writeString(_iter1323); + oprot.writeString(_iter1331); } oprot.writeListEnd(); } @@ -116112,9 +116104,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1324 : struct.success) + for (String _iter1332 : struct.success) { - oprot.writeString(_iter1324); + oprot.writeString(_iter1332); } } } @@ -116132,13 +116124,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1325.size); - String _elem1326; - for (int _i1327 = 0; _i1327 < _list1325.size; ++_i1327) + org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1333.size); + String _elem1334; + for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) { - _elem1326 = iprot.readString(); - struct.success.add(_elem1326); + _elem1334 = iprot.readString(); + struct.success.add(_elem1334); } } struct.setSuccessIsSet(true); @@ -117305,14 +117297,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1328 = iprot.readListBegin(); - struct.success = new ArrayList(_list1328.size); - Partition _elem1329; - for (int _i1330 = 0; _i1330 < _list1328.size; ++_i1330) + org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); + struct.success = new ArrayList(_list1336.size); + Partition _elem1337; + for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) { - _elem1329 = new Partition(); - _elem1329.read(iprot); - struct.success.add(_elem1329); + _elem1337 = new Partition(); + _elem1337.read(iprot); + struct.success.add(_elem1337); } iprot.readListEnd(); } @@ -117356,9 +117348,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1331 : struct.success) + for (Partition _iter1339 : struct.success) { - _iter1331.write(oprot); + _iter1339.write(oprot); } oprot.writeListEnd(); } @@ -117405,9 +117397,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1332 : struct.success) + for (Partition _iter1340 : struct.success) { - _iter1332.write(oprot); + _iter1340.write(oprot); } } } @@ -117425,14 +117417,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1333.size); - Partition _elem1334; - for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) + org.apache.thrift.protocol.TList 
_list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1341.size); + Partition _elem1342; + for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) { - _elem1334 = new Partition(); - _elem1334.read(iprot); - struct.success.add(_elem1334); + _elem1342 = new Partition(); + _elem1342.read(iprot); + struct.success.add(_elem1342); } } struct.setSuccessIsSet(true); @@ -118599,14 +118591,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); - struct.success = new ArrayList(_list1336.size); - PartitionSpec _elem1337; - for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) + org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); + struct.success = new ArrayList(_list1344.size); + PartitionSpec _elem1345; + for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) { - _elem1337 = new PartitionSpec(); - _elem1337.read(iprot); - struct.success.add(_elem1337); + _elem1345 = new PartitionSpec(); + _elem1345.read(iprot); + struct.success.add(_elem1345); } iprot.readListEnd(); } @@ -118650,9 +118642,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1339 : struct.success) + for (PartitionSpec _iter1347 : struct.success) { - _iter1339.write(oprot); + _iter1347.write(oprot); } oprot.writeListEnd(); } @@ -118699,9 +118691,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1340 : struct.success) + for (PartitionSpec _iter1348 : struct.success) { - _iter1340.write(oprot); + _iter1348.write(oprot); } } } @@ -118719,14 +118711,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1341.size); - PartitionSpec _elem1342; - for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) + org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1349.size); + PartitionSpec _elem1350; + for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) { - _elem1342 = new PartitionSpec(); - _elem1342.read(iprot); - struct.success.add(_elem1342); + _elem1350 = new PartitionSpec(); + _elem1350.read(iprot); + struct.success.add(_elem1350); } } struct.setSuccessIsSet(true); @@ -121310,13 +121302,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); - struct.names = new ArrayList(_list1344.size); - String _elem1345; - for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) + org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); + struct.names = new ArrayList(_list1352.size); + String _elem1353; + for (int 
_i1354 = 0; _i1354 < _list1352.size; ++_i1354) { - _elem1345 = iprot.readString(); - struct.names.add(_elem1345); + _elem1353 = iprot.readString(); + struct.names.add(_elem1353); } iprot.readListEnd(); } @@ -121352,9 +121344,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1347 : struct.names) + for (String _iter1355 : struct.names) { - oprot.writeString(_iter1347); + oprot.writeString(_iter1355); } oprot.writeListEnd(); } @@ -121397,9 +121389,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1348 : struct.names) + for (String _iter1356 : struct.names) { - oprot.writeString(_iter1348); + oprot.writeString(_iter1356); } } } @@ -121419,13 +121411,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1349.size); - String _elem1350; - for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) + org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1357.size); + String _elem1358; + for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) { - _elem1350 = iprot.readString(); - struct.names.add(_elem1350); + _elem1358 = iprot.readString(); + struct.names.add(_elem1358); } } struct.setNamesIsSet(true); @@ -121912,14 +121904,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); - struct.success = new ArrayList(_list1352.size); - Partition _elem1353; - for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) + org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); + struct.success = new ArrayList(_list1360.size); + Partition _elem1361; + for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) { - _elem1353 = new Partition(); - _elem1353.read(iprot); - struct.success.add(_elem1353); + _elem1361 = new Partition(); + _elem1361.read(iprot); + struct.success.add(_elem1361); } iprot.readListEnd(); } @@ -121963,9 +121955,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1355 : struct.success) + for (Partition _iter1363 : struct.success) { - _iter1355.write(oprot); + _iter1363.write(oprot); } oprot.writeListEnd(); } @@ -122012,9 +122004,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1356 : struct.success) + for (Partition _iter1364 : struct.success) { - _iter1356.write(oprot); + _iter1364.write(oprot); } } } @@ -122032,14 +122024,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) 
{ { - org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1357.size); - Partition _elem1358; - for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) + org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1365.size); + Partition _elem1366; + for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) { - _elem1358 = new Partition(); - _elem1358.read(iprot); - struct.success.add(_elem1358); + _elem1366 = new Partition(); + _elem1366.read(iprot); + struct.success.add(_elem1366); } } struct.setSuccessIsSet(true); @@ -123589,14 +123581,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1360.size); - Partition _elem1361; - for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) + org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1368.size); + Partition _elem1369; + for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) { - _elem1361 = new Partition(); - _elem1361.read(iprot); - struct.new_parts.add(_elem1361); + _elem1369 = new Partition(); + _elem1369.read(iprot); + struct.new_parts.add(_elem1369); } iprot.readListEnd(); } @@ -123632,9 +123624,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1363 : struct.new_parts) + for (Partition _iter1371 : struct.new_parts) { - _iter1363.write(oprot); + _iter1371.write(oprot); } oprot.writeListEnd(); } @@ -123677,9 +123669,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1364 : struct.new_parts) + for (Partition _iter1372 : struct.new_parts) { - _iter1364.write(oprot); + _iter1372.write(oprot); } } } @@ -123699,14 +123691,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1365.size); - Partition _elem1366; - for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) + org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1373.size); + Partition _elem1374; + for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) { - _elem1366 = new Partition(); - _elem1366.read(iprot); - struct.new_parts.add(_elem1366); + _elem1374 = new Partition(); + _elem1374.read(iprot); + struct.new_parts.add(_elem1374); } } struct.setNew_partsIsSet(true); @@ -124185,10 +124177,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_res @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class 
alter_partitions_with_environment_context_args implements org.apache.thrift.TBase<alter_partitions_with_environment_context_args, alter_partitions_with_environment_context_args._Fields>, java.io.Serializable, Cloneable, Comparable<alter_partitions_with_environment_context_args> { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_partitions_with_environment_context_args"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField NEW_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("new_parts", org.apache.thrift.protocol.TType.LIST, (short)3); - private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -124196,17 +124185,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_res schemes.put(TupleScheme.class, new alter_partitions_with_environment_context_argsTupleSchemeFactory()); } - private String db_name; // required - private String tbl_name; // required - private List<Partition> new_parts; // required - private EnvironmentContext environment_context; // required + private AlterPartitionsRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"), - NEW_PARTS((short)3, "new_parts"), - ENVIRONMENT_CONTEXT((short)4, "environment_context"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -124221,14 +124204,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_res */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME - return DB_NAME; - case 2: // TBL_NAME - return TBL_NAME; - case 3: // NEW_PARTS - return NEW_PARTS; - case 4: // ENVIRONMENT_CONTEXT - return ENVIRONMENT_CONTEXT; + case 1: // REQ + return REQ; default: return null; } @@ -124272,15 +124249,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.NEW_PARTS, new org.apache.thrift.meta_data.FieldMetaData("new_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); - tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AlterPartitionsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partitions_with_environment_context_args.class, metaDataMap); } @@ -124289,37 +124259,18 @@ public alter_partitions_with_environment_context_args() { } public alter_partitions_with_environment_context_args( - String db_name, - String tbl_name, - List new_parts, - EnvironmentContext environment_context) + AlterPartitionsRequest req) { this(); - this.db_name = db_name; - this.tbl_name = tbl_name; - this.new_parts = new_parts; - this.environment_context = environment_context; + this.req = req; } /** * Performs a deep copy on other. 
*/ public alter_partitions_with_environment_context_args(alter_partitions_with_environment_context_args other) { - if (other.isSetDb_name()) { - this.db_name = other.db_name; - } - if (other.isSetTbl_name()) { - this.tbl_name = other.tbl_name; - } - if (other.isSetNew_parts()) { - List __this__new_parts = new ArrayList(other.new_parts.size()); - for (Partition other_element : other.new_parts) { - __this__new_parts.add(new Partition(other_element)); - } - this.new_parts = __this__new_parts; - } - if (other.isSetEnvironment_context()) { - this.environment_context = new EnvironmentContext(other.environment_context); + if (other.isSetReq()) { + this.req = new AlterPartitionsRequest(other.req); } } @@ -124329,150 +124280,39 @@ public alter_partitions_with_environment_context_args deepCopy() { @Override public void clear() { - this.db_name = null; - this.tbl_name = null; - this.new_parts = null; - this.environment_context = null; - } - - public String getDb_name() { - return this.db_name; - } - - public void setDb_name(String db_name) { - this.db_name = db_name; - } - - public void unsetDb_name() { - this.db_name = null; - } - - /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ - public boolean isSetDb_name() { - return this.db_name != null; - } - - public void setDb_nameIsSet(boolean value) { - if (!value) { - this.db_name = null; - } - } - - public String getTbl_name() { - return this.tbl_name; - } - - public void setTbl_name(String tbl_name) { - this.tbl_name = tbl_name; - } - - public void unsetTbl_name() { - this.tbl_name = null; - } - - /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ - public boolean isSetTbl_name() { - return this.tbl_name != null; - } - - public void setTbl_nameIsSet(boolean value) { - if (!value) { - this.tbl_name = null; - } - } - - public int getNew_partsSize() { - return (this.new_parts == null) ? 0 : this.new_parts.size(); - } - - public java.util.Iterator getNew_partsIterator() { - return (this.new_parts == null) ? 
null : this.new_parts.iterator(); - } - - public void addToNew_parts(Partition elem) { - if (this.new_parts == null) { - this.new_parts = new ArrayList(); - } - this.new_parts.add(elem); - } - - public List getNew_parts() { - return this.new_parts; - } - - public void setNew_parts(List new_parts) { - this.new_parts = new_parts; - } - - public void unsetNew_parts() { - this.new_parts = null; - } - - /** Returns true if field new_parts is set (has been assigned a value) and false otherwise */ - public boolean isSetNew_parts() { - return this.new_parts != null; - } - - public void setNew_partsIsSet(boolean value) { - if (!value) { - this.new_parts = null; - } + this.req = null; } - public EnvironmentContext getEnvironment_context() { - return this.environment_context; + public AlterPartitionsRequest getReq() { + return this.req; } - public void setEnvironment_context(EnvironmentContext environment_context) { - this.environment_context = environment_context; + public void setReq(AlterPartitionsRequest req) { + this.req = req; } - public void unsetEnvironment_context() { - this.environment_context = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field environment_context is set (has been assigned a value) and false otherwise */ - public boolean isSetEnvironment_context() { - return this.environment_context != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setEnvironment_contextIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.environment_context = null; + this.req = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case DB_NAME: - if (value == null) { - unsetDb_name(); - } else { - setDb_name((String)value); - } - break; - - case TBL_NAME: - if (value == null) { - unsetTbl_name(); - } else { - setTbl_name((String)value); - } - break; - - case NEW_PARTS: - if (value == null) { - unsetNew_parts(); - } else { - setNew_parts((List)value); - } - break; - - case ENVIRONMENT_CONTEXT: + case REQ: if (value == null) { - unsetEnvironment_context(); + unsetReq(); } else { - setEnvironment_context((EnvironmentContext)value); + setReq((AlterPartitionsRequest)value); } break; @@ -124481,17 +124321,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case DB_NAME: - return getDb_name(); - - case TBL_NAME: - return getTbl_name(); - - case NEW_PARTS: - return getNew_parts(); - - case ENVIRONMENT_CONTEXT: - return getEnvironment_context(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -124504,14 +124335,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case DB_NAME: - return isSetDb_name(); - case TBL_NAME: - return isSetTbl_name(); - case NEW_PARTS: - return isSetNew_parts(); - case ENVIRONMENT_CONTEXT: - return isSetEnvironment_context(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -124529,39 +124354,12 @@ public boolean equals(alter_partitions_with_environment_context_args that) { if (that == null) return false; - boolean this_present_db_name = true && this.isSetDb_name(); - boolean that_present_db_name = true && that.isSetDb_name(); - if (this_present_db_name || that_present_db_name) { - if (!(this_present_db_name && that_present_db_name)) - return false; - if (!this.db_name.equals(that.db_name)) - return false; - } - - boolean 
this_present_tbl_name = true && this.isSetTbl_name(); - boolean that_present_tbl_name = true && that.isSetTbl_name(); - if (this_present_tbl_name || that_present_tbl_name) { - if (!(this_present_tbl_name && that_present_tbl_name)) - return false; - if (!this.tbl_name.equals(that.tbl_name)) - return false; - } - - boolean this_present_new_parts = true && this.isSetNew_parts(); - boolean that_present_new_parts = true && that.isSetNew_parts(); - if (this_present_new_parts || that_present_new_parts) { - if (!(this_present_new_parts && that_present_new_parts)) - return false; - if (!this.new_parts.equals(that.new_parts)) - return false; - } - - boolean this_present_environment_context = true && this.isSetEnvironment_context(); - boolean that_present_environment_context = true && that.isSetEnvironment_context(); - if (this_present_environment_context || that_present_environment_context) { - if (!(this_present_environment_context && that_present_environment_context)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (!this.environment_context.equals(that.environment_context)) + if (!this.req.equals(that.req)) return false; } @@ -124572,25 +124370,10 @@ public boolean equals(alter_partitions_with_environment_context_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_db_name = true && (isSetDb_name()); - list.add(present_db_name); - if (present_db_name) - list.add(db_name); - - boolean present_tbl_name = true && (isSetTbl_name()); - list.add(present_tbl_name); - if (present_tbl_name) - list.add(tbl_name); - - boolean present_new_parts = true && (isSetNew_parts()); - list.add(present_new_parts); - if (present_new_parts) - list.add(new_parts); - - boolean present_environment_context = true && (isSetEnvironment_context()); - list.add(present_environment_context); - if (present_environment_context) - list.add(environment_context); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @@ -124603,42 +124386,12 @@ public int compareTo(alter_partitions_with_environment_context_args other) { int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDb_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTbl_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNew_parts()).compareTo(other.isSetNew_parts()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNew_parts()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.new_parts, other.new_parts); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetEnvironment_context()).compareTo(other.isSetEnvironment_context()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetEnvironment_context()) { - 
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environment_context, other.environment_context); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -124663,35 +124416,11 @@ public String toString() { StringBuilder sb = new StringBuilder("alter_partitions_with_environment_context_args("); boolean first = true; - sb.append("db_name:"); - if (this.db_name == null) { - sb.append("null"); - } else { - sb.append(this.db_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("tbl_name:"); - if (this.tbl_name == null) { - sb.append("null"); - } else { - sb.append(this.tbl_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("new_parts:"); - if (this.new_parts == null) { - sb.append("null"); - } else { - sb.append(this.new_parts); - } - first = false; - if (!first) sb.append(", "); - sb.append("environment_context:"); - if (this.environment_context == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.environment_context); + sb.append(this.req); } first = false; sb.append(")"); @@ -124701,8 +124430,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (environment_context != null) { - environment_context.validate(); + if (req != null) { + req.validate(); } } @@ -124740,46 +124469,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi break; } switch (schemeField.id) { - case 1: // DB_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TBL_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // NEW_PARTS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1368.size); - Partition _elem1369; - for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) - { - _elem1369 = new Partition(); - _elem1369.read(iprot); - struct.new_parts.add(_elem1369); - } - iprot.readListEnd(); - } - struct.setNew_partsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // ENVIRONMENT_CONTEXT + case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.environment_context = new EnvironmentContext(); - struct.environment_context.read(iprot); - struct.setEnvironment_contextIsSet(true); + struct.req = new AlterPartitionsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -124797,31 +124491,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.db_name != null) { - oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(struct.db_name); - oprot.writeFieldEnd(); - } - if (struct.tbl_name != null) { - oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); - 
oprot.writeString(struct.tbl_name); - oprot.writeFieldEnd(); - } - if (struct.new_parts != null) { - oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1371 : struct.new_parts) - { - _iter1371.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.environment_context != null) { - oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC); - struct.environment_context.write(oprot); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -124842,69 +124514,23 @@ public alter_partitions_with_environment_context_argsTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDb_name()) { + if (struct.isSetReq()) { optionals.set(0); } - if (struct.isSetTbl_name()) { - optionals.set(1); - } - if (struct.isSetNew_parts()) { - optionals.set(2); - } - if (struct.isSetEnvironment_context()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetDb_name()) { - oprot.writeString(struct.db_name); - } - if (struct.isSetTbl_name()) { - oprot.writeString(struct.tbl_name); - } - if (struct.isSetNew_parts()) { - { - oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1372 : struct.new_parts) - { - _iter1372.write(oprot); - } - } - } - if (struct.isSetEnvironment_context()) { - struct.environment_context.write(oprot); + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } - if (incoming.get(1)) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } - if (incoming.get(2)) { - { - org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1373.size); - Partition _elem1374; - for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) - { - _elem1374 = new Partition(); - _elem1374.read(iprot); - struct.new_parts.add(_elem1374); - } - } - struct.setNew_partsIsSet(true); - } - if (incoming.get(3)) { - struct.environment_context = new EnvironmentContext(); - struct.environment_context.read(iprot); - struct.setEnvironment_contextIsSet(true); + struct.req = new AlterPartitionsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } @@ -124914,6 +124540,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("alter_partitions_with_environment_context_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); @@ -124923,11 +124550,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit schemes.put(TupleScheme.class, new alter_partitions_with_environment_context_resultTupleSchemeFactory()); } + private AlterPartitionsResponse success; // required private InvalidOperationException o1; // required private MetaException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), O1((short)1, "o1"), O2((short)2, "o2"); @@ -124944,6 +124573,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; case 1: // O1 return O1; case 2: // O2 @@ -124991,6 +124622,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AlterPartitionsResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -125003,10 +124636,12 @@ public alter_partitions_with_environment_context_result() { } public alter_partitions_with_environment_context_result( + AlterPartitionsResponse success, InvalidOperationException o1, MetaException o2) { this(); + this.success = success; this.o1 = o1; this.o2 = o2; } @@ -125015,6 +124650,9 @@ public alter_partitions_with_environment_context_result( * Performs a deep copy on other. 
*/ public alter_partitions_with_environment_context_result(alter_partitions_with_environment_context_result other) { + if (other.isSetSuccess()) { + this.success = new AlterPartitionsResponse(other.success); + } if (other.isSetO1()) { this.o1 = new InvalidOperationException(other.o1); } @@ -125029,10 +124667,34 @@ public alter_partitions_with_environment_context_result deepCopy() { @Override public void clear() { + this.success = null; this.o1 = null; this.o2 = null; } + public AlterPartitionsResponse getSuccess() { + return this.success; + } + + public void setSuccess(AlterPartitionsResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + public InvalidOperationException getO1() { return this.o1; } @@ -125081,6 +124743,14 @@ public void setO2IsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((AlterPartitionsResponse)value); + } + break; + case O1: if (value == null) { unsetO1(); @@ -125102,6 +124772,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + case O1: return getO1(); @@ -125119,6 +124792,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); case O1: return isSetO1(); case O2: @@ -125140,6 +124815,15 @@ public boolean equals(alter_partitions_with_environment_context_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -125165,6 +124849,11 @@ public boolean equals(alter_partitions_with_environment_context_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + boolean present_o1 = true && (isSetO1()); list.add(present_o1); if (present_o1) @@ -125186,6 +124875,16 @@ public int compareTo(alter_partitions_with_environment_context_result other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -125226,6 +124925,14 @@ public String toString() { StringBuilder sb = new StringBuilder("alter_partitions_with_environment_context_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) 
sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -125248,6 +124955,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -125284,6 +124994,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new AlterPartitionsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o1 = new InvalidOperationException(); @@ -125315,6 +125034,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -125343,13 +125067,19 @@ public alter_partitions_with_environment_context_resultTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } if (struct.isSetO1()) { struct.o1.write(oprot); } @@ -125361,13 +125091,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi @Override public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { + struct.success = new AlterPartitionsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.o1 = new InvalidOperationException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index ec26ccad14..38895e3147 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -714,14 +714,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function alter_partitions($db_name, $tbl_name, array $new_parts); /** - * @param string $db_name - * @param string $tbl_name - * @param \metastore\Partition[] $new_parts - * @param \metastore\EnvironmentContext 
$environment_context + * @param \metastore\AlterPartitionsRequest $req + * @return \metastore\AlterPartitionsResponse * @throws \metastore\InvalidOperationException * @throws \metastore\MetaException */ - public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context); + public function alter_partitions_with_environment_context(\metastore\AlterPartitionsRequest $req); /** * @param string $db_name * @param string $tbl_name @@ -6394,19 +6392,16 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } - public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context) + public function alter_partitions_with_environment_context(\metastore\AlterPartitionsRequest $req) { - $this->send_alter_partitions_with_environment_context($db_name, $tbl_name, $new_parts, $environment_context); - $this->recv_alter_partitions_with_environment_context(); + $this->send_alter_partitions_with_environment_context($req); + return $this->recv_alter_partitions_with_environment_context(); } - public function send_alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context) + public function send_alter_partitions_with_environment_context(\metastore\AlterPartitionsRequest $req) { $args = new \metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_args(); - $args->db_name = $db_name; - $args->tbl_name = $tbl_name; - $args->new_parts = $new_parts; - $args->environment_context = $environment_context; + $args->req = $req; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6442,13 +6437,16 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $result->read($this->input_); $this->input_->readMessageEnd(); } + if ($result->success !== null) { + return $result->success; + } if ($result->o1 !== null) { throw $result->o1; } if ($result->o2 !== null) { throw $result->o2; } - return; + throw new \Exception("alter_partitions_with_environment_context failed: unknown result"); } public function alter_partition_with_environment_context($db_name, $tbl_name, \metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context) @@ -15440,14 +15438,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size820 = 0; - $_etype823 = 0; - $xfer += $input->readListBegin($_etype823, $_size820); - for ($_i824 = 0; $_i824 < $_size820; ++$_i824) + $_size827 = 0; + $_etype830 = 0; + $xfer += $input->readListBegin($_etype830, $_size827); + for ($_i831 = 0; $_i831 < $_size827; ++$_i831) { - $elem825 = null; - $xfer += $input->readString($elem825); - $this->success []= $elem825; + $elem832 = null; + $xfer += $input->readString($elem832); + $this->success []= $elem832; } $xfer += $input->readListEnd(); } else { @@ -15483,9 +15481,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter826) + foreach ($this->success as $iter833) { - $xfer += $output->writeString($iter826); + $xfer += $output->writeString($iter833); } } $output->writeListEnd(); @@ -15616,14 +15614,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = 
array(); - $_size827 = 0; - $_etype830 = 0; - $xfer += $input->readListBegin($_etype830, $_size827); - for ($_i831 = 0; $_i831 < $_size827; ++$_i831) + $_size834 = 0; + $_etype837 = 0; + $xfer += $input->readListBegin($_etype837, $_size834); + for ($_i838 = 0; $_i838 < $_size834; ++$_i838) { - $elem832 = null; - $xfer += $input->readString($elem832); - $this->success []= $elem832; + $elem839 = null; + $xfer += $input->readString($elem839); + $this->success []= $elem839; } $xfer += $input->readListEnd(); } else { @@ -15659,9 +15657,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter833) + foreach ($this->success as $iter840) { - $xfer += $output->writeString($iter833); + $xfer += $output->writeString($iter840); } } $output->writeListEnd(); @@ -16662,18 +16660,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size834 = 0; - $_ktype835 = 0; - $_vtype836 = 0; - $xfer += $input->readMapBegin($_ktype835, $_vtype836, $_size834); - for ($_i838 = 0; $_i838 < $_size834; ++$_i838) + $_size841 = 0; + $_ktype842 = 0; + $_vtype843 = 0; + $xfer += $input->readMapBegin($_ktype842, $_vtype843, $_size841); + for ($_i845 = 0; $_i845 < $_size841; ++$_i845) { - $key839 = ''; - $val840 = new \metastore\Type(); - $xfer += $input->readString($key839); - $val840 = new \metastore\Type(); - $xfer += $val840->read($input); - $this->success[$key839] = $val840; + $key846 = ''; + $val847 = new \metastore\Type(); + $xfer += $input->readString($key846); + $val847 = new \metastore\Type(); + $xfer += $val847->read($input); + $this->success[$key846] = $val847; } $xfer += $input->readMapEnd(); } else { @@ -16709,10 +16707,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter841 => $viter842) + foreach ($this->success as $kiter848 => $viter849) { - $xfer += $output->writeString($kiter841); - $xfer += $viter842->write($output); + $xfer += $output->writeString($kiter848); + $xfer += $viter849->write($output); } } $output->writeMapEnd(); @@ -16916,15 +16914,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size843 = 0; - $_etype846 = 0; - $xfer += $input->readListBegin($_etype846, $_size843); - for ($_i847 = 0; $_i847 < $_size843; ++$_i847) + $_size850 = 0; + $_etype853 = 0; + $xfer += $input->readListBegin($_etype853, $_size850); + for ($_i854 = 0; $_i854 < $_size850; ++$_i854) { - $elem848 = null; - $elem848 = new \metastore\FieldSchema(); - $xfer += $elem848->read($input); - $this->success []= $elem848; + $elem855 = null; + $elem855 = new \metastore\FieldSchema(); + $xfer += $elem855->read($input); + $this->success []= $elem855; } $xfer += $input->readListEnd(); } else { @@ -16976,9 +16974,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter849) + foreach ($this->success as $iter856) { - $xfer += $iter849->write($output); + $xfer += $iter856->write($output); } } $output->writeListEnd(); @@ -17220,15 +17218,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size850 = 0; - $_etype853 = 0; - $xfer += $input->readListBegin($_etype853, $_size850); - for ($_i854 = 0; $_i854 < $_size850; 
++$_i854) + $_size857 = 0; + $_etype860 = 0; + $xfer += $input->readListBegin($_etype860, $_size857); + for ($_i861 = 0; $_i861 < $_size857; ++$_i861) { - $elem855 = null; - $elem855 = new \metastore\FieldSchema(); - $xfer += $elem855->read($input); - $this->success []= $elem855; + $elem862 = null; + $elem862 = new \metastore\FieldSchema(); + $xfer += $elem862->read($input); + $this->success []= $elem862; } $xfer += $input->readListEnd(); } else { @@ -17280,9 +17278,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter856) + foreach ($this->success as $iter863) { - $xfer += $iter856->write($output); + $xfer += $iter863->write($output); } } $output->writeListEnd(); @@ -17496,15 +17494,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size857 = 0; - $_etype860 = 0; - $xfer += $input->readListBegin($_etype860, $_size857); - for ($_i861 = 0; $_i861 < $_size857; ++$_i861) + $_size864 = 0; + $_etype867 = 0; + $xfer += $input->readListBegin($_etype867, $_size864); + for ($_i868 = 0; $_i868 < $_size864; ++$_i868) { - $elem862 = null; - $elem862 = new \metastore\FieldSchema(); - $xfer += $elem862->read($input); - $this->success []= $elem862; + $elem869 = null; + $elem869 = new \metastore\FieldSchema(); + $xfer += $elem869->read($input); + $this->success []= $elem869; } $xfer += $input->readListEnd(); } else { @@ -17556,9 +17554,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter863) + foreach ($this->success as $iter870) { - $xfer += $iter863->write($output); + $xfer += $iter870->write($output); } } $output->writeListEnd(); @@ -17800,15 +17798,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size864 = 0; - $_etype867 = 0; - $xfer += $input->readListBegin($_etype867, $_size864); - for ($_i868 = 0; $_i868 < $_size864; ++$_i868) + $_size871 = 0; + $_etype874 = 0; + $xfer += $input->readListBegin($_etype874, $_size871); + for ($_i875 = 0; $_i875 < $_size871; ++$_i875) { - $elem869 = null; - $elem869 = new \metastore\FieldSchema(); - $xfer += $elem869->read($input); - $this->success []= $elem869; + $elem876 = null; + $elem876 = new \metastore\FieldSchema(); + $xfer += $elem876->read($input); + $this->success []= $elem876; } $xfer += $input->readListEnd(); } else { @@ -17860,9 +17858,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter870) + foreach ($this->success as $iter877) { - $xfer += $iter870->write($output); + $xfer += $iter877->write($output); } } $output->writeListEnd(); @@ -18534,15 +18532,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size871 = 0; - $_etype874 = 0; - $xfer += $input->readListBegin($_etype874, $_size871); - for ($_i875 = 0; $_i875 < $_size871; ++$_i875) + $_size878 = 0; + $_etype881 = 0; + $xfer += $input->readListBegin($_etype881, $_size878); + for ($_i882 = 0; $_i882 < $_size878; ++$_i882) { - $elem876 = null; - $elem876 = new \metastore\SQLPrimaryKey(); - $xfer += $elem876->read($input); - $this->primaryKeys []= $elem876; + $elem883 = null; + $elem883 = new 
\metastore\SQLPrimaryKey(); + $xfer += $elem883->read($input); + $this->primaryKeys []= $elem883; } $xfer += $input->readListEnd(); } else { @@ -18552,15 +18550,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size877 = 0; - $_etype880 = 0; - $xfer += $input->readListBegin($_etype880, $_size877); - for ($_i881 = 0; $_i881 < $_size877; ++$_i881) + $_size884 = 0; + $_etype887 = 0; + $xfer += $input->readListBegin($_etype887, $_size884); + for ($_i888 = 0; $_i888 < $_size884; ++$_i888) { - $elem882 = null; - $elem882 = new \metastore\SQLForeignKey(); - $xfer += $elem882->read($input); - $this->foreignKeys []= $elem882; + $elem889 = null; + $elem889 = new \metastore\SQLForeignKey(); + $xfer += $elem889->read($input); + $this->foreignKeys []= $elem889; } $xfer += $input->readListEnd(); } else { @@ -18570,15 +18568,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size883 = 0; - $_etype886 = 0; - $xfer += $input->readListBegin($_etype886, $_size883); - for ($_i887 = 0; $_i887 < $_size883; ++$_i887) + $_size890 = 0; + $_etype893 = 0; + $xfer += $input->readListBegin($_etype893, $_size890); + for ($_i894 = 0; $_i894 < $_size890; ++$_i894) { - $elem888 = null; - $elem888 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem888->read($input); - $this->uniqueConstraints []= $elem888; + $elem895 = null; + $elem895 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem895->read($input); + $this->uniqueConstraints []= $elem895; } $xfer += $input->readListEnd(); } else { @@ -18588,15 +18586,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size889 = 0; - $_etype892 = 0; - $xfer += $input->readListBegin($_etype892, $_size889); - for ($_i893 = 0; $_i893 < $_size889; ++$_i893) + $_size896 = 0; + $_etype899 = 0; + $xfer += $input->readListBegin($_etype899, $_size896); + for ($_i900 = 0; $_i900 < $_size896; ++$_i900) { - $elem894 = null; - $elem894 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem894->read($input); - $this->notNullConstraints []= $elem894; + $elem901 = null; + $elem901 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem901->read($input); + $this->notNullConstraints []= $elem901; } $xfer += $input->readListEnd(); } else { @@ -18606,15 +18604,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size895 = 0; - $_etype898 = 0; - $xfer += $input->readListBegin($_etype898, $_size895); - for ($_i899 = 0; $_i899 < $_size895; ++$_i899) + $_size902 = 0; + $_etype905 = 0; + $xfer += $input->readListBegin($_etype905, $_size902); + for ($_i906 = 0; $_i906 < $_size902; ++$_i906) { - $elem900 = null; - $elem900 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem900->read($input); - $this->defaultConstraints []= $elem900; + $elem907 = null; + $elem907 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem907->read($input); + $this->defaultConstraints []= $elem907; } $xfer += $input->readListEnd(); } else { @@ -18624,15 +18622,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 7: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size901 = 0; - $_etype904 = 0; - $xfer += $input->readListBegin($_etype904, $_size901); - for ($_i905 = 0; $_i905 < $_size901; ++$_i905) + $_size908 
= 0; + $_etype911 = 0; + $xfer += $input->readListBegin($_etype911, $_size908); + for ($_i912 = 0; $_i912 < $_size908; ++$_i912) { - $elem906 = null; - $elem906 = new \metastore\SQLCheckConstraint(); - $xfer += $elem906->read($input); - $this->checkConstraints []= $elem906; + $elem913 = null; + $elem913 = new \metastore\SQLCheckConstraint(); + $xfer += $elem913->read($input); + $this->checkConstraints []= $elem913; } $xfer += $input->readListEnd(); } else { @@ -18668,9 +18666,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter907) + foreach ($this->primaryKeys as $iter914) { - $xfer += $iter907->write($output); + $xfer += $iter914->write($output); } } $output->writeListEnd(); @@ -18685,9 +18683,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter908) + foreach ($this->foreignKeys as $iter915) { - $xfer += $iter908->write($output); + $xfer += $iter915->write($output); } } $output->writeListEnd(); @@ -18702,9 +18700,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter909) + foreach ($this->uniqueConstraints as $iter916) { - $xfer += $iter909->write($output); + $xfer += $iter916->write($output); } } $output->writeListEnd(); @@ -18719,9 +18717,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter910) + foreach ($this->notNullConstraints as $iter917) { - $xfer += $iter910->write($output); + $xfer += $iter917->write($output); } } $output->writeListEnd(); @@ -18736,9 +18734,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter911) + foreach ($this->defaultConstraints as $iter918) { - $xfer += $iter911->write($output); + $xfer += $iter918->write($output); } } $output->writeListEnd(); @@ -18753,9 +18751,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter912) + foreach ($this->checkConstraints as $iter919) { - $xfer += $iter912->write($output); + $xfer += $iter919->write($output); } } $output->writeListEnd(); @@ -20755,14 +20753,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size913 = 0; - $_etype916 = 0; - $xfer += $input->readListBegin($_etype916, $_size913); - for ($_i917 = 0; $_i917 < $_size913; ++$_i917) + $_size920 = 0; + $_etype923 = 0; + $xfer += $input->readListBegin($_etype923, $_size920); + for ($_i924 = 0; $_i924 < $_size920; ++$_i924) { - $elem918 = null; - $xfer += $input->readString($elem918); - $this->partNames []= $elem918; + $elem925 = null; + $xfer += $input->readString($elem925); + $this->partNames []= $elem925; } $xfer += $input->readListEnd(); } else { @@ -20800,9 +20798,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter919) + foreach ($this->partNames as $iter926) { - $xfer += $output->writeString($iter919); + 
$xfer += $output->writeString($iter926); } } $output->writeListEnd(); @@ -21053,14 +21051,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size920 = 0; - $_etype923 = 0; - $xfer += $input->readListBegin($_etype923, $_size920); - for ($_i924 = 0; $_i924 < $_size920; ++$_i924) + $_size927 = 0; + $_etype930 = 0; + $xfer += $input->readListBegin($_etype930, $_size927); + for ($_i931 = 0; $_i931 < $_size927; ++$_i931) { - $elem925 = null; - $xfer += $input->readString($elem925); - $this->success []= $elem925; + $elem932 = null; + $xfer += $input->readString($elem932); + $this->success []= $elem932; } $xfer += $input->readListEnd(); } else { @@ -21096,9 +21094,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter926) + foreach ($this->success as $iter933) { - $xfer += $output->writeString($iter926); + $xfer += $output->writeString($iter933); } } $output->writeListEnd(); @@ -21300,14 +21298,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size927 = 0; - $_etype930 = 0; - $xfer += $input->readListBegin($_etype930, $_size927); - for ($_i931 = 0; $_i931 < $_size927; ++$_i931) + $_size934 = 0; + $_etype937 = 0; + $xfer += $input->readListBegin($_etype937, $_size934); + for ($_i938 = 0; $_i938 < $_size934; ++$_i938) { - $elem932 = null; - $xfer += $input->readString($elem932); - $this->success []= $elem932; + $elem939 = null; + $xfer += $input->readString($elem939); + $this->success []= $elem939; } $xfer += $input->readListEnd(); } else { @@ -21343,9 +21341,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter933) + foreach ($this->success as $iter940) { - $xfer += $output->writeString($iter933); + $xfer += $output->writeString($iter940); } } $output->writeListEnd(); @@ -21501,14 +21499,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size934 = 0; - $_etype937 = 0; - $xfer += $input->readListBegin($_etype937, $_size934); - for ($_i938 = 0; $_i938 < $_size934; ++$_i938) + $_size941 = 0; + $_etype944 = 0; + $xfer += $input->readListBegin($_etype944, $_size941); + for ($_i945 = 0; $_i945 < $_size941; ++$_i945) { - $elem939 = null; - $xfer += $input->readString($elem939); - $this->success []= $elem939; + $elem946 = null; + $xfer += $input->readString($elem946); + $this->success []= $elem946; } $xfer += $input->readListEnd(); } else { @@ -21544,9 +21542,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter940) + foreach ($this->success as $iter947) { - $xfer += $output->writeString($iter940); + $xfer += $output->writeString($iter947); } } $output->writeListEnd(); @@ -21651,14 +21649,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size941 = 0; - $_etype944 = 0; - $xfer += $input->readListBegin($_etype944, $_size941); - for ($_i945 = 0; $_i945 < $_size941; ++$_i945) + $_size948 = 0; + $_etype951 = 0; + $xfer += $input->readListBegin($_etype951, $_size948); + for ($_i952 = 0; $_i952 < $_size948; ++$_i952) { - $elem946 = null; - $xfer += $input->readString($elem946); - 
$this->tbl_types []= $elem946; + $elem953 = null; + $xfer += $input->readString($elem953); + $this->tbl_types []= $elem953; } $xfer += $input->readListEnd(); } else { @@ -21696,9 +21694,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter947) + foreach ($this->tbl_types as $iter954) { - $xfer += $output->writeString($iter947); + $xfer += $output->writeString($iter954); } } $output->writeListEnd(); @@ -21775,15 +21773,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size948 = 0; - $_etype951 = 0; - $xfer += $input->readListBegin($_etype951, $_size948); - for ($_i952 = 0; $_i952 < $_size948; ++$_i952) + $_size955 = 0; + $_etype958 = 0; + $xfer += $input->readListBegin($_etype958, $_size955); + for ($_i959 = 0; $_i959 < $_size955; ++$_i959) { - $elem953 = null; - $elem953 = new \metastore\TableMeta(); - $xfer += $elem953->read($input); - $this->success []= $elem953; + $elem960 = null; + $elem960 = new \metastore\TableMeta(); + $xfer += $elem960->read($input); + $this->success []= $elem960; } $xfer += $input->readListEnd(); } else { @@ -21819,9 +21817,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter954) + foreach ($this->success as $iter961) { - $xfer += $iter954->write($output); + $xfer += $iter961->write($output); } } $output->writeListEnd(); @@ -21977,14 +21975,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size955 = 0; - $_etype958 = 0; - $xfer += $input->readListBegin($_etype958, $_size955); - for ($_i959 = 0; $_i959 < $_size955; ++$_i959) + $_size962 = 0; + $_etype965 = 0; + $xfer += $input->readListBegin($_etype965, $_size962); + for ($_i966 = 0; $_i966 < $_size962; ++$_i966) { - $elem960 = null; - $xfer += $input->readString($elem960); - $this->success []= $elem960; + $elem967 = null; + $xfer += $input->readString($elem967); + $this->success []= $elem967; } $xfer += $input->readListEnd(); } else { @@ -22020,9 +22018,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter961) + foreach ($this->success as $iter968) { - $xfer += $output->writeString($iter961); + $xfer += $output->writeString($iter968); } } $output->writeListEnd(); @@ -22337,14 +22335,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size962 = 0; - $_etype965 = 0; - $xfer += $input->readListBegin($_etype965, $_size962); - for ($_i966 = 0; $_i966 < $_size962; ++$_i966) + $_size969 = 0; + $_etype972 = 0; + $xfer += $input->readListBegin($_etype972, $_size969); + for ($_i973 = 0; $_i973 < $_size969; ++$_i973) { - $elem967 = null; - $xfer += $input->readString($elem967); - $this->tbl_names []= $elem967; + $elem974 = null; + $xfer += $input->readString($elem974); + $this->tbl_names []= $elem974; } $xfer += $input->readListEnd(); } else { @@ -22377,9 +22375,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter968) + foreach ($this->tbl_names as $iter975) { - $xfer += $output->writeString($iter968); + $xfer += $output->writeString($iter975); } } $output->writeListEnd(); @@ 
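Most of the churn in this generated file is mechanical: the Thrift PHP generator numbers its temporaries ($_size, $_etype, $_i, $elem, $iter) sequentially across the whole file, so adding new methods shifts every suffix that follows, and these hunks rename variables without changing behavior. All of them instantiate the same read-loop shape. A minimal sketch with the suffixes stripped, assuming $input implements Thrift's TProtocol interface as in the generated code:

  <?php
  // Generic form of the generated list deserializer. $input is assumed
  // to be a \Thrift\Protocol\TProtocol; its read* methods take their
  // outputs by reference and return the byte count consumed, which the
  // generated code accumulates in $xfer.
  function readStringList($input) {
    $xfer = 0;
    $list = array();
    $size = 0;
    $etype = 0;
    $xfer += $input->readListBegin($etype, $size);
    for ($i = 0; $i < $size; ++$i) {
      $elem = null;
      $xfer += $input->readString($elem);  // struct lists call $elem->read($input) instead
      $list []= $elem;
    }
    $xfer += $input->readListEnd();
    return $list;
  }
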
-22444,15 +22442,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size969 = 0; - $_etype972 = 0; - $xfer += $input->readListBegin($_etype972, $_size969); - for ($_i973 = 0; $_i973 < $_size969; ++$_i973) + $_size976 = 0; + $_etype979 = 0; + $xfer += $input->readListBegin($_etype979, $_size976); + for ($_i980 = 0; $_i980 < $_size976; ++$_i980) { - $elem974 = null; - $elem974 = new \metastore\Table(); - $xfer += $elem974->read($input); - $this->success []= $elem974; + $elem981 = null; + $elem981 = new \metastore\Table(); + $xfer += $elem981->read($input); + $this->success []= $elem981; } $xfer += $input->readListEnd(); } else { @@ -22480,9 +22478,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter975) + foreach ($this->success as $iter982) { - $xfer += $iter975->write($output); + $xfer += $iter982->write($output); } } $output->writeListEnd(); @@ -23009,14 +23007,14 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size976 = 0; - $_etype979 = 0; - $xfer += $input->readListBegin($_etype979, $_size976); - for ($_i980 = 0; $_i980 < $_size976; ++$_i980) + $_size983 = 0; + $_etype986 = 0; + $xfer += $input->readListBegin($_etype986, $_size983); + for ($_i987 = 0; $_i987 < $_size983; ++$_i987) { - $elem981 = null; - $xfer += $input->readString($elem981); - $this->tbl_names []= $elem981; + $elem988 = null; + $xfer += $input->readString($elem988); + $this->tbl_names []= $elem988; } $xfer += $input->readListEnd(); } else { @@ -23049,9 +23047,9 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter982) + foreach ($this->tbl_names as $iter989) { - $xfer += $output->writeString($iter982); + $xfer += $output->writeString($iter989); } } $output->writeListEnd(); @@ -23156,18 +23154,18 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size983 = 0; - $_ktype984 = 0; - $_vtype985 = 0; - $xfer += $input->readMapBegin($_ktype984, $_vtype985, $_size983); - for ($_i987 = 0; $_i987 < $_size983; ++$_i987) + $_size990 = 0; + $_ktype991 = 0; + $_vtype992 = 0; + $xfer += $input->readMapBegin($_ktype991, $_vtype992, $_size990); + for ($_i994 = 0; $_i994 < $_size990; ++$_i994) { - $key988 = ''; - $val989 = new \metastore\Materialization(); - $xfer += $input->readString($key988); - $val989 = new \metastore\Materialization(); - $xfer += $val989->read($input); - $this->success[$key988] = $val989; + $key995 = ''; + $val996 = new \metastore\Materialization(); + $xfer += $input->readString($key995); + $val996 = new \metastore\Materialization(); + $xfer += $val996->read($input); + $this->success[$key995] = $val996; } $xfer += $input->readMapEnd(); } else { @@ -23219,10 +23217,10 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter990 => $viter991) + foreach ($this->success as $kiter997 => $viter998) { - $xfer += $output->writeString($kiter990); - $xfer += $viter991->write($output); + $xfer += $output->writeString($kiter997); + $xfer += $viter998->write($output); } } $output->writeMapEnd(); @@ 
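The map-typed result above (get_materialization_invalidation_info_result) gets the same renumbering with the map analogue of that loop. A sketch of the shape, again assuming a TProtocol $input; it drops the generator's redundant duplicate instantiation of the value object that is visible on both sides of the hunk:

  <?php
  // Map analogue of the list loop, as generated for string -> struct maps.
  function readMaterializationMap($input) {
    $xfer = 0;
    $map = array();
    $size = 0;
    $ktype = 0;
    $vtype = 0;
    $xfer += $input->readMapBegin($ktype, $vtype, $size);
    for ($i = 0; $i < $size; ++$i) {
      $key = '';
      $xfer += $input->readString($key);
      $val = new \metastore\Materialization();
      $xfer += $val->read($input);
      $map[$key] = $val;
    }
    $xfer += $input->readMapEnd();
    return $map;
  }
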
-23734,14 +23732,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size992 = 0; - $_etype995 = 0; - $xfer += $input->readListBegin($_etype995, $_size992); - for ($_i996 = 0; $_i996 < $_size992; ++$_i996) + $_size999 = 0; + $_etype1002 = 0; + $xfer += $input->readListBegin($_etype1002, $_size999); + for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003) { - $elem997 = null; - $xfer += $input->readString($elem997); - $this->success []= $elem997; + $elem1004 = null; + $xfer += $input->readString($elem1004); + $this->success []= $elem1004; } $xfer += $input->readListEnd(); } else { @@ -23793,9 +23791,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter998) + foreach ($this->success as $iter1005) { - $xfer += $output->writeString($iter998); + $xfer += $output->writeString($iter1005); } } $output->writeListEnd(); @@ -25108,15 +25106,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size999 = 0; - $_etype1002 = 0; - $xfer += $input->readListBegin($_etype1002, $_size999); - for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003) + $_size1006 = 0; + $_etype1009 = 0; + $xfer += $input->readListBegin($_etype1009, $_size1006); + for ($_i1010 = 0; $_i1010 < $_size1006; ++$_i1010) { - $elem1004 = null; - $elem1004 = new \metastore\Partition(); - $xfer += $elem1004->read($input); - $this->new_parts []= $elem1004; + $elem1011 = null; + $elem1011 = new \metastore\Partition(); + $xfer += $elem1011->read($input); + $this->new_parts []= $elem1011; } $xfer += $input->readListEnd(); } else { @@ -25144,9 +25142,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1005) + foreach ($this->new_parts as $iter1012) { - $xfer += $iter1005->write($output); + $xfer += $iter1012->write($output); } } $output->writeListEnd(); @@ -25361,15 +25359,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1006 = 0; - $_etype1009 = 0; - $xfer += $input->readListBegin($_etype1009, $_size1006); - for ($_i1010 = 0; $_i1010 < $_size1006; ++$_i1010) + $_size1013 = 0; + $_etype1016 = 0; + $xfer += $input->readListBegin($_etype1016, $_size1013); + for ($_i1017 = 0; $_i1017 < $_size1013; ++$_i1017) { - $elem1011 = null; - $elem1011 = new \metastore\PartitionSpec(); - $xfer += $elem1011->read($input); - $this->new_parts []= $elem1011; + $elem1018 = null; + $elem1018 = new \metastore\PartitionSpec(); + $xfer += $elem1018->read($input); + $this->new_parts []= $elem1018; } $xfer += $input->readListEnd(); } else { @@ -25397,9 +25395,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1012) + foreach ($this->new_parts as $iter1019) { - $xfer += $iter1012->write($output); + $xfer += $iter1019->write($output); } } $output->writeListEnd(); @@ -25649,14 +25647,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1013 = 0; - $_etype1016 = 0; - $xfer += $input->readListBegin($_etype1016, $_size1013); - for ($_i1017 = 0; $_i1017 < $_size1013; ++$_i1017) + $_size1020 = 0; + $_etype1023 = 0; + $xfer += $input->readListBegin($_etype1023, 
$_size1020); + for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024) { - $elem1018 = null; - $xfer += $input->readString($elem1018); - $this->part_vals []= $elem1018; + $elem1025 = null; + $xfer += $input->readString($elem1025); + $this->part_vals []= $elem1025; } $xfer += $input->readListEnd(); } else { @@ -25694,9 +25692,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1019) + foreach ($this->part_vals as $iter1026) { - $xfer += $output->writeString($iter1019); + $xfer += $output->writeString($iter1026); } } $output->writeListEnd(); @@ -26198,14 +26196,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1020 = 0; - $_etype1023 = 0; - $xfer += $input->readListBegin($_etype1023, $_size1020); - for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024) + $_size1027 = 0; + $_etype1030 = 0; + $xfer += $input->readListBegin($_etype1030, $_size1027); + for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031) { - $elem1025 = null; - $xfer += $input->readString($elem1025); - $this->part_vals []= $elem1025; + $elem1032 = null; + $xfer += $input->readString($elem1032); + $this->part_vals []= $elem1032; } $xfer += $input->readListEnd(); } else { @@ -26251,9 +26249,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1026) + foreach ($this->part_vals as $iter1033) { - $xfer += $output->writeString($iter1026); + $xfer += $output->writeString($iter1033); } } $output->writeListEnd(); @@ -27107,14 +27105,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1027 = 0; - $_etype1030 = 0; - $xfer += $input->readListBegin($_etype1030, $_size1027); - for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031) + $_size1034 = 0; + $_etype1037 = 0; + $xfer += $input->readListBegin($_etype1037, $_size1034); + for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038) { - $elem1032 = null; - $xfer += $input->readString($elem1032); - $this->part_vals []= $elem1032; + $elem1039 = null; + $xfer += $input->readString($elem1039); + $this->part_vals []= $elem1039; } $xfer += $input->readListEnd(); } else { @@ -27159,9 +27157,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1033) + foreach ($this->part_vals as $iter1040) { - $xfer += $output->writeString($iter1033); + $xfer += $output->writeString($iter1040); } } $output->writeListEnd(); @@ -27414,14 +27412,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1034 = 0; - $_etype1037 = 0; - $xfer += $input->readListBegin($_etype1037, $_size1034); - for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038) + $_size1041 = 0; + $_etype1044 = 0; + $xfer += $input->readListBegin($_etype1044, $_size1041); + for ($_i1045 = 0; $_i1045 < $_size1041; ++$_i1045) { - $elem1039 = null; - $xfer += $input->readString($elem1039); - $this->part_vals []= $elem1039; + $elem1046 = null; + $xfer += $input->readString($elem1046); + $this->part_vals []= $elem1046; } $xfer += $input->readListEnd(); } else { @@ -27474,9 +27472,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { 
$output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1040) + foreach ($this->part_vals as $iter1047) { - $xfer += $output->writeString($iter1040); + $xfer += $output->writeString($iter1047); } } $output->writeListEnd(); @@ -28490,14 +28488,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1041 = 0; - $_etype1044 = 0; - $xfer += $input->readListBegin($_etype1044, $_size1041); - for ($_i1045 = 0; $_i1045 < $_size1041; ++$_i1045) + $_size1048 = 0; + $_etype1051 = 0; + $xfer += $input->readListBegin($_etype1051, $_size1048); + for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052) { - $elem1046 = null; - $xfer += $input->readString($elem1046); - $this->part_vals []= $elem1046; + $elem1053 = null; + $xfer += $input->readString($elem1053); + $this->part_vals []= $elem1053; } $xfer += $input->readListEnd(); } else { @@ -28535,9 +28533,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1047) + foreach ($this->part_vals as $iter1054) { - $xfer += $output->writeString($iter1047); + $xfer += $output->writeString($iter1054); } } $output->writeListEnd(); @@ -28779,17 +28777,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1048 = 0; - $_ktype1049 = 0; - $_vtype1050 = 0; - $xfer += $input->readMapBegin($_ktype1049, $_vtype1050, $_size1048); - for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052) + $_size1055 = 0; + $_ktype1056 = 0; + $_vtype1057 = 0; + $xfer += $input->readMapBegin($_ktype1056, $_vtype1057, $_size1055); + for ($_i1059 = 0; $_i1059 < $_size1055; ++$_i1059) { - $key1053 = ''; - $val1054 = ''; - $xfer += $input->readString($key1053); - $xfer += $input->readString($val1054); - $this->partitionSpecs[$key1053] = $val1054; + $key1060 = ''; + $val1061 = ''; + $xfer += $input->readString($key1060); + $xfer += $input->readString($val1061); + $this->partitionSpecs[$key1060] = $val1061; } $xfer += $input->readMapEnd(); } else { @@ -28845,10 +28843,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1055 => $viter1056) + foreach ($this->partitionSpecs as $kiter1062 => $viter1063) { - $xfer += $output->writeString($kiter1055); - $xfer += $output->writeString($viter1056); + $xfer += $output->writeString($kiter1062); + $xfer += $output->writeString($viter1063); } } $output->writeMapEnd(); @@ -29160,17 +29158,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1057 = 0; - $_ktype1058 = 0; - $_vtype1059 = 0; - $xfer += $input->readMapBegin($_ktype1058, $_vtype1059, $_size1057); - for ($_i1061 = 0; $_i1061 < $_size1057; ++$_i1061) + $_size1064 = 0; + $_ktype1065 = 0; + $_vtype1066 = 0; + $xfer += $input->readMapBegin($_ktype1065, $_vtype1066, $_size1064); + for ($_i1068 = 0; $_i1068 < $_size1064; ++$_i1068) { - $key1062 = ''; - $val1063 = ''; - $xfer += $input->readString($key1062); - $xfer += $input->readString($val1063); - $this->partitionSpecs[$key1062] = $val1063; + $key1069 = ''; + $val1070 = ''; + $xfer += $input->readString($key1069); + $xfer += $input->readString($val1070); + $this->partitionSpecs[$key1069] = $val1070; } $xfer += $input->readMapEnd(); } else { @@ -29226,10 
+29224,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1064 => $viter1065) + foreach ($this->partitionSpecs as $kiter1071 => $viter1072) { - $xfer += $output->writeString($kiter1064); - $xfer += $output->writeString($viter1065); + $xfer += $output->writeString($kiter1071); + $xfer += $output->writeString($viter1072); } } $output->writeMapEnd(); @@ -29362,15 +29360,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1066 = 0; - $_etype1069 = 0; - $xfer += $input->readListBegin($_etype1069, $_size1066); - for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) + $_size1073 = 0; + $_etype1076 = 0; + $xfer += $input->readListBegin($_etype1076, $_size1073); + for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) { - $elem1071 = null; - $elem1071 = new \metastore\Partition(); - $xfer += $elem1071->read($input); - $this->success []= $elem1071; + $elem1078 = null; + $elem1078 = new \metastore\Partition(); + $xfer += $elem1078->read($input); + $this->success []= $elem1078; } $xfer += $input->readListEnd(); } else { @@ -29430,9 +29428,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1072) + foreach ($this->success as $iter1079) { - $xfer += $iter1072->write($output); + $xfer += $iter1079->write($output); } } $output->writeListEnd(); @@ -29578,14 +29576,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1073 = 0; - $_etype1076 = 0; - $xfer += $input->readListBegin($_etype1076, $_size1073); - for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) + $_size1080 = 0; + $_etype1083 = 0; + $xfer += $input->readListBegin($_etype1083, $_size1080); + for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084) { - $elem1078 = null; - $xfer += $input->readString($elem1078); - $this->part_vals []= $elem1078; + $elem1085 = null; + $xfer += $input->readString($elem1085); + $this->part_vals []= $elem1085; } $xfer += $input->readListEnd(); } else { @@ -29602,14 +29600,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1079 = 0; - $_etype1082 = 0; - $xfer += $input->readListBegin($_etype1082, $_size1079); - for ($_i1083 = 0; $_i1083 < $_size1079; ++$_i1083) + $_size1086 = 0; + $_etype1089 = 0; + $xfer += $input->readListBegin($_etype1089, $_size1086); + for ($_i1090 = 0; $_i1090 < $_size1086; ++$_i1090) { - $elem1084 = null; - $xfer += $input->readString($elem1084); - $this->group_names []= $elem1084; + $elem1091 = null; + $xfer += $input->readString($elem1091); + $this->group_names []= $elem1091; } $xfer += $input->readListEnd(); } else { @@ -29647,9 +29645,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1085) + foreach ($this->part_vals as $iter1092) { - $xfer += $output->writeString($iter1085); + $xfer += $output->writeString($iter1092); } } $output->writeListEnd(); @@ -29669,9 +29667,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1086) + foreach ($this->group_names as $iter1093) { - $xfer += 
$output->writeString($iter1086); + $xfer += $output->writeString($iter1093); } } $output->writeListEnd(); @@ -30262,15 +30260,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1087 = 0; - $_etype1090 = 0; - $xfer += $input->readListBegin($_etype1090, $_size1087); - for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091) + $_size1094 = 0; + $_etype1097 = 0; + $xfer += $input->readListBegin($_etype1097, $_size1094); + for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) { - $elem1092 = null; - $elem1092 = new \metastore\Partition(); - $xfer += $elem1092->read($input); - $this->success []= $elem1092; + $elem1099 = null; + $elem1099 = new \metastore\Partition(); + $xfer += $elem1099->read($input); + $this->success []= $elem1099; } $xfer += $input->readListEnd(); } else { @@ -30314,9 +30312,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1093) + foreach ($this->success as $iter1100) { - $xfer += $iter1093->write($output); + $xfer += $iter1100->write($output); } } $output->writeListEnd(); @@ -30462,14 +30460,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1094 = 0; - $_etype1097 = 0; - $xfer += $input->readListBegin($_etype1097, $_size1094); - for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) + $_size1101 = 0; + $_etype1104 = 0; + $xfer += $input->readListBegin($_etype1104, $_size1101); + for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105) { - $elem1099 = null; - $xfer += $input->readString($elem1099); - $this->group_names []= $elem1099; + $elem1106 = null; + $xfer += $input->readString($elem1106); + $this->group_names []= $elem1106; } $xfer += $input->readListEnd(); } else { @@ -30517,9 +30515,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1100) + foreach ($this->group_names as $iter1107) { - $xfer += $output->writeString($iter1100); + $xfer += $output->writeString($iter1107); } } $output->writeListEnd(); @@ -30608,15 +30606,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1101 = 0; - $_etype1104 = 0; - $xfer += $input->readListBegin($_etype1104, $_size1101); - for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105) + $_size1108 = 0; + $_etype1111 = 0; + $xfer += $input->readListBegin($_etype1111, $_size1108); + for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112) { - $elem1106 = null; - $elem1106 = new \metastore\Partition(); - $xfer += $elem1106->read($input); - $this->success []= $elem1106; + $elem1113 = null; + $elem1113 = new \metastore\Partition(); + $xfer += $elem1113->read($input); + $this->success []= $elem1113; } $xfer += $input->readListEnd(); } else { @@ -30660,9 +30658,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1107) + foreach ($this->success as $iter1114) { - $xfer += $iter1107->write($output); + $xfer += $iter1114->write($output); } } $output->writeListEnd(); @@ -30882,15 +30880,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1108 = 0; - $_etype1111 = 0; - $xfer += $input->readListBegin($_etype1111, $_size1108); - 
for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112) + $_size1115 = 0; + $_etype1118 = 0; + $xfer += $input->readListBegin($_etype1118, $_size1115); + for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) { - $elem1113 = null; - $elem1113 = new \metastore\PartitionSpec(); - $xfer += $elem1113->read($input); - $this->success []= $elem1113; + $elem1120 = null; + $elem1120 = new \metastore\PartitionSpec(); + $xfer += $elem1120->read($input); + $this->success []= $elem1120; } $xfer += $input->readListEnd(); } else { @@ -30934,9 +30932,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1114) + foreach ($this->success as $iter1121) { - $xfer += $iter1114->write($output); + $xfer += $iter1121->write($output); } } $output->writeListEnd(); @@ -31155,14 +31153,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1115 = 0; - $_etype1118 = 0; - $xfer += $input->readListBegin($_etype1118, $_size1115); - for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) + $_size1122 = 0; + $_etype1125 = 0; + $xfer += $input->readListBegin($_etype1125, $_size1122); + for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) { - $elem1120 = null; - $xfer += $input->readString($elem1120); - $this->success []= $elem1120; + $elem1127 = null; + $xfer += $input->readString($elem1127); + $this->success []= $elem1127; } $xfer += $input->readListEnd(); } else { @@ -31206,9 +31204,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1121) + foreach ($this->success as $iter1128) { - $xfer += $output->writeString($iter1121); + $xfer += $output->writeString($iter1128); } } $output->writeListEnd(); @@ -31539,14 +31537,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1122 = 0; - $_etype1125 = 0; - $xfer += $input->readListBegin($_etype1125, $_size1122); - for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) + $_size1129 = 0; + $_etype1132 = 0; + $xfer += $input->readListBegin($_etype1132, $_size1129); + for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) { - $elem1127 = null; - $xfer += $input->readString($elem1127); - $this->part_vals []= $elem1127; + $elem1134 = null; + $xfer += $input->readString($elem1134); + $this->part_vals []= $elem1134; } $xfer += $input->readListEnd(); } else { @@ -31591,9 +31589,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1128) + foreach ($this->part_vals as $iter1135) { - $xfer += $output->writeString($iter1128); + $xfer += $output->writeString($iter1135); } } $output->writeListEnd(); @@ -31687,15 +31685,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1129 = 0; - $_etype1132 = 0; - $xfer += $input->readListBegin($_etype1132, $_size1129); - for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) + $_size1136 = 0; + $_etype1139 = 0; + $xfer += $input->readListBegin($_etype1139, $_size1136); + for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) { - $elem1134 = null; - $elem1134 = new \metastore\Partition(); - $xfer += $elem1134->read($input); - $this->success []= $elem1134; + $elem1141 = null; + $elem1141 = new \metastore\Partition(); + $xfer += 
$elem1141->read($input); + $this->success []= $elem1141; } $xfer += $input->readListEnd(); } else { @@ -31739,9 +31737,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1135) + foreach ($this->success as $iter1142) { - $xfer += $iter1135->write($output); + $xfer += $iter1142->write($output); } } $output->writeListEnd(); @@ -31888,14 +31886,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1136 = 0; - $_etype1139 = 0; - $xfer += $input->readListBegin($_etype1139, $_size1136); - for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) + $_size1143 = 0; + $_etype1146 = 0; + $xfer += $input->readListBegin($_etype1146, $_size1143); + for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) { - $elem1141 = null; - $xfer += $input->readString($elem1141); - $this->part_vals []= $elem1141; + $elem1148 = null; + $xfer += $input->readString($elem1148); + $this->part_vals []= $elem1148; } $xfer += $input->readListEnd(); } else { @@ -31919,14 +31917,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1142 = 0; - $_etype1145 = 0; - $xfer += $input->readListBegin($_etype1145, $_size1142); - for ($_i1146 = 0; $_i1146 < $_size1142; ++$_i1146) + $_size1149 = 0; + $_etype1152 = 0; + $xfer += $input->readListBegin($_etype1152, $_size1149); + for ($_i1153 = 0; $_i1153 < $_size1149; ++$_i1153) { - $elem1147 = null; - $xfer += $input->readString($elem1147); - $this->group_names []= $elem1147; + $elem1154 = null; + $xfer += $input->readString($elem1154); + $this->group_names []= $elem1154; } $xfer += $input->readListEnd(); } else { @@ -31964,9 +31962,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1148) + foreach ($this->part_vals as $iter1155) { - $xfer += $output->writeString($iter1148); + $xfer += $output->writeString($iter1155); } } $output->writeListEnd(); @@ -31991,9 +31989,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1149) + foreach ($this->group_names as $iter1156) { - $xfer += $output->writeString($iter1149); + $xfer += $output->writeString($iter1156); } } $output->writeListEnd(); @@ -32082,15 +32080,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1150 = 0; - $_etype1153 = 0; - $xfer += $input->readListBegin($_etype1153, $_size1150); - for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) + $_size1157 = 0; + $_etype1160 = 0; + $xfer += $input->readListBegin($_etype1160, $_size1157); + for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) { - $elem1155 = null; - $elem1155 = new \metastore\Partition(); - $xfer += $elem1155->read($input); - $this->success []= $elem1155; + $elem1162 = null; + $elem1162 = new \metastore\Partition(); + $xfer += $elem1162->read($input); + $this->success []= $elem1162; } $xfer += $input->readListEnd(); } else { @@ -32134,9 +32132,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1156) + foreach ($this->success as $iter1163) { - $xfer += 
$iter1156->write($output); + $xfer += $iter1163->write($output); } } $output->writeListEnd(); @@ -32257,14 +32255,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1157 = 0; - $_etype1160 = 0; - $xfer += $input->readListBegin($_etype1160, $_size1157); - for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) + $_size1164 = 0; + $_etype1167 = 0; + $xfer += $input->readListBegin($_etype1167, $_size1164); + for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) { - $elem1162 = null; - $xfer += $input->readString($elem1162); - $this->part_vals []= $elem1162; + $elem1169 = null; + $xfer += $input->readString($elem1169); + $this->part_vals []= $elem1169; } $xfer += $input->readListEnd(); } else { @@ -32309,9 +32307,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1163) + foreach ($this->part_vals as $iter1170) { - $xfer += $output->writeString($iter1163); + $xfer += $output->writeString($iter1170); } } $output->writeListEnd(); @@ -32404,14 +32402,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1164 = 0; - $_etype1167 = 0; - $xfer += $input->readListBegin($_etype1167, $_size1164); - for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) + $_size1171 = 0; + $_etype1174 = 0; + $xfer += $input->readListBegin($_etype1174, $_size1171); + for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) { - $elem1169 = null; - $xfer += $input->readString($elem1169); - $this->success []= $elem1169; + $elem1176 = null; + $xfer += $input->readString($elem1176); + $this->success []= $elem1176; } $xfer += $input->readListEnd(); } else { @@ -32455,9 +32453,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1170) + foreach ($this->success as $iter1177) { - $xfer += $output->writeString($iter1170); + $xfer += $output->writeString($iter1177); } } $output->writeListEnd(); @@ -32700,15 +32698,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1171 = 0; - $_etype1174 = 0; - $xfer += $input->readListBegin($_etype1174, $_size1171); - for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) + $_size1178 = 0; + $_etype1181 = 0; + $xfer += $input->readListBegin($_etype1181, $_size1178); + for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) { - $elem1176 = null; - $elem1176 = new \metastore\Partition(); - $xfer += $elem1176->read($input); - $this->success []= $elem1176; + $elem1183 = null; + $elem1183 = new \metastore\Partition(); + $xfer += $elem1183->read($input); + $this->success []= $elem1183; } $xfer += $input->readListEnd(); } else { @@ -32752,9 +32750,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1177) + foreach ($this->success as $iter1184) { - $xfer += $iter1177->write($output); + $xfer += $iter1184->write($output); } } $output->writeListEnd(); @@ -32997,15 +32995,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1178 = 0; - $_etype1181 = 0; - $xfer += $input->readListBegin($_etype1181, $_size1178); - for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) + $_size1185 = 
0; + $_etype1188 = 0; + $xfer += $input->readListBegin($_etype1188, $_size1185); + for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) { - $elem1183 = null; - $elem1183 = new \metastore\PartitionSpec(); - $xfer += $elem1183->read($input); - $this->success []= $elem1183; + $elem1190 = null; + $elem1190 = new \metastore\PartitionSpec(); + $xfer += $elem1190->read($input); + $this->success []= $elem1190; } $xfer += $input->readListEnd(); } else { @@ -33049,9 +33047,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1184) + foreach ($this->success as $iter1191) { - $xfer += $iter1184->write($output); + $xfer += $iter1191->write($output); } } $output->writeListEnd(); @@ -33617,14 +33615,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1185 = 0; - $_etype1188 = 0; - $xfer += $input->readListBegin($_etype1188, $_size1185); - for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) + $_size1192 = 0; + $_etype1195 = 0; + $xfer += $input->readListBegin($_etype1195, $_size1192); + for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) { - $elem1190 = null; - $xfer += $input->readString($elem1190); - $this->names []= $elem1190; + $elem1197 = null; + $xfer += $input->readString($elem1197); + $this->names []= $elem1197; } $xfer += $input->readListEnd(); } else { @@ -33662,9 +33660,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1191) + foreach ($this->names as $iter1198) { - $xfer += $output->writeString($iter1191); + $xfer += $output->writeString($iter1198); } } $output->writeListEnd(); @@ -33753,15 +33751,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1192 = 0; - $_etype1195 = 0; - $xfer += $input->readListBegin($_etype1195, $_size1192); - for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) + $_size1199 = 0; + $_etype1202 = 0; + $xfer += $input->readListBegin($_etype1202, $_size1199); + for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203) { - $elem1197 = null; - $elem1197 = new \metastore\Partition(); - $xfer += $elem1197->read($input); - $this->success []= $elem1197; + $elem1204 = null; + $elem1204 = new \metastore\Partition(); + $xfer += $elem1204->read($input); + $this->success []= $elem1204; } $xfer += $input->readListEnd(); } else { @@ -33805,9 +33803,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1198) + foreach ($this->success as $iter1205) { - $xfer += $iter1198->write($output); + $xfer += $iter1205->write($output); } } $output->writeListEnd(); @@ -34146,15 +34144,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1199 = 0; - $_etype1202 = 0; - $xfer += $input->readListBegin($_etype1202, $_size1199); - for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203) + $_size1206 = 0; + $_etype1209 = 0; + $xfer += $input->readListBegin($_etype1209, $_size1206); + for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210) { - $elem1204 = null; - $elem1204 = new \metastore\Partition(); - $xfer += $elem1204->read($input); - $this->new_parts []= $elem1204; + $elem1211 = null; + $elem1211 = new \metastore\Partition(); + $xfer += $elem1211->read($input); + 
$this->new_parts []= $elem1211; } $xfer += $input->readListEnd(); } else { @@ -34192,9 +34190,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1205) + foreach ($this->new_parts as $iter1212) { - $xfer += $iter1205->write($output); + $xfer += $iter1212->write($output); } } $output->writeListEnd(); @@ -34314,61 +34312,23 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { static $_TSPEC; /** - * @var string - */ - public $db_name = null; - /** - * @var string - */ - public $tbl_name = null; - /** - * @var \metastore\Partition[] - */ - public $new_parts = null; - /** - * @var \metastore\EnvironmentContext + * @var \metastore\AlterPartitionsRequest */ - public $environment_context = null; + public $req = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'db_name', - 'type' => TType::STRING, - ), - 2 => array( - 'var' => 'tbl_name', - 'type' => TType::STRING, - ), - 3 => array( - 'var' => 'new_parts', - 'type' => TType::LST, - 'etype' => TType::STRUCT, - 'elem' => array( - 'type' => TType::STRUCT, - 'class' => '\metastore\Partition', - ), - ), - 4 => array( - 'var' => 'environment_context', + 'var' => 'req', 'type' => TType::STRUCT, - 'class' => '\metastore\EnvironmentContext', + 'class' => '\metastore\AlterPartitionsRequest', ), ); } if (is_array($vals)) { - if (isset($vals['db_name'])) { - $this->db_name = $vals['db_name']; - } - if (isset($vals['tbl_name'])) { - $this->tbl_name = $vals['tbl_name']; - } - if (isset($vals['new_parts'])) { - $this->new_parts = $vals['new_parts']; - } - if (isset($vals['environment_context'])) { - $this->environment_context = $vals['environment_context']; + if (isset($vals['req'])) { + $this->req = $vals['req']; } } } @@ -34393,41 +34353,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { switch ($fid) { case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); - } else { - $xfer += $input->skip($ftype); - } - break; - case 2: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->tbl_name); - } else { - $xfer += $input->skip($ftype); - } - break; - case 3: - if ($ftype == TType::LST) { - $this->new_parts = array(); - $_size1206 = 0; - $_etype1209 = 0; - $xfer += $input->readListBegin($_etype1209, $_size1206); - for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210) - { - $elem1211 = null; - $elem1211 = new \metastore\Partition(); - $xfer += $elem1211->read($input); - $this->new_parts []= $elem1211; - } - $xfer += $input->readListEnd(); - } else { - $xfer += $input->skip($ftype); - } - break; - case 4: if ($ftype == TType::STRUCT) { - $this->environment_context = new \metastore\EnvironmentContext(); - $xfer += $this->environment_context->read($input); + $this->req = new \metastore\AlterPartitionsRequest(); + $xfer += $this->req->read($input); } else { $xfer += $input->skip($ftype); } @@ -34445,39 +34373,12 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_with_environment_context_args'); - if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); - $xfer += $output->writeString($this->db_name); - $xfer += $output->writeFieldEnd(); - } - if ($this->tbl_name !== null) { - $xfer += 
$output->writeFieldBegin('tbl_name', TType::STRING, 2); - $xfer += $output->writeString($this->tbl_name); - $xfer += $output->writeFieldEnd(); - } - if ($this->new_parts !== null) { - if (!is_array($this->new_parts)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('new_parts', TType::LST, 3); - { - $output->writeListBegin(TType::STRUCT, count($this->new_parts)); - { - foreach ($this->new_parts as $iter1212) - { - $xfer += $iter1212->write($output); - } - } - $output->writeListEnd(); - } - $xfer += $output->writeFieldEnd(); - } - if ($this->environment_context !== null) { - if (!is_object($this->environment_context)) { + if ($this->req !== null) { + if (!is_object($this->req)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('environment_context', TType::STRUCT, 4); - $xfer += $this->environment_context->write($output); + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -34491,6 +34392,10 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result { static $_TSPEC; /** + * @var \metastore\AlterPartitionsResponse + */ + public $success = null; + /** * @var \metastore\InvalidOperationException */ public $o1 = null; @@ -34502,6 +34407,11 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result { public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\AlterPartitionsResponse', + ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, @@ -34515,6 +34425,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result { ); } if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } if (isset($vals['o1'])) { $this->o1 = $vals['o1']; } @@ -34543,6 +34456,14 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result { } switch ($fid) { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\AlterPartitionsResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; case 1: if ($ftype == TType::STRUCT) { $this->o1 = new \metastore\InvalidOperationException(); @@ -34572,6 +34493,14 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_with_environment_context_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } if ($this->o1 !== null) { $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); $xfer += $this->o1->write($output); diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index a29ebb7f59..9033e9ac8d 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -203,6 +203,17 @@ final class SchemaVersionState { ); } +final class 
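The alter_partitions_with_environment_context hunks above collapse the four positional fields (db_name, tbl_name, new_parts, environment_context) into a single req field of type \metastore\AlterPartitionsRequest, and the previously void result gains a success field carrying an \metastore\AlterPartitionsResponse. A hedged caller-side sketch using the $vals-array constructor convention visible throughout this file; the fields inside AlterPartitionsRequest are not shown in this diff, so $request is assumed to be built elsewhere:

  <?php
  // One request struct now carries everything the old positional fields did.
  $args = new \metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_args(
    array('req' => $request)  // $request: \metastore\AlterPartitionsRequest
  );
  // After $result->read($protocol), a successful call populates field 0
  // with the new response struct instead of returning nothing:
  //   $result->success          -> \metastore\AlterPartitionsResponse
  //   $result->o1 / $result->o2 -> the declared metastore exceptions
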
IsolationLevelCompliance { + const YES = 1; + const NO = 2; + const UNKNOWN = 3; + static public $__names = array( + 1 => 'YES', + 2 => 'NO', + 3 => 'UNKNOWN', + ); +} + final class FunctionType { const JAVA = 1; static public $__names = array( @@ -6517,6 +6528,18 @@ class Table { * @var int */ public $ownerType = 1; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; + /** + * @var int + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -6609,6 +6632,18 @@ class Table { 'var' => 'ownerType', 'type' => TType::I32, ), + 19 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 20 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), + 21 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::I32, + ), ); } if (is_array($vals)) { @@ -6666,6 +6701,15 @@ class Table { if (isset($vals['ownerType'])) { $this->ownerType = $vals['ownerType']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -6841,6 +6885,27 @@ class Table { $xfer += $input->skip($ftype); } break; + case 19: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 20: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; + case 21: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -6978,6 +7043,21 @@ class Table { $xfer += $output->writeI32($this->ownerType); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 19); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 20); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 21); + $xfer += $output->writeI32($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -7024,6 +7104,18 @@ class Partition { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; + /** + * @var int + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -7078,6 +7170,18 @@ class Partition { 'var' => 'catName', 'type' => TType::STRING, ), + 10 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 11 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), + 12 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::I32, + ), ); } if (is_array($vals)) { @@ -7108,6 +7212,15 @@ class Partition { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList 
= $vals['validWriteIdList']; + } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -7218,6 +7331,27 @@ class Partition { $xfer += $input->skip($ftype); } break; + case 10: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 11: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; + case 12: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -7307,6 +7441,21 @@ class Partition { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 10); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 11); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 12); + $xfer += $output->writeI32($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -7830,6 +7979,18 @@ class PartitionSpec { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; + /** + * @var int + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -7860,6 +8021,18 @@ class PartitionSpec { 'var' => 'catName', 'type' => TType::STRING, ), + 7 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 8 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), + 9 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::I32, + ), ); } if (is_array($vals)) { @@ -7881,6 +8054,15 @@ class PartitionSpec { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -7947,6 +8129,27 @@ class PartitionSpec { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 8: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; + case 9: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -7996,6 +8199,21 @@ class PartitionSpec { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 7); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += 
$output->writeFieldBegin('validWriteIdList', TType::STRING, 8); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 9); + $xfer += $output->writeI32($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -9894,6 +10112,18 @@ class ColumnStatistics { * @var \metastore\ColumnStatisticsObj[] */ public $statsObj = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; + /** + * @var int + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -9912,6 +10142,18 @@ class ColumnStatistics { 'class' => '\metastore\ColumnStatisticsObj', ), ), + 3 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::I32, + ), ); } if (is_array($vals)) { @@ -9921,6 +10163,15 @@ class ColumnStatistics { if (isset($vals['statsObj'])) { $this->statsObj = $vals['statsObj']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -9969,6 +10220,27 @@ class ColumnStatistics { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -10007,6 +10279,21 @@ class ColumnStatistics { } $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 3); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 5); + $xfer += $output->writeI32($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -10025,6 +10312,10 @@ class AggrStats { * @var int */ public $partsFound = null; + /** + * @var int + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10042,6 +10333,10 @@ class AggrStats { 'var' => 'partsFound', 'type' => TType::I64, ), + 3 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::I32, + ), ); } if (is_array($vals)) { @@ -10051,6 +10346,9 @@ class AggrStats { if (isset($vals['partsFound'])) { $this->partsFound = $vals['partsFound']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -10098,6 
+10396,13 @@ class AggrStats { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -10133,6 +10438,11 @@ class AggrStats { $xfer += $output->writeI64($this->partsFound); $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 3); + $xfer += $output->writeI32($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -10151,6 +10461,14 @@ class SetPartitionsStatsRequest { * @var bool */ public $needMerge = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10168,6 +10486,14 @@ class SetPartitionsStatsRequest { 'var' => 'needMerge', 'type' => TType::BOOL, ), + 3 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -10177,6 +10503,12 @@ class SetPartitionsStatsRequest { if (isset($vals['needMerge'])) { $this->needMerge = $vals['needMerge']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -10224,6 +10556,20 @@ class SetPartitionsStatsRequest { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -10259,6 +10605,16 @@ class SetPartitionsStatsRequest { $xfer += $output->writeBool($this->needMerge); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 3); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13010,6 +13366,10 @@ class TableStatsResult { * @var \metastore\ColumnStatisticsObj[] */ public $tableStats = null; + /** + * @var int + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13023,12 +13383,19 @@ class TableStatsResult { 'class' => '\metastore\ColumnStatisticsObj', ), ), + 2 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::I32, + ), ); } if (is_array($vals)) { if (isset($vals['tableStats'])) { $this->tableStats = $vals['tableStats']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -13069,6 +13436,13 @@ class TableStatsResult { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13099,6 
+13473,11 @@ class TableStatsResult { } $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2); + $xfer += $output->writeI32($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13113,6 +13492,10 @@ class PartitionsStatsResult { * @var array */ public $partStats = null; + /** + * @var int + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13134,12 +13517,19 @@ class PartitionsStatsResult { ), ), ), + 2 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::I32, + ), ); } if (is_array($vals)) { if (isset($vals['partStats'])) { $this->partStats = $vals['partStats']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -13193,6 +13583,13 @@ class PartitionsStatsResult { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13233,6 +13630,11 @@ class PartitionsStatsResult { } $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2); + $xfer += $output->writeI32($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13259,6 +13661,14 @@ class TableStatsRequest { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13283,6 +13693,14 @@ class TableStatsRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 6 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13298,6 +13716,12 @@ class TableStatsRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -13358,6 +13782,20 @@ class TableStatsRequest { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13403,6 +13841,16 @@ class TableStatsRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 5); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13433,6 +13881,14 @@ class 
PartitionsStatsRequest { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13465,6 +13921,14 @@ class PartitionsStatsRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 6 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 7 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13483,6 +13947,12 @@ class PartitionsStatsRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -13560,6 +14030,20 @@ class PartitionsStatsRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13622,6 +14106,16 @@ class PartitionsStatsRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 6); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13636,6 +14130,10 @@ class AddPartitionsResult { * @var \metastore\Partition[] */ public $partitions = null; + /** + * @var int + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13649,12 +14147,19 @@ class AddPartitionsResult { 'class' => '\metastore\Partition', ), ), + 2 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::I32, + ), ); } if (is_array($vals)) { if (isset($vals['partitions'])) { $this->partitions = $vals['partitions']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -13695,6 +14200,13 @@ class AddPartitionsResult { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13725,6 +14237,11 @@ class AddPartitionsResult { } $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2); + $xfer += $output->writeI32($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13759,6 +14276,14 @@ class AddPartitionsRequest { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13792,6 +14317,14 @@ class AddPartitionsRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 7 => 
array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 8 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13813,6 +14346,12 @@ class AddPartitionsRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -13888,6 +14427,20 @@ class AddPartitionsRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 8: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13943,6 +14496,16 @@ class AddPartitionsRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 7); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -23596,6 +24159,14 @@ class GetTableRequest { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -23617,6 +24188,14 @@ class GetTableRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 6 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -23632,6 +24211,12 @@ class GetTableRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -23683,6 +24268,20 @@ class GetTableRequest { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -23719,6 +24318,16 @@ class GetTableRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 5); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -23733,6 +24342,10 @@ class GetTableResult { * @var \metastore\Table */ public $table = null; + /** + * @var int + */ + public $isStatsCompliant = null; public function 
__construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -23742,12 +24355,19 @@ class GetTableResult { 'type' => TType::STRUCT, 'class' => '\metastore\Table', ), + 2 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::I32, + ), ); } if (is_array($vals)) { if (isset($vals['table'])) { $this->table = $vals['table']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -23778,6 +24398,13 @@ class GetTableResult { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -23799,6 +24426,11 @@ class GetTableResult { $xfer += $this->table->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2); + $xfer += $output->writeI32($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -30306,6 +30938,279 @@ class GetRuntimeStatsRequest { } +class AlterPartitionsRequest { + static $_TSPEC; + + /** + * @var string + */ + public $dbName = null; + /** + * @var string + */ + public $tableName = null; + /** + * @var \metastore\Partition[] + */ + public $partitions = null; + /** + * @var \metastore\EnvironmentContext + */ + public $environmentContext = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'partitions', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\Partition', + ), + ), + 4 => array( + 'var' => 'environmentContext', + 'type' => TType::STRUCT, + 'class' => '\metastore\EnvironmentContext', + ), + 5 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 6 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + if (isset($vals['partitions'])) { + $this->partitions = $vals['partitions']; + } + if (isset($vals['environmentContext'])) { + $this->environmentContext = $vals['environmentContext']; + } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } + } + } + + public function getName() { + return 'AlterPartitionsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::LST) { + 
$this->partitions = array(); + $_size820 = 0; + $_etype823 = 0; + $xfer += $input->readListBegin($_etype823, $_size820); + for ($_i824 = 0; $_i824 < $_size820; ++$_i824) + { + $elem825 = null; + $elem825 = new \metastore\Partition(); + $xfer += $elem825->read($input); + $this->partitions []= $elem825; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->environmentContext = new \metastore\EnvironmentContext(); + $xfer += $this->environmentContext->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AlterPartitionsRequest'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->partitions !== null) { + if (!is_array($this->partitions)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('partitions', TType::LST, 3); + { + $output->writeListBegin(TType::STRUCT, count($this->partitions)); + { + foreach ($this->partitions as $iter826) + { + $xfer += $iter826->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->environmentContext !== null) { + if (!is_object($this->environmentContext)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('environmentContext', TType::STRUCT, 4); + $xfer += $this->environmentContext->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 5); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class AlterPartitionsResponse { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'AlterPartitionsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + 
public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AlterPartitionsResponse'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class MetaException extends TException { static $_TSPEC; diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 54023726df..dbc54f8ba7 100755 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -107,7 +107,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' get_partitions_by_names(string db_name, string tbl_name, names)') print(' void alter_partition(string db_name, string tbl_name, Partition new_part)') print(' void alter_partitions(string db_name, string tbl_name, new_parts)') - print(' void alter_partitions_with_environment_context(string db_name, string tbl_name, new_parts, EnvironmentContext environment_context)') + print(' AlterPartitionsResponse alter_partitions_with_environment_context(AlterPartitionsRequest req)') print(' void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)') print(' void rename_partition(string db_name, string tbl_name, part_vals, Partition new_part)') print(' bool partition_name_has_valid_characters( part_vals, bool throw_exception)') @@ -799,10 +799,10 @@ elif cmd == 'alter_partitions': pp.pprint(client.alter_partitions(args[0],args[1],eval(args[2]),)) elif cmd == 'alter_partitions_with_environment_context': - if len(args) != 4: - print('alter_partitions_with_environment_context requires 4 args') + if len(args) != 1: + print('alter_partitions_with_environment_context requires 1 args') sys.exit(1) - pp.pprint(client.alter_partitions_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) + pp.pprint(client.alter_partitions_with_environment_context(eval(args[0]),)) elif cmd == 'alter_partition_with_environment_context': if len(args) != 4: diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 5a3f2c193a..cb5e1582fc 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -717,13 +717,10 @@ def alter_partitions(self, db_name, tbl_name, new_parts): """ pass - def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context): + def alter_partitions_with_environment_context(self, req): """ Parameters: - - db_name - - tbl_name - - new_parts - - environment_context + - req """ pass @@ -4734,24 +4731,18 @@ def recv_alter_partitions(self): raise result.o2 return - def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context): + def alter_partitions_with_environment_context(self, req): """ Parameters: - - db_name - - tbl_name - - new_parts - - environment_context + - req """ - self.send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context) - self.recv_alter_partitions_with_environment_context() + self.send_alter_partitions_with_environment_context(req) + return self.recv_alter_partitions_with_environment_context() - def 
send_alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context): + def send_alter_partitions_with_environment_context(self, req): self._oprot.writeMessageBegin('alter_partitions_with_environment_context', TMessageType.CALL, self._seqid) args = alter_partitions_with_environment_context_args() - args.db_name = db_name - args.tbl_name = tbl_name - args.new_parts = new_parts - args.environment_context = environment_context + args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4767,11 +4758,13 @@ def recv_alter_partitions_with_environment_context(self): result = alter_partitions_with_environment_context_result() result.read(iprot) iprot.readMessageEnd() + if result.success is not None: + return result.success if result.o1 is not None: raise result.o1 if result.o2 is not None: raise result.o2 - return + raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context failed: unknown result") def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context): """ @@ -11366,7 +11359,7 @@ def process_alter_partitions_with_environment_context(self, seqid, iprot, oprot) iprot.readMessageEnd() result = alter_partitions_with_environment_context_result() try: - self._handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context) + result.success = self._handler.alter_partitions_with_environment_context(args.req) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -15987,10 +15980,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype819, _size816) = iprot.readListBegin() - for _i820 in xrange(_size816): - _elem821 = iprot.readString() - self.success.append(_elem821) + (_etype826, _size823) = iprot.readListBegin() + for _i827 in xrange(_size823): + _elem828 = iprot.readString() + self.success.append(_elem828) iprot.readListEnd() else: iprot.skip(ftype) @@ -16013,8 +16006,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter822 in self.success: - oprot.writeString(iter822) + for iter829 in self.success: + oprot.writeString(iter829) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16119,10 +16112,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype826, _size823) = iprot.readListBegin() - for _i827 in xrange(_size823): - _elem828 = iprot.readString() - self.success.append(_elem828) + (_etype833, _size830) = iprot.readListBegin() + for _i834 in xrange(_size830): + _elem835 = iprot.readString() + self.success.append(_elem835) iprot.readListEnd() else: iprot.skip(ftype) @@ -16145,8 +16138,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter829 in self.success: - oprot.writeString(iter829) + for iter836 in self.success: + oprot.writeString(iter836) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16916,12 +16909,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype831, _vtype832, _size830 ) = iprot.readMapBegin() - for _i834 in xrange(_size830): - _key835 = iprot.readString() - _val836 = Type() - _val836.read(iprot) - self.success[_key835] = 
_val836 + (_ktype838, _vtype839, _size837 ) = iprot.readMapBegin() + for _i841 in xrange(_size837): + _key842 = iprot.readString() + _val843 = Type() + _val843.read(iprot) + self.success[_key842] = _val843 iprot.readMapEnd() else: iprot.skip(ftype) @@ -16944,9 +16937,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter837,viter838 in self.success.items(): - oprot.writeString(kiter837) - viter838.write(oprot) + for kiter844,viter845 in self.success.items(): + oprot.writeString(kiter844) + viter845.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -17089,11 +17082,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype842, _size839) = iprot.readListBegin() - for _i843 in xrange(_size839): - _elem844 = FieldSchema() - _elem844.read(iprot) - self.success.append(_elem844) + (_etype849, _size846) = iprot.readListBegin() + for _i850 in xrange(_size846): + _elem851 = FieldSchema() + _elem851.read(iprot) + self.success.append(_elem851) iprot.readListEnd() else: iprot.skip(ftype) @@ -17128,8 +17121,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter845 in self.success: - iter845.write(oprot) + for iter852 in self.success: + iter852.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17296,11 +17289,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype849, _size846) = iprot.readListBegin() - for _i850 in xrange(_size846): - _elem851 = FieldSchema() - _elem851.read(iprot) - self.success.append(_elem851) + (_etype856, _size853) = iprot.readListBegin() + for _i857 in xrange(_size853): + _elem858 = FieldSchema() + _elem858.read(iprot) + self.success.append(_elem858) iprot.readListEnd() else: iprot.skip(ftype) @@ -17335,8 +17328,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter852 in self.success: - iter852.write(oprot) + for iter859 in self.success: + iter859.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17489,11 +17482,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype856, _size853) = iprot.readListBegin() - for _i857 in xrange(_size853): - _elem858 = FieldSchema() - _elem858.read(iprot) - self.success.append(_elem858) + (_etype863, _size860) = iprot.readListBegin() + for _i864 in xrange(_size860): + _elem865 = FieldSchema() + _elem865.read(iprot) + self.success.append(_elem865) iprot.readListEnd() else: iprot.skip(ftype) @@ -17528,8 +17521,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter859 in self.success: - iter859.write(oprot) + for iter866 in self.success: + iter866.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17696,11 +17689,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype863, _size860) = iprot.readListBegin() - for _i864 in xrange(_size860): - _elem865 = FieldSchema() - _elem865.read(iprot) - self.success.append(_elem865) + (_etype870, _size867) = iprot.readListBegin() + for _i871 in xrange(_size867): + _elem872 
= FieldSchema() + _elem872.read(iprot) + self.success.append(_elem872) iprot.readListEnd() else: iprot.skip(ftype) @@ -17735,8 +17728,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter866 in self.success: - iter866.write(oprot) + for iter873 in self.success: + iter873.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18189,66 +18182,66 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype870, _size867) = iprot.readListBegin() - for _i871 in xrange(_size867): - _elem872 = SQLPrimaryKey() - _elem872.read(iprot) - self.primaryKeys.append(_elem872) + (_etype877, _size874) = iprot.readListBegin() + for _i878 in xrange(_size874): + _elem879 = SQLPrimaryKey() + _elem879.read(iprot) + self.primaryKeys.append(_elem879) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype876, _size873) = iprot.readListBegin() - for _i877 in xrange(_size873): - _elem878 = SQLForeignKey() - _elem878.read(iprot) - self.foreignKeys.append(_elem878) + (_etype883, _size880) = iprot.readListBegin() + for _i884 in xrange(_size880): + _elem885 = SQLForeignKey() + _elem885.read(iprot) + self.foreignKeys.append(_elem885) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype882, _size879) = iprot.readListBegin() - for _i883 in xrange(_size879): - _elem884 = SQLUniqueConstraint() - _elem884.read(iprot) - self.uniqueConstraints.append(_elem884) + (_etype889, _size886) = iprot.readListBegin() + for _i890 in xrange(_size886): + _elem891 = SQLUniqueConstraint() + _elem891.read(iprot) + self.uniqueConstraints.append(_elem891) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype888, _size885) = iprot.readListBegin() - for _i889 in xrange(_size885): - _elem890 = SQLNotNullConstraint() - _elem890.read(iprot) - self.notNullConstraints.append(_elem890) + (_etype895, _size892) = iprot.readListBegin() + for _i896 in xrange(_size892): + _elem897 = SQLNotNullConstraint() + _elem897.read(iprot) + self.notNullConstraints.append(_elem897) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype894, _size891) = iprot.readListBegin() - for _i895 in xrange(_size891): - _elem896 = SQLDefaultConstraint() - _elem896.read(iprot) - self.defaultConstraints.append(_elem896) + (_etype901, _size898) = iprot.readListBegin() + for _i902 in xrange(_size898): + _elem903 = SQLDefaultConstraint() + _elem903.read(iprot) + self.defaultConstraints.append(_elem903) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.checkConstraints = [] - (_etype900, _size897) = iprot.readListBegin() - for _i901 in xrange(_size897): - _elem902 = SQLCheckConstraint() - _elem902.read(iprot) - self.checkConstraints.append(_elem902) + (_etype907, _size904) = iprot.readListBegin() + for _i908 in xrange(_size904): + _elem909 = SQLCheckConstraint() + _elem909.read(iprot) + self.checkConstraints.append(_elem909) iprot.readListEnd() else: iprot.skip(ftype) @@ -18269,43 +18262,43 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter903 in self.primaryKeys: - iter903.write(oprot) + 
for iter910 in self.primaryKeys: + iter910.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter904 in self.foreignKeys: - iter904.write(oprot) + for iter911 in self.foreignKeys: + iter911.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter905 in self.uniqueConstraints: - iter905.write(oprot) + for iter912 in self.uniqueConstraints: + iter912.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter906 in self.notNullConstraints: - iter906.write(oprot) + for iter913 in self.notNullConstraints: + iter913.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter907 in self.defaultConstraints: - iter907.write(oprot) + for iter914 in self.defaultConstraints: + iter914.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter908 in self.checkConstraints: - iter908.write(oprot) + for iter915 in self.checkConstraints: + iter915.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19865,10 +19858,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype912, _size909) = iprot.readListBegin() - for _i913 in xrange(_size909): - _elem914 = iprot.readString() - self.partNames.append(_elem914) + (_etype919, _size916) = iprot.readListBegin() + for _i920 in xrange(_size916): + _elem921 = iprot.readString() + self.partNames.append(_elem921) iprot.readListEnd() else: iprot.skip(ftype) @@ -19893,8 +19886,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter915 in self.partNames: - oprot.writeString(iter915) + for iter922 in self.partNames: + oprot.writeString(iter922) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20094,10 +20087,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype919, _size916) = iprot.readListBegin() - for _i920 in xrange(_size916): - _elem921 = iprot.readString() - self.success.append(_elem921) + (_etype926, _size923) = iprot.readListBegin() + for _i927 in xrange(_size923): + _elem928 = iprot.readString() + self.success.append(_elem928) iprot.readListEnd() else: iprot.skip(ftype) @@ -20120,8 +20113,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter922 in self.success: - oprot.writeString(iter922) + for iter929 in self.success: + oprot.writeString(iter929) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20271,10 +20264,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype926, _size923) = iprot.readListBegin() - for _i927 in 
xrange(_size923): - _elem928 = iprot.readString() - self.success.append(_elem928) + (_etype933, _size930) = iprot.readListBegin() + for _i934 in xrange(_size930): + _elem935 = iprot.readString() + self.success.append(_elem935) iprot.readListEnd() else: iprot.skip(ftype) @@ -20297,8 +20290,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter929 in self.success: - oprot.writeString(iter929) + for iter936 in self.success: + oprot.writeString(iter936) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20422,10 +20415,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype933, _size930) = iprot.readListBegin() - for _i934 in xrange(_size930): - _elem935 = iprot.readString() - self.success.append(_elem935) + (_etype940, _size937) = iprot.readListBegin() + for _i941 in xrange(_size937): + _elem942 = iprot.readString() + self.success.append(_elem942) iprot.readListEnd() else: iprot.skip(ftype) @@ -20448,8 +20441,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter936 in self.success: - oprot.writeString(iter936) + for iter943 in self.success: + oprot.writeString(iter943) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20522,10 +20515,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype940, _size937) = iprot.readListBegin() - for _i941 in xrange(_size937): - _elem942 = iprot.readString() - self.tbl_types.append(_elem942) + (_etype947, _size944) = iprot.readListBegin() + for _i948 in xrange(_size944): + _elem949 = iprot.readString() + self.tbl_types.append(_elem949) iprot.readListEnd() else: iprot.skip(ftype) @@ -20550,8 +20543,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter943 in self.tbl_types: - oprot.writeString(iter943) + for iter950 in self.tbl_types: + oprot.writeString(iter950) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20607,11 +20600,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype947, _size944) = iprot.readListBegin() - for _i948 in xrange(_size944): - _elem949 = TableMeta() - _elem949.read(iprot) - self.success.append(_elem949) + (_etype954, _size951) = iprot.readListBegin() + for _i955 in xrange(_size951): + _elem956 = TableMeta() + _elem956.read(iprot) + self.success.append(_elem956) iprot.readListEnd() else: iprot.skip(ftype) @@ -20634,8 +20627,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter950 in self.success: - iter950.write(oprot) + for iter957 in self.success: + iter957.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20759,10 +20752,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype954, _size951) = iprot.readListBegin() - for _i955 in xrange(_size951): - _elem956 = iprot.readString() - self.success.append(_elem956) + (_etype961, _size958) = iprot.readListBegin() + for _i962 in xrange(_size958): + _elem963 = iprot.readString() + self.success.append(_elem963) iprot.readListEnd() else: iprot.skip(ftype) @@ -20785,8 +20778,8 @@ def 
write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter957 in self.success: - oprot.writeString(iter957) + for iter964 in self.success: + oprot.writeString(iter964) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21022,10 +21015,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype961, _size958) = iprot.readListBegin() - for _i962 in xrange(_size958): - _elem963 = iprot.readString() - self.tbl_names.append(_elem963) + (_etype968, _size965) = iprot.readListBegin() + for _i969 in xrange(_size965): + _elem970 = iprot.readString() + self.tbl_names.append(_elem970) iprot.readListEnd() else: iprot.skip(ftype) @@ -21046,8 +21039,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter964 in self.tbl_names: - oprot.writeString(iter964) + for iter971 in self.tbl_names: + oprot.writeString(iter971) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21099,11 +21092,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype968, _size965) = iprot.readListBegin() - for _i969 in xrange(_size965): - _elem970 = Table() - _elem970.read(iprot) - self.success.append(_elem970) + (_etype975, _size972) = iprot.readListBegin() + for _i976 in xrange(_size972): + _elem977 = Table() + _elem977.read(iprot) + self.success.append(_elem977) iprot.readListEnd() else: iprot.skip(ftype) @@ -21120,8 +21113,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter971 in self.success: - iter971.write(oprot) + for iter978 in self.success: + iter978.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21513,10 +21506,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype975, _size972) = iprot.readListBegin() - for _i976 in xrange(_size972): - _elem977 = iprot.readString() - self.tbl_names.append(_elem977) + (_etype982, _size979) = iprot.readListBegin() + for _i983 in xrange(_size979): + _elem984 = iprot.readString() + self.tbl_names.append(_elem984) iprot.readListEnd() else: iprot.skip(ftype) @@ -21537,8 +21530,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter978 in self.tbl_names: - oprot.writeString(iter978) + for iter985 in self.tbl_names: + oprot.writeString(iter985) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21599,12 +21592,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype980, _vtype981, _size979 ) = iprot.readMapBegin() - for _i983 in xrange(_size979): - _key984 = iprot.readString() - _val985 = Materialization() - _val985.read(iprot) - self.success[_key984] = _val985 + (_ktype987, _vtype988, _size986 ) = iprot.readMapBegin() + for _i990 in xrange(_size986): + _key991 = iprot.readString() + _val992 = Materialization() + _val992.read(iprot) + self.success[_key991] = _val992 iprot.readMapEnd() else: iprot.skip(ftype) @@ -21639,9 +21632,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - 
for kiter986,viter987 in self.success.items(): - oprot.writeString(kiter986) - viter987.write(oprot) + for kiter993,viter994 in self.success.items(): + oprot.writeString(kiter993) + viter994.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22006,10 +21999,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype991, _size988) = iprot.readListBegin() - for _i992 in xrange(_size988): - _elem993 = iprot.readString() - self.success.append(_elem993) + (_etype998, _size995) = iprot.readListBegin() + for _i999 in xrange(_size995): + _elem1000 = iprot.readString() + self.success.append(_elem1000) iprot.readListEnd() else: iprot.skip(ftype) @@ -22044,8 +22037,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter994 in self.success: - oprot.writeString(iter994) + for iter1001 in self.success: + oprot.writeString(iter1001) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23015,11 +23008,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype998, _size995) = iprot.readListBegin() - for _i999 in xrange(_size995): - _elem1000 = Partition() - _elem1000.read(iprot) - self.new_parts.append(_elem1000) + (_etype1005, _size1002) = iprot.readListBegin() + for _i1006 in xrange(_size1002): + _elem1007 = Partition() + _elem1007.read(iprot) + self.new_parts.append(_elem1007) iprot.readListEnd() else: iprot.skip(ftype) @@ -23036,8 +23029,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1001 in self.new_parts: - iter1001.write(oprot) + for iter1008 in self.new_parts: + iter1008.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23195,11 +23188,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1005, _size1002) = iprot.readListBegin() - for _i1006 in xrange(_size1002): - _elem1007 = PartitionSpec() - _elem1007.read(iprot) - self.new_parts.append(_elem1007) + (_etype1012, _size1009) = iprot.readListBegin() + for _i1013 in xrange(_size1009): + _elem1014 = PartitionSpec() + _elem1014.read(iprot) + self.new_parts.append(_elem1014) iprot.readListEnd() else: iprot.skip(ftype) @@ -23216,8 +23209,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1008 in self.new_parts: - iter1008.write(oprot) + for iter1015 in self.new_parts: + iter1015.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23391,10 +23384,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1012, _size1009) = iprot.readListBegin() - for _i1013 in xrange(_size1009): - _elem1014 = iprot.readString() - self.part_vals.append(_elem1014) + (_etype1019, _size1016) = iprot.readListBegin() + for _i1020 in xrange(_size1016): + _elem1021 = iprot.readString() + self.part_vals.append(_elem1021) iprot.readListEnd() else: iprot.skip(ftype) @@ -23419,8 +23412,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1015 in self.part_vals: - oprot.writeString(iter1015) + for iter1022 in self.part_vals: + 
oprot.writeString(iter1022) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23773,10 +23766,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1019, _size1016) = iprot.readListBegin() - for _i1020 in xrange(_size1016): - _elem1021 = iprot.readString() - self.part_vals.append(_elem1021) + (_etype1026, _size1023) = iprot.readListBegin() + for _i1027 in xrange(_size1023): + _elem1028 = iprot.readString() + self.part_vals.append(_elem1028) iprot.readListEnd() else: iprot.skip(ftype) @@ -23807,8 +23800,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1022 in self.part_vals: - oprot.writeString(iter1022) + for iter1029 in self.part_vals: + oprot.writeString(iter1029) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -24403,10 +24396,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1026, _size1023) = iprot.readListBegin() - for _i1027 in xrange(_size1023): - _elem1028 = iprot.readString() - self.part_vals.append(_elem1028) + (_etype1033, _size1030) = iprot.readListBegin() + for _i1034 in xrange(_size1030): + _elem1035 = iprot.readString() + self.part_vals.append(_elem1035) iprot.readListEnd() else: iprot.skip(ftype) @@ -24436,8 +24429,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1029 in self.part_vals: - oprot.writeString(iter1029) + for iter1036 in self.part_vals: + oprot.writeString(iter1036) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -24610,10 +24603,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1033, _size1030) = iprot.readListBegin() - for _i1034 in xrange(_size1030): - _elem1035 = iprot.readString() - self.part_vals.append(_elem1035) + (_etype1040, _size1037) = iprot.readListBegin() + for _i1041 in xrange(_size1037): + _elem1042 = iprot.readString() + self.part_vals.append(_elem1042) iprot.readListEnd() else: iprot.skip(ftype) @@ -24649,8 +24642,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1036 in self.part_vals: - oprot.writeString(iter1036) + for iter1043 in self.part_vals: + oprot.writeString(iter1043) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -25387,10 +25380,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1040, _size1037) = iprot.readListBegin() - for _i1041 in xrange(_size1037): - _elem1042 = iprot.readString() - self.part_vals.append(_elem1042) + (_etype1047, _size1044) = iprot.readListBegin() + for _i1048 in xrange(_size1044): + _elem1049 = iprot.readString() + self.part_vals.append(_elem1049) iprot.readListEnd() else: iprot.skip(ftype) @@ -25415,8 +25408,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1043 in self.part_vals: - oprot.writeString(iter1043) + for iter1050 in self.part_vals: + oprot.writeString(iter1050) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25575,11 +25568,11 @@ def read(self, iprot): if fid == 1: if ftype == 
TType.MAP: self.partitionSpecs = {} - (_ktype1045, _vtype1046, _size1044 ) = iprot.readMapBegin() - for _i1048 in xrange(_size1044): - _key1049 = iprot.readString() - _val1050 = iprot.readString() - self.partitionSpecs[_key1049] = _val1050 + (_ktype1052, _vtype1053, _size1051 ) = iprot.readMapBegin() + for _i1055 in xrange(_size1051): + _key1056 = iprot.readString() + _val1057 = iprot.readString() + self.partitionSpecs[_key1056] = _val1057 iprot.readMapEnd() else: iprot.skip(ftype) @@ -25616,9 +25609,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1051,viter1052 in self.partitionSpecs.items(): - oprot.writeString(kiter1051) - oprot.writeString(viter1052) + for kiter1058,viter1059 in self.partitionSpecs.items(): + oprot.writeString(kiter1058) + oprot.writeString(viter1059) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -25823,11 +25816,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1054, _vtype1055, _size1053 ) = iprot.readMapBegin() - for _i1057 in xrange(_size1053): - _key1058 = iprot.readString() - _val1059 = iprot.readString() - self.partitionSpecs[_key1058] = _val1059 + (_ktype1061, _vtype1062, _size1060 ) = iprot.readMapBegin() + for _i1064 in xrange(_size1060): + _key1065 = iprot.readString() + _val1066 = iprot.readString() + self.partitionSpecs[_key1065] = _val1066 iprot.readMapEnd() else: iprot.skip(ftype) @@ -25864,9 +25857,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1060,viter1061 in self.partitionSpecs.items(): - oprot.writeString(kiter1060) - oprot.writeString(viter1061) + for kiter1067,viter1068 in self.partitionSpecs.items(): + oprot.writeString(kiter1067) + oprot.writeString(viter1068) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -25949,11 +25942,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1065, _size1062) = iprot.readListBegin() - for _i1066 in xrange(_size1062): - _elem1067 = Partition() - _elem1067.read(iprot) - self.success.append(_elem1067) + (_etype1072, _size1069) = iprot.readListBegin() + for _i1073 in xrange(_size1069): + _elem1074 = Partition() + _elem1074.read(iprot) + self.success.append(_elem1074) iprot.readListEnd() else: iprot.skip(ftype) @@ -25994,8 +25987,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1068 in self.success: - iter1068.write(oprot) + for iter1075 in self.success: + iter1075.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26089,10 +26082,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1072, _size1069) = iprot.readListBegin() - for _i1073 in xrange(_size1069): - _elem1074 = iprot.readString() - self.part_vals.append(_elem1074) + (_etype1079, _size1076) = iprot.readListBegin() + for _i1080 in xrange(_size1076): + _elem1081 = iprot.readString() + self.part_vals.append(_elem1081) iprot.readListEnd() else: iprot.skip(ftype) @@ -26104,10 +26097,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1078, _size1075) = 
iprot.readListBegin() - for _i1079 in xrange(_size1075): - _elem1080 = iprot.readString() - self.group_names.append(_elem1080) + (_etype1085, _size1082) = iprot.readListBegin() + for _i1086 in xrange(_size1082): + _elem1087 = iprot.readString() + self.group_names.append(_elem1087) iprot.readListEnd() else: iprot.skip(ftype) @@ -26132,8 +26125,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1081 in self.part_vals: - oprot.writeString(iter1081) + for iter1088 in self.part_vals: + oprot.writeString(iter1088) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -26143,8 +26136,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1082 in self.group_names: - oprot.writeString(iter1082) + for iter1089 in self.group_names: + oprot.writeString(iter1089) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26573,11 +26566,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1086, _size1083) = iprot.readListBegin() - for _i1087 in xrange(_size1083): - _elem1088 = Partition() - _elem1088.read(iprot) - self.success.append(_elem1088) + (_etype1093, _size1090) = iprot.readListBegin() + for _i1094 in xrange(_size1090): + _elem1095 = Partition() + _elem1095.read(iprot) + self.success.append(_elem1095) iprot.readListEnd() else: iprot.skip(ftype) @@ -26606,8 +26599,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1089 in self.success: - iter1089.write(oprot) + for iter1096 in self.success: + iter1096.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26701,10 +26694,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1093, _size1090) = iprot.readListBegin() - for _i1094 in xrange(_size1090): - _elem1095 = iprot.readString() - self.group_names.append(_elem1095) + (_etype1100, _size1097) = iprot.readListBegin() + for _i1101 in xrange(_size1097): + _elem1102 = iprot.readString() + self.group_names.append(_elem1102) iprot.readListEnd() else: iprot.skip(ftype) @@ -26737,8 +26730,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1096 in self.group_names: - oprot.writeString(iter1096) + for iter1103 in self.group_names: + oprot.writeString(iter1103) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26799,11 +26792,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1100, _size1097) = iprot.readListBegin() - for _i1101 in xrange(_size1097): - _elem1102 = Partition() - _elem1102.read(iprot) - self.success.append(_elem1102) + (_etype1107, _size1104) = iprot.readListBegin() + for _i1108 in xrange(_size1104): + _elem1109 = Partition() + _elem1109.read(iprot) + self.success.append(_elem1109) iprot.readListEnd() else: iprot.skip(ftype) @@ -26832,8 +26825,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1103 in self.success: - iter1103.write(oprot) + for iter1110 in self.success: + 
iter1110.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26991,11 +26984,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1107, _size1104) = iprot.readListBegin() - for _i1108 in xrange(_size1104): - _elem1109 = PartitionSpec() - _elem1109.read(iprot) - self.success.append(_elem1109) + (_etype1114, _size1111) = iprot.readListBegin() + for _i1115 in xrange(_size1111): + _elem1116 = PartitionSpec() + _elem1116.read(iprot) + self.success.append(_elem1116) iprot.readListEnd() else: iprot.skip(ftype) @@ -27024,8 +27017,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1110 in self.success: - iter1110.write(oprot) + for iter1117 in self.success: + iter1117.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27183,10 +27176,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1114, _size1111) = iprot.readListBegin() - for _i1115 in xrange(_size1111): - _elem1116 = iprot.readString() - self.success.append(_elem1116) + (_etype1121, _size1118) = iprot.readListBegin() + for _i1122 in xrange(_size1118): + _elem1123 = iprot.readString() + self.success.append(_elem1123) iprot.readListEnd() else: iprot.skip(ftype) @@ -27215,8 +27208,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1117 in self.success: - oprot.writeString(iter1117) + for iter1124 in self.success: + oprot.writeString(iter1124) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27456,10 +27449,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1121, _size1118) = iprot.readListBegin() - for _i1122 in xrange(_size1118): - _elem1123 = iprot.readString() - self.part_vals.append(_elem1123) + (_etype1128, _size1125) = iprot.readListBegin() + for _i1129 in xrange(_size1125): + _elem1130 = iprot.readString() + self.part_vals.append(_elem1130) iprot.readListEnd() else: iprot.skip(ftype) @@ -27489,8 +27482,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1124 in self.part_vals: - oprot.writeString(iter1124) + for iter1131 in self.part_vals: + oprot.writeString(iter1131) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -27554,11 +27547,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1128, _size1125) = iprot.readListBegin() - for _i1129 in xrange(_size1125): - _elem1130 = Partition() - _elem1130.read(iprot) - self.success.append(_elem1130) + (_etype1135, _size1132) = iprot.readListBegin() + for _i1136 in xrange(_size1132): + _elem1137 = Partition() + _elem1137.read(iprot) + self.success.append(_elem1137) iprot.readListEnd() else: iprot.skip(ftype) @@ -27587,8 +27580,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1131 in self.success: - iter1131.write(oprot) + for iter1138 in self.success: + iter1138.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27675,10 +27668,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] 
- (_etype1135, _size1132) = iprot.readListBegin() - for _i1136 in xrange(_size1132): - _elem1137 = iprot.readString() - self.part_vals.append(_elem1137) + (_etype1142, _size1139) = iprot.readListBegin() + for _i1143 in xrange(_size1139): + _elem1144 = iprot.readString() + self.part_vals.append(_elem1144) iprot.readListEnd() else: iprot.skip(ftype) @@ -27695,10 +27688,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1141, _size1138) = iprot.readListBegin() - for _i1142 in xrange(_size1138): - _elem1143 = iprot.readString() - self.group_names.append(_elem1143) + (_etype1148, _size1145) = iprot.readListBegin() + for _i1149 in xrange(_size1145): + _elem1150 = iprot.readString() + self.group_names.append(_elem1150) iprot.readListEnd() else: iprot.skip(ftype) @@ -27723,8 +27716,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1144 in self.part_vals: - oprot.writeString(iter1144) + for iter1151 in self.part_vals: + oprot.writeString(iter1151) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -27738,8 +27731,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1145 in self.group_names: - oprot.writeString(iter1145) + for iter1152 in self.group_names: + oprot.writeString(iter1152) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27801,11 +27794,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1149, _size1146) = iprot.readListBegin() - for _i1150 in xrange(_size1146): - _elem1151 = Partition() - _elem1151.read(iprot) - self.success.append(_elem1151) + (_etype1156, _size1153) = iprot.readListBegin() + for _i1157 in xrange(_size1153): + _elem1158 = Partition() + _elem1158.read(iprot) + self.success.append(_elem1158) iprot.readListEnd() else: iprot.skip(ftype) @@ -27834,8 +27827,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1152 in self.success: - iter1152.write(oprot) + for iter1159 in self.success: + iter1159.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27916,10 +27909,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1156, _size1153) = iprot.readListBegin() - for _i1157 in xrange(_size1153): - _elem1158 = iprot.readString() - self.part_vals.append(_elem1158) + (_etype1163, _size1160) = iprot.readListBegin() + for _i1164 in xrange(_size1160): + _elem1165 = iprot.readString() + self.part_vals.append(_elem1165) iprot.readListEnd() else: iprot.skip(ftype) @@ -27949,8 +27942,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1159 in self.part_vals: - oprot.writeString(iter1159) + for iter1166 in self.part_vals: + oprot.writeString(iter1166) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -28014,10 +28007,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1163, _size1160) = iprot.readListBegin() - for _i1164 in xrange(_size1160): - _elem1165 = iprot.readString() - self.success.append(_elem1165) + (_etype1170, 
_size1167) = iprot.readListBegin() + for _i1171 in xrange(_size1167): + _elem1172 = iprot.readString() + self.success.append(_elem1172) iprot.readListEnd() else: iprot.skip(ftype) @@ -28046,8 +28039,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1166 in self.success: - oprot.writeString(iter1166) + for iter1173 in self.success: + oprot.writeString(iter1173) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28218,11 +28211,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1170, _size1167) = iprot.readListBegin() - for _i1171 in xrange(_size1167): - _elem1172 = Partition() - _elem1172.read(iprot) - self.success.append(_elem1172) + (_etype1177, _size1174) = iprot.readListBegin() + for _i1178 in xrange(_size1174): + _elem1179 = Partition() + _elem1179.read(iprot) + self.success.append(_elem1179) iprot.readListEnd() else: iprot.skip(ftype) @@ -28251,8 +28244,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1173 in self.success: - iter1173.write(oprot) + for iter1180 in self.success: + iter1180.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28423,11 +28416,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1177, _size1174) = iprot.readListBegin() - for _i1178 in xrange(_size1174): - _elem1179 = PartitionSpec() - _elem1179.read(iprot) - self.success.append(_elem1179) + (_etype1184, _size1181) = iprot.readListBegin() + for _i1185 in xrange(_size1181): + _elem1186 = PartitionSpec() + _elem1186.read(iprot) + self.success.append(_elem1186) iprot.readListEnd() else: iprot.skip(ftype) @@ -28456,8 +28449,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1180 in self.success: - iter1180.write(oprot) + for iter1187 in self.success: + iter1187.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28877,10 +28870,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1184, _size1181) = iprot.readListBegin() - for _i1185 in xrange(_size1181): - _elem1186 = iprot.readString() - self.names.append(_elem1186) + (_etype1191, _size1188) = iprot.readListBegin() + for _i1192 in xrange(_size1188): + _elem1193 = iprot.readString() + self.names.append(_elem1193) iprot.readListEnd() else: iprot.skip(ftype) @@ -28905,8 +28898,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1187 in self.names: - oprot.writeString(iter1187) + for iter1194 in self.names: + oprot.writeString(iter1194) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28965,11 +28958,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1191, _size1188) = iprot.readListBegin() - for _i1192 in xrange(_size1188): - _elem1193 = Partition() - _elem1193.read(iprot) - self.success.append(_elem1193) + (_etype1198, _size1195) = iprot.readListBegin() + for _i1199 in xrange(_size1195): + _elem1200 = Partition() + _elem1200.read(iprot) + self.success.append(_elem1200) iprot.readListEnd() else: iprot.skip(ftype) @@ -28998,8 +28991,8 @@ 
def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1194 in self.success: - iter1194.write(oprot) + for iter1201 in self.success: + iter1201.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29249,11 +29242,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1198, _size1195) = iprot.readListBegin() - for _i1199 in xrange(_size1195): - _elem1200 = Partition() - _elem1200.read(iprot) - self.new_parts.append(_elem1200) + (_etype1205, _size1202) = iprot.readListBegin() + for _i1206 in xrange(_size1202): + _elem1207 = Partition() + _elem1207.read(iprot) + self.new_parts.append(_elem1207) iprot.readListEnd() else: iprot.skip(ftype) @@ -29278,8 +29271,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1201 in self.new_parts: - iter1201.write(oprot) + for iter1208 in self.new_parts: + iter1208.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -29390,25 +29383,16 @@ def __ne__(self, other): class alter_partitions_with_environment_context_args: """ Attributes: - - db_name - - tbl_name - - new_parts - - environment_context + - req """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 - (3, TType.LIST, 'new_parts', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3 - (4, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4 + (1, TType.STRUCT, 'req', (AlterPartitionsRequest, AlterPartitionsRequest.thrift_spec), None, ), # 1 ) - def __init__(self, db_name=None, tbl_name=None, new_parts=None, environment_context=None,): - self.db_name = db_name - self.tbl_name = tbl_name - self.new_parts = new_parts - self.environment_context = environment_context + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -29420,30 +29404,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.db_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.tbl_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.LIST: - self.new_parts = [] - (_etype1205, _size1202) = iprot.readListBegin() - for _i1206 in xrange(_size1202): - _elem1207 = Partition() - _elem1207.read(iprot) - self.new_parts.append(_elem1207) - iprot.readListEnd() - else: - iprot.skip(ftype) - elif fid == 4: if ftype == TType.STRUCT: - self.environment_context = EnvironmentContext() - self.environment_context.read(iprot) + self.req = AlterPartitionsRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -29456,24 +29419,9 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('alter_partitions_with_environment_context_args') - if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) - oprot.writeString(self.db_name) - oprot.writeFieldEnd() - if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) - 
oprot.writeString(self.tbl_name) - oprot.writeFieldEnd() - if self.new_parts is not None: - oprot.writeFieldBegin('new_parts', TType.LIST, 3) - oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1208 in self.new_parts: - iter1208.write(oprot) - oprot.writeListEnd() - oprot.writeFieldEnd() - if self.environment_context is not None: - oprot.writeFieldBegin('environment_context', TType.STRUCT, 4) - self.environment_context.write(oprot) + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -29484,10 +29432,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.db_name) - value = (value * 31) ^ hash(self.tbl_name) - value = (value * 31) ^ hash(self.new_parts) - value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -29504,17 +29449,19 @@ def __ne__(self, other): class alter_partitions_with_environment_context_result: """ Attributes: + - success - o1 - o2 """ thrift_spec = ( - None, # 0 + (0, TType.STRUCT, 'success', (AlterPartitionsResponse, AlterPartitionsResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, o1=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None,): + self.success = success self.o1 = o1 self.o2 = o2 @@ -29527,7 +29474,13 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: + if fid == 0: + if ftype == TType.STRUCT: + self.success = AlterPartitionsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: if ftype == TType.STRUCT: self.o1 = InvalidOperationException() self.o1.read(iprot) @@ -29549,6 +29502,10 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('alter_partitions_with_environment_context_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -29566,6 +29523,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) return value diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 11affe375b..ccca4e97ad 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -305,6 +305,23 @@ class SchemaVersionState: "DELETED": 8, } +class IsolationLevelCompliance: + YES = 1 + NO = 2 + UNKNOWN = 3 + + _VALUES_TO_NAMES = { + 1: "YES", + 2: "NO", + 3: "UNKNOWN", + } + + _NAMES_TO_VALUES = { + "YES": 1, + "NO": 2, + "UNKNOWN": 3, + } + class FunctionType: JAVA = 1 @@ -4550,6 +4567,9 @@ class Table: - creationMetadata - catName - ownerType + - txnId + - validWriteIdList + - isStatsCompliant """ thrift_spec = ( @@ -4572,9 +4592,12 @@ class Table: (16, TType.STRUCT, 'creationMetadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 16 (17, TType.STRING, 'catName', None, 
None, ), # 17 (18, TType.I32, 'ownerType', None, 1, ), # 18 + (19, TType.I64, 'txnId', None, -1, ), # 19 + (20, TType.STRING, 'validWriteIdList', None, None, ), # 20 + (21, TType.I32, 'isStatsCompliant', None, None, ), # 21 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4],): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4], txnId=thrift_spec[19][4], validWriteIdList=None, isStatsCompliant=None,): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -4593,6 +4616,9 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.creationMetadata = creationMetadata self.catName = catName self.ownerType = ownerType + self.txnId = txnId + self.validWriteIdList = validWriteIdList + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -4708,6 +4734,21 @@ def read(self, iprot): self.ownerType = iprot.readI32() else: iprot.skip(ftype) + elif fid == 19: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 20: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 21: + if ftype == TType.I32: + self.isStatsCompliant = iprot.readI32() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4797,6 +4838,18 @@ def write(self, oprot): oprot.writeFieldBegin('ownerType', TType.I32, 18) oprot.writeI32(self.ownerType) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 19) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 20) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 21) + oprot.writeI32(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -4824,6 +4877,9 @@ def __hash__(self): value = (value * 31) ^ hash(self.creationMetadata) value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.ownerType) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -4849,6 +4905,9 @@ class Partition: - parameters - privileges - catName + - txnId + - validWriteIdList + - isStatsCompliant """ thrift_spec = ( @@ -4862,9 +4921,12 @@ class Partition: (7, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 7 (8, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 8 (9, TType.STRING, 'catName', None, None, ), # 9 + (10, TType.I64, 'txnId', None, -1, 
), # 10 + (11, TType.STRING, 'validWriteIdList', None, None, ), # 11 + (12, TType.I32, 'isStatsCompliant', None, None, ), # 12 ) - def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None,): + def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None, txnId=thrift_spec[10][4], validWriteIdList=None, isStatsCompliant=None,): self.values = values self.dbName = dbName self.tableName = tableName @@ -4874,6 +4936,9 @@ def __init__(self, values=None, dbName=None, tableName=None, createTime=None, la self.parameters = parameters self.privileges = privileges self.catName = catName + self.txnId = txnId + self.validWriteIdList = validWriteIdList + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -4942,6 +5007,21 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 10: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.I32: + self.isStatsCompliant = iprot.readI32() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4995,6 +5075,18 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 9) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 10) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 11) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 12) + oprot.writeI32(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -5013,6 +5105,9 @@ def __hash__(self): value = (value * 31) ^ hash(self.parameters) value = (value * 31) ^ hash(self.privileges) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -5346,6 +5441,9 @@ class PartitionSpec: - sharedSDPartitionSpec - partitionList - catName + - txnId + - validWriteIdList + - isStatsCompliant """ thrift_spec = ( @@ -5356,15 +5454,21 @@ class PartitionSpec: (4, TType.STRUCT, 'sharedSDPartitionSpec', (PartitionSpecWithSharedSD, PartitionSpecWithSharedSD.thrift_spec), None, ), # 4 (5, TType.STRUCT, 'partitionList', (PartitionListComposingSpec, PartitionListComposingSpec.thrift_spec), None, ), # 5 (6, TType.STRING, 'catName', None, None, ), # 6 + (7, TType.I64, 'txnId', None, -1, ), # 7 + (8, TType.STRING, 'validWriteIdList', None, None, ), # 8 + (9, TType.I32, 'isStatsCompliant', None, None, ), # 9 ) - def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None,): + def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None, txnId=thrift_spec[7][4], validWriteIdList=None, 
isStatsCompliant=None,): self.dbName = dbName self.tableName = tableName self.rootPath = rootPath self.sharedSDPartitionSpec = sharedSDPartitionSpec self.partitionList = partitionList self.catName = catName + self.txnId = txnId + self.validWriteIdList = validWriteIdList + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -5407,6 +5511,21 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I32: + self.isStatsCompliant = iprot.readI32() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -5441,6 +5560,18 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 6) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 7) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 9) + oprot.writeI32(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -5456,6 +5587,9 @@ def __hash__(self): value = (value * 31) ^ hash(self.sharedSDPartitionSpec) value = (value * 31) ^ hash(self.partitionList) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -6841,17 +6975,26 @@ class ColumnStatistics: Attributes: - statsDesc - statsObj + - txnId + - validWriteIdList + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'statsDesc', (ColumnStatisticsDesc, ColumnStatisticsDesc.thrift_spec), None, ), # 1 (2, TType.LIST, 'statsObj', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 2 + (3, TType.I64, 'txnId', None, -1, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 + (5, TType.I32, 'isStatsCompliant', None, None, ), # 5 ) - def __init__(self, statsDesc=None, statsObj=None,): + def __init__(self, statsDesc=None, statsObj=None, txnId=thrift_spec[3][4], validWriteIdList=None, isStatsCompliant=None,): self.statsDesc = statsDesc self.statsObj = statsObj + self.txnId = txnId + self.validWriteIdList = validWriteIdList + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -6879,6 +7022,21 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.isStatsCompliant = iprot.readI32() + else: + iprot.skip(ftype) else: 
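# --- NOTE (reviewer sketch, not part of the generated patch) ---------------
# Above, alter_partitions_with_environment_context switched from four
# positional arguments (db_name, tbl_name, new_parts, environment_context)
# to a single request object, and its result gained an (empty)
# AlterPartitionsResponse success slot. A client-side sketch of the new call
# shape; `client` is assumed to be an open ThriftHiveMetastore.Client, and
# the db/table names are illustrative:
from hive_metastore.ttypes import AlterPartitionsRequest, EnvironmentContext

def alter_partitions(client, parts, txn_id=-1, write_ids=None):
    # txn_id=-1 / write_ids=None reproduce the old non-transactional call.
    req = AlterPartitionsRequest(dbName='db', tableName='t',
                                 partitions=parts,
                                 environmentContext=EnvironmentContext(),
                                 txnId=txn_id, validWriteIdList=write_ids)
    return client.alter_partitions_with_environment_context(req)
# ----------------------------------------------------------------------------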
iprot.skip(ftype) iprot.readFieldEnd() @@ -6900,6 +7058,18 @@ def write(self, oprot): iter243.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 3) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 5) + oprot.writeI32(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -6915,6 +7085,9 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.statsDesc) value = (value * 31) ^ hash(self.statsObj) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -6933,17 +7106,20 @@ class AggrStats: Attributes: - colStats - partsFound + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1 (2, TType.I64, 'partsFound', None, None, ), # 2 + (3, TType.I32, 'isStatsCompliant', None, None, ), # 3 ) - def __init__(self, colStats=None, partsFound=None,): + def __init__(self, colStats=None, partsFound=None, isStatsCompliant=None,): self.colStats = colStats self.partsFound = partsFound + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -6970,6 +7146,11 @@ def read(self, iprot): self.partsFound = iprot.readI64() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.isStatsCompliant = iprot.readI32() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -6991,6 +7172,10 @@ def write(self, oprot): oprot.writeFieldBegin('partsFound', TType.I64, 2) oprot.writeI64(self.partsFound) oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 3) + oprot.writeI32(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7006,6 +7191,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.colStats) value = (value * 31) ^ hash(self.partsFound) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -7024,17 +7210,23 @@ class SetPartitionsStatsRequest: Attributes: - colStats - needMerge + - txnId + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1 (2, TType.BOOL, 'needMerge', None, None, ), # 2 + (3, TType.I64, 'txnId', None, -1, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, colStats=None, needMerge=None,): + def __init__(self, colStats=None, needMerge=None, txnId=thrift_spec[3][4], validWriteIdList=None,): self.colStats = colStats self.needMerge = needMerge + self.txnId = txnId + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7061,6 +7253,16 @@ def read(self, iprot): 
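# --- NOTE (reviewer sketch, not part of the generated patch) ---------------
# Table, Partition, PartitionSpec and ColumnStatistics all gained the same
# optional trio: txnId (default -1, i.e. "no transaction"), the caller's
# validWriteIdList snapshot, and a server-filled isStatsCompliant enum.
# SetPartitionsStatsRequest is the writer-side counterpart: a client that
# updates statistics inside an open transaction ships its txnId and write-id
# snapshot so the metastore can record the stats as compliant. Hypothetical
# helper (names illustrative):
from hive_metastore.ttypes import SetPartitionsStatsRequest

def make_stats_request(col_stats, txn_id=-1, write_ids=None):
    # col_stats is a populated ColumnStatistics; the defaults keep the
    # pre-ACID call shape.
    return SetPartitionsStatsRequest(colStats=[col_stats], needMerge=False,
                                     txnId=txn_id, validWriteIdList=write_ids)
# ----------------------------------------------------------------------------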
self.needMerge = iprot.readBool() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -7082,6 +7284,14 @@ def write(self, oprot): oprot.writeFieldBegin('needMerge', TType.BOOL, 2) oprot.writeBool(self.needMerge) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 3) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7095,6 +7305,8 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.colStats) value = (value * 31) ^ hash(self.needMerge) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -9133,15 +9345,18 @@ class TableStatsResult: """ Attributes: - tableStats + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.LIST, 'tableStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1 + (2, TType.I32, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, tableStats=None,): + def __init__(self, tableStats=None, isStatsCompliant=None,): self.tableStats = tableStats + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9163,6 +9378,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.isStatsCompliant = iprot.readI32() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9180,6 +9400,10 @@ def write(self, oprot): iter380.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2) + oprot.writeI32(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9192,6 +9416,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.tableStats) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -9209,15 +9434,18 @@ class PartitionsStatsResult: """ Attributes: - partStats + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.MAP, 'partStats', (TType.STRING,None,TType.LIST,(TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec))), None, ), # 1 + (2, TType.I32, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, partStats=None,): + def __init__(self, partStats=None, isStatsCompliant=None,): self.partStats = partStats + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9245,6 +9473,11 @@ def read(self, iprot): iprot.readMapEnd() else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.isStatsCompliant = iprot.readI32() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9266,6 +9499,10 @@ def write(self, oprot): 
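# --- NOTE (reviewer sketch, not part of the generated patch) ---------------
# On the result side (TableStatsResult, PartitionsStatsResult, AggrStats,
# AddPartitionsResult, GetTableResult) isStatsCompliant is optional and
# purely informational: the server reports compliance instead of failing the
# call, so a reader that needs transactionally consistent stats must check
# the flag itself. Hypothetical helper (policy illustrative):
from hive_metastore.ttypes import IsolationLevelCompliance

def trusted_table_stats(result):
    # Only YES is trusted; NO, UNKNOWN and unset all fall back to None.
    if result.isStatsCompliant == IsolationLevelCompliance.YES:
        return result.tableStats
    return None
# ----------------------------------------------------------------------------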
oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2) + oprot.writeI32(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9278,6 +9515,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.partStats) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -9298,6 +9536,8 @@ class TableStatsRequest: - tblName - colNames - catName + - txnId + - validWriteIdList """ thrift_spec = ( @@ -9306,13 +9546,17 @@ class TableStatsRequest: (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3 (4, TType.STRING, 'catName', None, None, ), # 4 + (5, TType.I64, 'txnId', None, -1, ), # 5 + (6, TType.STRING, 'validWriteIdList', None, None, ), # 6 ) - def __init__(self, dbName=None, tblName=None, colNames=None, catName=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames self.catName = catName + self.txnId = txnId + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9348,6 +9592,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9377,6 +9631,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 4) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 5) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9396,6 +9658,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.colNames) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -9417,6 +9681,8 @@ class PartitionsStatsRequest: - colNames - partNames - catName + - txnId + - validWriteIdList """ thrift_spec = ( @@ -9426,14 +9692,18 @@ class PartitionsStatsRequest: (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3 (4, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 4 (5, TType.STRING, 'catName', None, None, ), # 5 + (6, TType.I64, 'txnId', None, -1, ), # 6 + (7, TType.STRING, 'validWriteIdList', None, None, ), # 7 ) - def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, txnId=thrift_spec[6][4], validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames self.partNames = partNames self.catName = catName + self.txnId = txnId + self.validWriteIdList = validWriteIdList def read(self, iprot): 
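# --- NOTE (reviewer sketch, not part of the generated patch) ---------------
# TableStatsRequest and PartitionsStatsRequest carry the snapshot in the
# other direction, letting the stats-fetch calls validate stored statistics
# against the reader's view. Sketch; the snapshot string is an illustrative
# placeholder (the real value is the serialized ValidWriteIdList obtained
# from the client's transaction manager):
from hive_metastore.ttypes import TableStatsRequest

req = TableStatsRequest(dbName='db', tblName='t', colNames=['id'],
                        txnId=42, validWriteIdList='db.t:42::')
assert TableStatsRequest().txnId == -1  # unset keeps the old behaviour
# ----------------------------------------------------------------------------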
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9479,6 +9749,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9515,6 +9795,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 5) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 6) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9537,6 +9825,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.colNames) value = (value * 31) ^ hash(self.partNames) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -9554,15 +9844,18 @@ class AddPartitionsResult: """ Attributes: - partitions + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1 + (2, TType.I32, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, partitions=None,): + def __init__(self, partitions=None, isStatsCompliant=None,): self.partitions = partitions + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9584,6 +9877,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.isStatsCompliant = iprot.readI32() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9601,6 +9899,10 @@ def write(self, oprot): iter424.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2) + oprot.writeI32(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9611,6 +9913,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.partitions) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -9633,6 +9936,8 @@ class AddPartitionsRequest: - ifNotExists - needResult - catName + - txnId + - validWriteIdList """ thrift_spec = ( @@ -9643,15 +9948,19 @@ class AddPartitionsRequest: (4, TType.BOOL, 'ifNotExists', None, None, ), # 4 (5, TType.BOOL, 'needResult', None, True, ), # 5 (6, TType.STRING, 'catName', None, None, ), # 6 + (7, TType.I64, 'txnId', None, -1, ), # 7 + (8, TType.STRING, 'validWriteIdList', None, None, ), # 8 ) - def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None,): + def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None, txnId=thrift_spec[7][4], validWriteIdList=None,): self.dbName = dbName self.tblName = 
tblName self.parts = parts self.ifNotExists = ifNotExists self.needResult = needResult self.catName = catName + self.txnId = txnId + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9698,6 +10007,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9735,6 +10054,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 6) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 7) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9758,6 +10085,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.ifNotExists) value = (value * 31) ^ hash(self.needResult) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -16609,6 +16938,8 @@ class GetTableRequest: - tblName - capabilities - catName + - txnId + - validWriteIdList """ thrift_spec = ( @@ -16617,13 +16948,17 @@ class GetTableRequest: (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3 (4, TType.STRING, 'catName', None, None, ), # 4 + (5, TType.I64, 'txnId', None, -1, ), # 5 + (6, TType.STRING, 'validWriteIdList', None, None, ), # 6 ) - def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None,): + def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.capabilities = capabilities self.catName = catName + self.txnId = txnId + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -16655,6 +16990,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -16681,6 +17026,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 4) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 5) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -16698,6 +17051,8 @@ def __hash__(self): value = (value * 31) ^ 
hash(self.tblName) value = (value * 31) ^ hash(self.capabilities) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -16715,15 +17070,18 @@ class GetTableResult: """ Attributes: - table + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'table', (Table, Table.thrift_spec), None, ), # 1 + (2, TType.I32, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, table=None,): + def __init__(self, table=None, isStatsCompliant=None,): self.table = table + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -16740,6 +17098,11 @@ def read(self, iprot): self.table.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.isStatsCompliant = iprot.readI32() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -16754,6 +17117,10 @@ def write(self, oprot): oprot.writeFieldBegin('table', TType.STRUCT, 1) self.table.write(oprot) oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2) + oprot.writeI32(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -16766,6 +17133,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -21666,6 +22034,200 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class AlterPartitionsRequest: + """ + Attributes: + - dbName + - tableName + - partitions + - environmentContext + - txnId + - validWriteIdList + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tableName', None, None, ), # 2 + (3, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3 + (4, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4 + (5, TType.I64, 'txnId', None, -1, ), # 5 + (6, TType.STRING, 'validWriteIdList', None, None, ), # 6 + ) + + def __init__(self, dbName=None, tableName=None, partitions=None, environmentContext=None, txnId=thrift_spec[5][4], validWriteIdList=None,): + self.dbName = dbName + self.tableName = tableName + self.partitions = partitions + self.environmentContext = environmentContext + self.txnId = txnId + self.validWriteIdList = validWriteIdList + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.partitions = [] + (_etype819, _size816) = iprot.readListBegin() + for _i820 in xrange(_size816): + _elem821 = Partition() + _elem821.read(iprot) + 
self.partitions.append(_elem821) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.environmentContext = EnvironmentContext() + self.environmentContext.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('AlterPartitionsRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 2) + oprot.writeString(self.tableName) + oprot.writeFieldEnd() + if self.partitions is not None: + oprot.writeFieldBegin('partitions', TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter822 in self.partitions: + iter822.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.environmentContext is not None: + oprot.writeFieldBegin('environmentContext', TType.STRUCT, 4) + self.environmentContext.write(oprot) + oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 5) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tableName is None: + raise TProtocol.TProtocolException(message='Required field tableName is unset!') + if self.partitions is None: + raise TProtocol.TProtocolException(message='Required field partitions is unset!') + if self.environmentContext is None: + raise TProtocol.TProtocolException(message='Required field environmentContext is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tableName) + value = (value * 31) ^ hash(self.partitions) + value = (value * 31) ^ hash(self.environmentContext) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class AlterPartitionsResponse: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + 
iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('AlterPartitionsResponse') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class MetaException(TException): """ Attributes: diff --git standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index fc640d07c1..7b5132c7f1 100644 --- standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -141,6 +141,14 @@ module SchemaVersionState VALID_VALUES = Set.new([INITIATED, START_REVIEW, CHANGES_REQUIRED, REVIEWED, ENABLED, DISABLED, ARCHIVED, DELETED]).freeze end +module IsolationLevelCompliance + YES = 1 + NO = 2 + UNKNOWN = 3 + VALUE_MAP = {1 => "YES", 2 => "NO", 3 => "UNKNOWN"} + VALID_VALUES = Set.new([YES, NO, UNKNOWN]).freeze +end + module FunctionType JAVA = 1 VALUE_MAP = {1 => "JAVA"} @@ -1062,6 +1070,9 @@ class Table CREATIONMETADATA = 16 CATNAME = 17 OWNERTYPE = 18 + TXNID = 19 + VALIDWRITEIDLIST = 20 + ISSTATSCOMPLIANT = 21 FIELDS = { TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, @@ -1081,7 +1092,10 @@ class Table REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true}, CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true}, CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, - OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default => 1, :optional => true, :enum_class => ::PrincipalType} + OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default => 1, :optional => true, :enum_class => ::PrincipalType}, + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance} } def struct_fields; FIELDS; end @@ -1090,6 +1104,9 @@ class Table unless @ownerType.nil? || ::PrincipalType::VALID_VALUES.include?(@ownerType) raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field ownerType!') end + unless @isStatsCompliant.nil? 
+          || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+        raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+      end
     end

     ::Thrift::Struct.generate_accessors self
@@ -1106,6 +1123,9 @@ class Partition
   PARAMETERS = 7
   PRIVILEGES = 8
   CATNAME = 9
+  TXNID = 10
+  VALIDWRITEIDLIST = 11
+  ISSTATSCOMPLIANT = 12

   FIELDS = {
     VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}},
@@ -1116,12 +1136,18 @@ class Partition
     SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor},
     PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
     PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
-    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
   }

   def struct_fields; FIELDS; end

   def validate
+    unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+    end
   end

   ::Thrift::Struct.generate_accessors self
@@ -1195,6 +1221,9 @@ class PartitionSpec
   SHAREDSDPARTITIONSPEC = 4
   PARTITIONLIST = 5
   CATNAME = 6
+  TXNID = 7
+  VALIDWRITEIDLIST = 8
+  ISSTATSCOMPLIANT = 9

   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
@@ -1202,12 +1231,18 @@ class PartitionSpec
     ROOTPATH => {:type => ::Thrift::Types::STRING, :name => 'rootPath'},
     SHAREDSDPARTITIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'sharedSDPartitionSpec', :class => ::PartitionSpecWithSharedSD, :optional => true},
     PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true},
-    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
   }

   def struct_fields; FIELDS; end

   def validate
+    unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+    end
   end

   ::Thrift::Struct.generate_accessors self
@@ -1547,10 +1582,16 @@ class ColumnStatistics
   include ::Thrift::Struct, ::Thrift::Struct_Union
   STATSDESC = 1
   STATSOBJ = 2
+  TXNID = 3
+  VALIDWRITEIDLIST = 4
+  ISSTATSCOMPLIANT = 5

   FIELDS = {
     STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc', :class => ::ColumnStatisticsDesc},
-    STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}
+    STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
   }

   def struct_fields; FIELDS; end
@@ -1558,6 +1599,9 @@ class ColumnStatistics
   def validate
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsDesc is unset!') unless @statsDesc
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsObj is unset!') unless @statsObj
+    unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+    end
   end

   ::Thrift::Struct.generate_accessors self
@@ -1567,10 +1611,12 @@ class AggrStats
   include ::Thrift::Struct, ::Thrift::Struct_Union
   COLSTATS = 1
   PARTSFOUND = 2
+  ISSTATSCOMPLIANT = 3

   FIELDS = {
     COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
-    PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'}
+    PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'},
+    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
   }

   def struct_fields; FIELDS; end
@@ -1578,6 +1624,9 @@ class AggrStats
   def validate
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partsFound is unset!') unless @partsFound
+    unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+    end
   end

   ::Thrift::Struct.generate_accessors self
@@ -1587,10 +1636,14 @@ class SetPartitionsStatsRequest
   include ::Thrift::Struct, ::Thrift::Struct_Union
   COLSTATS = 1
   NEEDMERGE = 2
+  TXNID = 3
+  VALIDWRITEIDLIST = 4

   FIELDS = {
     COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}},
-    NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true}
+    NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }

   def struct_fields; FIELDS; end
@@ -2055,15 +2108,20 @@ end
 class TableStatsResult
   include ::Thrift::Struct, ::Thrift::Struct_Union
   TABLESTATS = 1
+  ISSTATSCOMPLIANT = 2

   FIELDS = {
-    TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}
+    TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
+    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
   }

   def struct_fields; FIELDS; end

   def validate
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableStats is unset!') unless @tableStats
+    unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+    end
   end

   ::Thrift::Struct.generate_accessors self
@@ -2072,15 +2130,20 @@ end
 class PartitionsStatsResult
   include ::Thrift::Struct, ::Thrift::Struct_Union
   PARTSTATS = 1
+  ISSTATSCOMPLIANT = 2

   FIELDS = {
-    PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}}
+    PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}},
+    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
   }

   def struct_fields; FIELDS; end

   def validate
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partStats is unset!') unless @partStats
+    unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+    end
   end

   ::Thrift::Struct.generate_accessors self
@@ -2092,12 +2155,16 @@ class TableStatsRequest
   TBLNAME = 2
   COLNAMES = 3
   CATNAME = 4
+  TXNID = 5
+  VALIDWRITEIDLIST = 6

   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
     COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
-    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }

   def struct_fields; FIELDS; end
@@ -2118,13 +2185,17 @@ class PartitionsStatsRequest
   COLNAMES = 3
   PARTNAMES = 4
   CATNAME = 5
+  TXNID = 6
+  VALIDWRITEIDLIST = 7

   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
     COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
     PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}},
-    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }

   def struct_fields; FIELDS; end
@@ -2142,14 +2213,19 @@ end
 class AddPartitionsResult
   include ::Thrift::Struct, ::Thrift::Struct_Union
   PARTITIONS = 1
+  ISSTATSCOMPLIANT = 2

   FIELDS = {
-    PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true}
+    PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true},
+    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
   }

   def struct_fields; FIELDS; end

   def validate
+    unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+    end
   end

   ::Thrift::Struct.generate_accessors self
@@ -2163,6 +2239,8 @@ class AddPartitionsRequest
   IFNOTEXISTS = 4
   NEEDRESULT = 5
   CATNAME = 6
+  TXNID = 7
+  VALIDWRITEIDLIST = 8

   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
@@ -2170,7 +2248,9 @@ class AddPartitionsRequest
     PARTS => {:type => ::Thrift::Types::LIST, :name => 'parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
     IFNOTEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifNotExists'},
     NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true},
-    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }

   def struct_fields; FIELDS; end
@@ -3731,12 +3811,16 @@ class GetTableRequest
   TBLNAME = 2
   CAPABILITIES = 3
   CATNAME = 4
+  TXNID = 5
+  VALIDWRITEIDLIST = 6

   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
     CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true},
-    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }

   def struct_fields; FIELDS; end
@@ -3752,15 +3836,20 @@ end
 class GetTableResult
   include ::Thrift::Struct, ::Thrift::Struct_Union
   TABLE = 1
+  ISSTATSCOMPLIANT = 2

   FIELDS = {
-    TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table}
+    TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table},
+    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
   }

   def struct_fields; FIELDS; end

   def validate
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field table is unset!') unless @table
+    unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+    end
   end

   ::Thrift::Struct.generate_accessors self
@@ -4923,6 +5012,51 @@ class GetRuntimeStatsRequest
   ::Thrift::Struct.generate_accessors self
 end

+class AlterPartitionsRequest
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  DBNAME = 1
+  TABLENAME = 2
+  PARTITIONS = 3
+  ENVIRONMENTCONTEXT = 4
+  TXNID = 5
+  VALIDWRITEIDLIST = 6
+
+  FIELDS = {
+    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+    TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+    PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
+    ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partitions is unset!') unless @partitions
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field environmentContext is unset!') unless @environmentContext
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class AlterPartitionsResponse
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+
+  FIELDS = {
+
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class MetaException < ::Thrift::Exception
   include ::Thrift::Struct, ::Thrift::Struct_Union
   def initialize(message=nil)
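The generated Ruby above tracks the Thrift IDL change: every stats-carrying struct gains an optional txnId (defaulting to -1) and validWriteIdList, and result structs gain an optional isStatsCompliant enum. A minimal Java sketch, assuming the standard Thrift-generated accessor names (the same setters appear verbatim in the HiveMetaStoreClient hunks later in this diff); the class itself is illustrative, not part of the patch:

import org.apache.hadoop.hive.metastore.api.GetTableRequest;
import org.apache.hadoop.hive.metastore.api.GetTableResult;
import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;

public class GetTableWithSnapshot {
  // "default" and "acid_tbl" are placeholder names, not from the patch.
  static GetTableRequest buildRequest(long txnId, String validWriteIdList) {
    GetTableRequest req = new GetTableRequest("default", "acid_tbl");
    req.setTxnId(txnId);                       // optional; -1 means "no snapshot"
    req.setValidWriteIdList(validWriteIdList); // a serialized ValidWriteIdList
    return req;
  }

  static boolean statsUsable(GetTableResult result) {
    // isStatsCompliant is optional: unset means the server never evaluated it.
    return result.isSetIsStatsCompliant()
        && result.getIsStatsCompliant() == IsolationLevelCompliance.YES;
  }
}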
diff --git standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index bbf3f12d6b..3987ee9d5b 100644
--- standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -1416,20 +1416,21 @@ module ThriftHiveMetastore
       return
     end

-    def alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
-      send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
-      recv_alter_partitions_with_environment_context()
+    def alter_partitions_with_environment_context(req)
+      send_alter_partitions_with_environment_context(req)
+      return recv_alter_partitions_with_environment_context()
     end

-    def send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
-      send_message('alter_partitions_with_environment_context', Alter_partitions_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :new_parts => new_parts, :environment_context => environment_context)
+    def send_alter_partitions_with_environment_context(req)
+      send_message('alter_partitions_with_environment_context', Alter_partitions_with_environment_context_args, :req => req)
     end

     def recv_alter_partitions_with_environment_context()
       result = receive_message(Alter_partitions_with_environment_context_result)
+      return result.success unless result.success.nil?
       raise result.o1 unless result.o1.nil?
       raise result.o2 unless result.o2.nil?
-      return
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'alter_partitions_with_environment_context failed: unknown result')
     end

     def alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
@@ -4580,7 +4581,7 @@ module ThriftHiveMetastore
       args = read_args(iprot, Alter_partitions_with_environment_context_args)
       result = Alter_partitions_with_environment_context_result.new()
       begin
-        @handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context)
+        result.success = @handler.alter_partitions_with_environment_context(args.req)
       rescue ::InvalidOperationException => o1
         result.o1 = o1
       rescue ::MetaException => o2
@@ -9272,16 +9273,10 @@ module ThriftHiveMetastore
   class Alter_partitions_with_environment_context_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
-    DB_NAME = 1
-    TBL_NAME = 2
-    NEW_PARTS = 3
-    ENVIRONMENT_CONTEXT = 4
+    REQ = 1

     FIELDS = {
-      DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
-      TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
-      NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
-      ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}
+      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::AlterPartitionsRequest}
     }

     def struct_fields; FIELDS; end
@@ -9294,10 +9289,12 @@ module ThriftHiveMetastore
   class Alter_partitions_with_environment_context_result
     include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
     O1 = 1
     O2 = 2

     FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::AlterPartitionsResponse},
       O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidOperationException},
       O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
     }
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index 050dca9abf..010870dcf1 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -197,6 +197,6 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName
    */
   List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
       final String dbname, final String name, final List<Partition> new_parts,
-      EnvironmentContext environmentContext,IHMSHandler handler)
+      EnvironmentContext environmentContext, long txnId, String writeIdList, IHMSHandler handler)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
 }
\ No newline at end of file
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index c2da6d362f..5b70307920 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -144,7 +144,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
     // check if table with the new name already exists
     if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
-      if (msdb.getTable(catName, newDbName, newTblName) != null) {
+      if (msdb.getTable(catName, newDbName, newTblName, -1, null) != null) {
         throw new InvalidOperationException("new table " + newDbName
             + "." + newTblName + " already exists");
       }
@@ -153,7 +153,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
       msdb.openTransaction();
       // get old table
-      oldt = msdb.getTable(catName, dbname, name);
+      oldt = msdb.getTable(catName, dbname, name, -1, null);
       if (oldt == null) {
         throw new InvalidOperationException("table " +
             TableName.getQualified(catName, dbname, name) + " doesn't exist");
@@ -296,7 +296,8 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
           for (Partition part : partBatch) {
             partValues.add(part.getValues());
           }
-          msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch);
+          msdb.alterPartitions(
+              catName, newDbName, newTblName, partValues, partBatch, -1, null);
         }
       }
@@ -453,7 +454,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
       try {
         msdb.openTransaction();
-        Table tbl = msdb.getTable(catName, dbname, name);
+        Table tbl = msdb.getTable(catName, dbname, name, -1, null);
         if (tbl == null) {
           throw new InvalidObjectException(
               "Unable to alter partition because table or database does not exist.");
@@ -509,7 +510,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
     Database db;
     try {
       msdb.openTransaction();
-      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name);
+      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name, -1, null);
       if (tbl == null) {
         throw new InvalidObjectException(
             "Unable to alter partition because table or database does not exist.");
@@ -658,14 +659,15 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
       EnvironmentContext environmentContext)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts,
-        environmentContext, null);
+        environmentContext, -1, null, null);
   }

   @Override
   public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
       final String dbname, final String name, final List<Partition> new_parts,
-      EnvironmentContext environmentContext, IHMSHandler handler)
+      EnvironmentContext environmentContext,
+      long txnId, String writeIdList, IHMSHandler handler)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     List<Partition> oldParts = new ArrayList<>();
     List<List<String>> partValsList = new ArrayList<>();
@@ -678,7 +680,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
     try {
       msdb.openTransaction();
-      Table tbl = msdb.getTable(catName, dbname, name);
+      Table tbl = msdb.getTable(catName, dbname, name, -1, null);
       if (tbl == null) {
         throw new InvalidObjectException(
             "Unable to alter partitions because table or database does not exist.");
@@ -713,7 +715,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
         }
       }

-      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts);
+      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, txnId, writeIdList);
       Iterator<Partition> oldPartsIt = oldParts.iterator();
       for (Partition newPart : new_parts) {
         Partition oldPart;
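With the call now request-based, the regenerated Ruby client raises MISSING_RESULT when no success struct comes back, exactly as Thrift does for any non-void method; the (currently empty) AlterPartitionsResponse exists so the server can start returning data later without breaking the IDL again. On the Java side the same call reduces to filling in AlterPartitionsRequest. A sketch against the generated ThriftHiveMetastore.Iface; every setter below appears in this patch, but the wrapper class itself is illustrative:

import java.util.List;

import org.apache.hadoop.hive.metastore.api.AlterPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.AlterPartitionsResponse;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.TException;

class AlterPartitionsCall {
  static AlterPartitionsResponse alter(ThriftHiveMetastore.Iface client,
      String dbName, String tblName, List<Partition> newParts,
      long txnId, String validWriteIdList) throws TException {
    AlterPartitionsRequest req = new AlterPartitionsRequest();
    req.setDbName(dbName);
    req.setTableName(tblName);
    req.setPartitions(newParts);
    req.setEnvironmentContext(new EnvironmentContext()); // required field
    req.setTxnId(txnId);                       // optional, defaults to -1
    req.setValidWriteIdList(validWriteIdList); // optional, may stay null
    return client.alter_partitions_with_environment_context(req);
  }
}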
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 35c0f5c8c2..fb15cdabb5 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2425,7 +2425,7 @@ public void add_check_constraint(AddCheckConstraintRequest req)
     private boolean is_table_exists(RawStore ms, String catName, String dbname, String name)
         throws MetaException {
-      return (ms.getTable(catName, dbname, name) != null);
+      return (ms.getTable(catName, dbname, name, -1, null) != null);
     }

     private boolean drop_table_core(final RawStore ms, final String catName, final String dbname,
@@ -2838,7 +2838,8 @@ private boolean isExternalTablePurge(Table table) {
     public Table get_table(final String dbname, final String name) throws MetaException,
         NoSuchObjectException {
       String[] parsedDbName = parseDbName(dbname, conf);
-      return getTableInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null);
+      return getTableInternal(
+          parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, -1, null);
     }

     @Override
@@ -2846,11 +2847,12 @@ public GetTableResult get_table_req(GetTableRequest req) throws MetaException,
         NoSuchObjectException {
       String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
       return new GetTableResult(getTableInternal(catName, req.getDbName(), req.getTblName(),
-          req.getCapabilities()));
+          req.getCapabilities(), req.getTxnId(), req.getValidWriteIdList()));
     }

     private Table getTableInternal(String catName, String dbname, String name,
-        ClientCapabilities capabilities) throws MetaException, NoSuchObjectException {
+        ClientCapabilities capabilities, long txnId, String writeIdList)
+        throws MetaException, NoSuchObjectException {
       if (isInTest) {
         assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY, "Hive tests",
             "get_table_req");
@@ -2860,7 +2862,7 @@ private Table getTableInternal(String catName, String dbname, String name,
       startTableFunction("get_table", catName, dbname, name);
       Exception ex = null;
       try {
-        t = get_table_core(catName, dbname, name);
+        t = get_table_core(catName, dbname, name, txnId, writeIdList);
         if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) {
           assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES,
               "insert-only tables", "get_table_req");
@@ -2895,11 +2897,25 @@ private Table getTableInternal(String catName, String dbname, String name,
     }

     @Override
-    public Table get_table_core(final String catName, final String dbname, final String name)
+    public Table get_table_core(
+        final String catName,
+        final String dbname,
+        final String name)
+        throws MetaException, NoSuchObjectException {
+      return get_table_core(catName, dbname, name, -1, null);
+    }
+
+    @Override
+    public Table get_table_core(
+        final String catName,
+        final String dbname,
+        final String name,
+        final long txnId,
+        final String writeIdList)
         throws MetaException, NoSuchObjectException {
       Table t = null;
       try {
-        t = getMS().getTable(catName, dbname, name);
+        t = getMS().getTable(catName, dbname, name, txnId, writeIdList);
         if (t == null) {
           throw new NoSuchObjectException(TableName.getQualified(catName, dbname, name) +
               " table not found");
@@ -3081,7 +3097,7 @@ private Partition append_partition_common(RawStore ms, String catName, String db
       MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern);
-      tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName());
+      tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null);
       if (tbl == null) {
         throw new InvalidObjectException(
             "Unable to add partition because table or database do not exist");
@@ -3275,7 +3291,7 @@ public boolean equals(Object obj) {
       try {
         ms.openTransaction();
-        tbl = ms.getTable(catName, dbName, tblName);
+        tbl = ms.getTable(catName, dbName, tblName, -1, null);
         if (tbl == null) {
           throw new InvalidObjectException("Unable to add partitions because "
               + TableName.getQualified(catName, dbName, tblName) +
@@ -3546,7 +3562,7 @@ private int add_partitions_pspec_core(RawStore ms, String catName, String dbName
       Database db = null;
       try {
         ms.openTransaction();
-        tbl = ms.getTable(catName, dbName, tblName);
+        tbl = ms.getTable(catName, dbName, tblName, -1, null);
         if (tbl == null) {
           throw new InvalidObjectException("Unable to add partitions because "
               + "database or table " + dbName + "." + tblName + " does not exist");
@@ -3800,7 +3816,7 @@ private Partition add_partition_core(final RawStore ms,
       }
       try {
         ms.openTransaction();
-        tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName());
+        tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null);
         if (tbl == null) {
           throw new InvalidObjectException(
               "Unable to add partition because table or database do not exist");
@@ -3921,14 +3937,16 @@ public Partition exchange_partition(Map<String, String> partitionSpecs,
         ms.openTransaction();
         Table destinationTable =
-            ms.getTable(parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName);
+            ms.getTable(
+                parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName, -1, null);
         if (destinationTable == null) {
           throw new MetaException( "The destination table " +
               TableName.getQualified(parsedDestDbName[CAT_NAME],
                   parsedDestDbName[DB_NAME], destTableName) + " not found");
         }
         Table sourceTable =
-            ms.getTable(parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName);
+            ms.getTable(
+                parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName, -1, null);
         if (sourceTable == null) {
           throw new MetaException("The source table " +
               TableName.getQualified(parsedSourceDbName[CAT_NAME],
@@ -4105,7 +4123,7 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam
       try {
         ms.openTransaction();
         part = ms.getPartition(catName, db_name, tbl_name, part_vals);
-        tbl = get_table_core(catName, db_name, tbl_name);
+        tbl = get_table_core(catName, db_name, tbl_name, -1, null);
         tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData);
         firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
         mustPurge = isMustPurge(envContext, tbl);
@@ -4833,7 +4851,7 @@ private void rename_partition(final String catName, final String db_name, final
         Table table = null;
         if (!listeners.isEmpty()) {
           if (table == null) {
-            table = getMS().getTable(catName, db_name, tbl_name);
+            table = getMS().getTable(catName, db_name, tbl_name, -1, null);
           }

           MetaStoreListenerNotifier.notifyEvent(listeners,
@@ -4862,12 +4880,24 @@ private void rename_partition(final String catName, final String db_name, final
     public void alter_partitions(final String db_name, final String tbl_name,
         final List<Partition> new_parts)
         throws TException {
-      alter_partitions_with_environment_context(db_name, tbl_name, new_parts, null);
+      alter_partitions_with_environment_context(
+          db_name, tbl_name, new_parts, null, -1, null);
     }

     @Override
-    public void alter_partitions_with_environment_context(final String db_name, final String tbl_name,
-        final List<Partition> new_parts, EnvironmentContext environmentContext)
+    public AlterPartitionsResponse alter_partitions_with_environment_context(
+        AlterPartitionsRequest req)
+        throws TException {
+      alter_partitions_with_environment_context(
+          req.getDbName(), req.getTableName(), req.getPartitions(), req.getEnvironmentContext(),
+          req.isSetTxnId() ? req.getTxnId() : -1,
+          req.isSetValidWriteIdList() ? req.getValidWriteIdList() : null);
+      return new AlterPartitionsResponse();
+    }
+
+    private void alter_partitions_with_environment_context(final String db_name, final String tbl_name,
+        final List<Partition> new_parts, EnvironmentContext environmentContext,
+        long txnId, String writeIdList)
         throws TException {
       String[] parsedDbName = parseDbName(db_name, conf);
@@ -4891,7 +4921,7 @@ public void alter_partitions_with_environment_context(final String db_name, fina
           firePreEvent(new PreAlterPartitionEvent(parsedDbName[DB_NAME], tbl_name, null, tmpPart, this));
         }
         oldParts = alterHandler.alterPartitions(getMS(), wh, parsedDbName[CAT_NAME],
-            parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, this);
+            parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, txnId, writeIdList, this);
         Iterator<Partition> olditr = oldParts.iterator();
         // Only fetch the table if we have a listener that needs it.
         Table table = null;
@@ -4905,7 +4935,8 @@ public void alter_partitions_with_environment_context(final String db_name, fina
           }

           if (table == null) {
-            table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+            table = getMS().getTable(
+                parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, -1, null);
           }

           if (!listeners.isEmpty()) {
@@ -5325,7 +5356,7 @@ public String get_config_value(String name, String defaultValue)
     private List<String> getPartValsFromName(RawStore ms, String catName, String dbName,
         String tblName, String partName) throws MetaException, InvalidObjectException {
-      Table t = ms.getTable(catName, dbName, tblName);
+      Table t = ms.getTable(catName, dbName, tblName, -1, null);
       if (t == null) {
         throw new InvalidObjectException(dbName + "." + tblName
             + " table not found");
@@ -5580,7 +5611,8 @@ public ColumnStatistics get_table_column_statistics(String dbName, String tableN
       ColumnStatistics statsObj = null;
       try {
         statsObj = getMS().getTableColumnStatistics(
-            parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName));
+            parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName),
+            -1, null);
         if (statsObj != null) {
           assert statsObj.getStatsObjSize() <= 1;
         }
@@ -5604,7 +5636,9 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) thro
         lowerCaseColNames.add(colName.toLowerCase());
       }
       try {
-        ColumnStatistics cs = getMS().getTableColumnStatistics(catName, dbName, tblName, lowerCaseColNames);
+        ColumnStatistics cs = getMS().getTableColumnStatistics(
+            catName, dbName, tblName, lowerCaseColNames,
+            request.getTxnId(), request.getValidWriteIdList());
         result = new TableStatsResult((cs == null || cs.getStatsObj() == null)
             ? Lists.newArrayList() : cs.getStatsObj());
       } finally {
@@ -7311,8 +7345,9 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TExce
       AggrStats aggrStats = null;
       try {
-        aggrStats = new AggrStats(getMS().get_aggr_stats_for(catName, dbName, tblName,
-            lowerCasePartNames, lowerCaseColNames));
+        aggrStats = getMS().get_aggr_stats_for(catName, dbName, tblName,
+            lowerCasePartNames, lowerCaseColNames, request.getTxnId(),
+            request.getValidWriteIdList());
         return aggrStats;
       } finally {
         endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName());
@@ -7346,7 +7381,10 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc
       } else {
         if (request.isSetNeedMerge() && request.isNeedMerge()) {
           // one single call to get all column stats
-          ColumnStatistics csOld = getMS().getTableColumnStatistics(catName, dbName, tableName, colNames);
+          ColumnStatistics csOld =
+              getMS().getTableColumnStatistics(
+                  catName, dbName, tableName, colNames,
+                  request.getTxnId(), request.getValidWriteIdList());
           Table t = getTable(catName, dbName, tableName);
           // we first use t.getParameters() to prune the stats
           MetaStoreUtils.getMergableCols(firstColStats, t.getParameters());
@@ -7386,8 +7424,10 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc
           // a single call to get all column stats for all partitions
           List<String> partitionNames = new ArrayList<>();
           partitionNames.addAll(newStatsMap.keySet());
-          List<ColumnStatistics> csOlds = getMS().getPartitionColumnStatistics(catName, dbName,
-              tableName, partitionNames, colNames);
+          List<ColumnStatistics> csOlds =
+              getMS().getPartitionColumnStatistics(
+                  catName, dbName, tableName, partitionNames, colNames,
+                  request.getTxnId(), request.getValidWriteIdList());
           if (newStatsMap.values().size() != csOlds.size()) {
             // some of the partitions miss stats.
             LOG.debug("Some of the partitions miss stats.");
@@ -7401,7 +7441,8 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc
               mapToPart.put(partitionNames.get(index), partitions.get(index));
             }
           }
-          Table t = getTable(catName, dbName, tableName);
+          Table t = getTable(catName, dbName, tableName,
+              request.getTxnId(), request.getValidWriteIdList());
           for (Entry<String, ColumnStatistics> entry : newStatsMap.entrySet()) {
             ColumnStatistics csNew = entry.getValue();
             ColumnStatistics csOld = oldStatsMap.get(entry.getKey());
@@ -7428,7 +7469,13 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc
     private Table getTable(String catName, String dbName, String tableName)
         throws MetaException, InvalidObjectException {
-      Table t = getMS().getTable(catName, dbName, tableName);
+      return getTable(catName, dbName, tableName, -1, null);
+    }
+
+    private Table getTable(String catName, String dbName, String tableName,
+        long txnId, String writeIdList)
+        throws MetaException, InvalidObjectException {
+      Table t = getMS().getTable(catName, dbName, tableName, txnId, writeIdList);
       if (t == null) {
         throw new InvalidObjectException(TableName.getQualified(catName, dbName, tableName)
             + " table not found");
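A pattern running through the handler hunks above: internal call sites with no transactional context pass the sentinel pair (-1, null), while request-driven entry points forward whatever the client set. Condensed into one hypothetical helper (RawStore and GetTableRequest are the real types touched here; fetch() itself is not in the patch):

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.GetTableRequest;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;

class SnapshotForwarding {
  static Table fetch(RawStore ms, GetTableRequest req, String catName)
      throws MetaException {
    // (-1, null) means "no snapshot supplied"; RawStore implementations are
    // expected to skip the stats-compliance check in that case.
    long txnId = req.isSetTxnId() ? req.getTxnId() : -1;
    String writeIds = req.isSetValidWriteIdList() ? req.getValidWriteIdList() : null;
    return ms.getTable(catName, req.getDbName(), req.getTblName(), txnId, writeIds);
  }
}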
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index da41e6eb2b..1e50ba7b0e 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -791,6 +791,50 @@ public Partition exchange_partition(Map<String, String> partitionSpecs, String s
   }

   @Override
+  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+      String dbName, String tableName, List<String> partNames, List<String> colNames,
+      long txnId, String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName,
+        partNames, colNames, txnId, validWriteIdList);
+  }
+
+  @Override
+  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+      String catName, String dbName, String tableName, List<String> partNames,
+      List<String> colNames, long txnId, String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames,
+        partNames);
+    rqst.setCatName(catName);
+    rqst.setTxnId(txnId);
+    rqst.setValidWriteIdList(validWriteIdList);
+    return client.get_partitions_statistics_req(rqst).getPartStats();
+  }
+
+  @Override
+  public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames,
+      List<String> partNames, long txnId, String writeIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames,
+        partNames, txnId, writeIdList);
+  }
+
+  @Override
+  public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List<String> colNames,
+      List<String> partNames, long txnId, String writeIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    if (colNames.isEmpty() || partNames.isEmpty()) {
+      LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side.");
+      return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate
+    }
+    PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames);
+    req.setCatName(catName);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(writeIdList);
+    return client.get_aggr_stats_for(req);
+  }
+
+  @Override
   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
       String sourceDb, String sourceTable, String destCat, String destDb,
       String destTableName) throws TException {
@@ -1584,6 +1628,14 @@ public Table getTable(String dbname, String name) throws TException {
   }

   @Override
+  public Table getTable(String dbname, String name,
+      long txnId, String validWriteIdList)
+      throws MetaException, TException, NoSuchObjectException {
+    return getTable(getDefaultCatalog(conf), dbname, name,
+        txnId, validWriteIdList);
+  }
+
+  @Override
   public Table getTable(String catName, String dbName, String tableName) throws TException {
     GetTableRequest req = new GetTableRequest(dbName, tableName);
     req.setCatName(catName);
@@ -1593,6 +1645,18 @@ public Table getTable(String catName, String dbName, String tableName) throws TE
   }

   @Override
+  public Table getTable(String catName, String dbName, String tableName,
+      long txnId, String validWriteIdList) throws TException {
+    GetTableRequest req = new GetTableRequest(dbName, tableName);
+    req.setCatName(catName);
+    req.setCapabilities(version);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(validWriteIdList);
+    Table t = client.get_table_req(req).getTable();
+    return deepCopy(filterHook.filterTable(t));
+  }
+
+  @Override
   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
       throws TException {
     return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames);
@@ -1821,21 +1885,42 @@ public void alter_partition(String catName, String dbName, String tblName, Parti
   @Override
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
       throws TException {
-    alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null);
+    alter_partitions(
+        getDefaultCatalog(conf), dbName, tblName, newParts, new EnvironmentContext(), -1, null);
   }

   @Override
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
       EnvironmentContext environmentContext) throws TException {
-    alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext);
+    alter_partitions(
+        getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext, -1, null);
+  }
+
+  @Override
+  public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+      EnvironmentContext environmentContext,
+      long txnId, String writeIdList)
+      throws InvalidOperationException, MetaException, TException {
+    alter_partitions(getDefaultCatalog(conf),
+        dbName, tblName, newParts, environmentContext, txnId, writeIdList);
   }

   @Override
   public void alter_partitions(String catName, String dbName, String tblName,
       List<Partition> newParts,
-      EnvironmentContext environmentContext) throws TException {
-    client.alter_partitions_with_environment_context(prependCatalogToDbName(catName, dbName, conf),
-        tblName, newParts, environmentContext);
+      EnvironmentContext environmentContext,
+      long txnId, String writeIdList) throws TException {
+    AlterPartitionsRequest req = new AlterPartitionsRequest();
+    req.setDbName(prependCatalogToDbName(catName, dbName, conf));
+    req.setTableName(tblName);
+    req.setPartitions(newParts);
+    req.setEnvironmentContext(environmentContext);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(writeIdList);
+    client.alter_partitions_with_environment_context(req);
   }

   @Override
@@ -1967,6 +2052,28 @@ public void flushCache() {
   }

   @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+      List<String> colNames,
+      long txnId,
+      String validWriteIdList) throws TException {
+    return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames,
+        txnId, validWriteIdList);
+  }
+
+  @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName,
+      String tableName,
+      List<String> colNames,
+      long txnId,
+      String validWriteIdList) throws TException {
+    TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames);
+    rqst.setCatName(catName);
+    rqst.setTxnId(txnId);
+    rqst.setValidWriteIdList(validWriteIdList);
+    return client.get_table_statistics_req(rqst).getTableStats();
+  }
+
+  @Override
   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
       String dbName, String tableName, List<String> partNames, List<String> colNames)
       throws TException {
@@ -3319,4 +3426,5 @@ public void addRuntimeStat(RuntimeStat stat) throws TException {
     req.setMaxCreateTime(maxCreateTime);
     return client.get_runtime_stats(req);
   }
+
 }
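Taken together, the client hunks give IMetaStoreClient users a snapshot-aware stats path end to end. A rough usage sketch; the write-id-list string is a hand-written placeholder, and in a real reader it would come from the transaction manager's serialized ValidWriteIdList:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

public class StatsWithSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = MetastoreConf.newMetastoreConf();
    HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
    long txnId = 42L; // assumed: an already-open transaction id
    String writeIds = "default.acid_tbl:5:9223372036854775807::"; // placeholder
    List<ColumnStatisticsObj> stats = msc.getTableColumnStatistics(
        "default", "acid_tbl", Arrays.asList("id", "name"), txnId, writeIds);
    System.out.println("fetched " + stats.size() + " column stats");
    msc.close();
  }
}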
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 29c98d1fa7..3a65f77be4 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -90,6 +90,11 @@ Database get_database_core(final String catName, final String name)
   Table get_table_core(final String catName, final String dbname, final String name)
       throws MetaException, NoSuchObjectException;

+  Table get_table_core(final String catName, final String dbname,
+      final String name, final long txnId,
+      final String writeIdList)
+      throws MetaException, NoSuchObjectException;
+
   /**
    * Get a list of all transactional listeners.
    * @return list of listeners.
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index bc09076415..8bc3df58ea 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -712,6 +712,10 @@ Database getDatabase(String catalogName, String databaseName)
   Table getTable(String dbName, String tableName) throws MetaException,
       TException, NoSuchObjectException;

+  Table getTable(String dbName, String tableName,
+      long txnId, String validWriteIdList)
+      throws MetaException, TException, NoSuchObjectException;
+
   /**
    * Get a table object.
    * @param catName catalog the table is in.
@@ -723,6 +727,8 @@ Table getTable(String dbName, String tableName) throws MetaException,
    */
   Table getTable(String catName, String dbName, String tableName) throws MetaException, TException;

+  Table getTable(String catName, String dbName, String tableName,
+      long txnId, String validWriteIdList) throws TException;
+
   /**
    * Get tables as objects (rather than just fetching their names). This is more expensive and
    * should only be used if you actually need all the information about the tables.
@@ -2125,6 +2131,11 @@ void alter_partitions(String dbName, String tblName, List<Partition> newParts,
       EnvironmentContext environmentContext)
       throws InvalidOperationException, MetaException, TException;

+  void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+      EnvironmentContext environmentContext,
+      long txnId, String writeIdList)
+      throws InvalidOperationException, MetaException, TException;
+
   /**
    * updates a list of partitions
    * @param catName catalog name.
@@ -2144,7 +2155,7 @@ void alter_partitions(String dbName, String tblName, List<Partition> newParts,
   default void alter_partitions(String catName, String dbName, String tblName,
       List<Partition> newParts)
       throws InvalidOperationException, MetaException, TException {
-    alter_partitions(catName, dbName, tblName, newParts, null);
+    alter_partitions(catName, dbName, tblName, newParts, new EnvironmentContext(), -1, null);
   }

   /**
@@ -2165,7 +2176,8 @@ default void alter_partitions(String catName, String dbName, String tblName,
    * if error in communicating with metastore server
    */
   void alter_partitions(String catName, String dbName, String tblName, List<Partition> newParts,
-      EnvironmentContext environmentContext)
+      EnvironmentContext environmentContext,
+      long txnId, String writeIdList)
       throws InvalidOperationException, MetaException, TException;

   /**
@@ -2346,6 +2358,12 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
   List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
       List<String> colNames) throws NoSuchObjectException, MetaException, TException;

+  List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+      List<String> colNames,
+      long txnId,
+      String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException;
+
   /**
    * Get the column statistics for a set of columns in a table. This should only be used for
    * non-partitioned tables. For partitioned tables use
@@ -2363,6 +2381,11 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
       List<String> colNames) throws NoSuchObjectException, MetaException, TException;

+  List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName, String tableName,
+      List<String> colNames,
+      long txnId,
+      String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException;
   /**
    * Get the column statistics for a set of columns in a partition.
    * @param dbName database name
@@ -2379,6 +2402,11 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
       String tableName, List<String> partNames, List<String> colNames)
       throws NoSuchObjectException, MetaException, TException;

+  Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName,
+      String tableName, List<String> partNames, List<String> colNames,
+      long txnId, String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException;
+
  /**
   * Get the column statistics for a set of columns in a partition.
   * @param catName catalog name
@@ -2396,6 +2424,11 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
      String catName, String dbName, String tableName, List<String> partNames,
      List<String> colNames) throws NoSuchObjectException, MetaException, TException;

+  Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+      String catName, String dbName, String tableName,
+      List<String> partNames, List<String> colNames,
+      long txnId, String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException;
  /**
   * Delete partition level column statistics given dbName, tableName, partName and colName, or
   * all columns in a partition.
@@ -3237,6 +3270,10 @@ GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
   AggrStats getAggrColStatsFor(String dbName, String tblName,
       List<String> colNames, List<String> partName) throws NoSuchObjectException, MetaException, TException;

+  AggrStats getAggrColStatsFor(String dbName, String tblName,
+      List<String> colNames, List<String> partName,
+      long txnId, String writeIdList) throws NoSuchObjectException, MetaException, TException;
+
   /**
    * Get aggregated column stats for a set of partitions.
    * @param catName catalog name
@@ -3253,6 +3290,10 @@ AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
       List<String> colNames, List<String> partNames)
       throws NoSuchObjectException, MetaException, TException;

+  AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
+      List<String> colNames, List<String> partNames,
+      long txnId, String writeIdList)
+      throws NoSuchObjectException, MetaException, TException;
   /**
    * Set table or partition column statistics.
    * @param request request object, contains all the table, partition, and statistics information
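One behavioral detail in the interface defaults above: alter_partitions used to pass a null EnvironmentContext, but AlterPartitionsRequest declares environmentContext required (its generated validate earlier in this diff rejects an unset field), so the defaults now substitute an empty context. The same rule as a hypothetical guard:

import org.apache.hadoop.hive.metastore.api.AlterPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

class EnvCtxGuard {
  // A null context would fail request validation on the wire, so always
  // fall back to an empty EnvironmentContext.
  static void setContext(AlterPartitionsRequest req, EnvironmentContext ctx) {
    req.setEnvironmentContext(ctx == null ? new EnvironmentContext() : ctx);
  }
}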
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 0d2da7a200..2c3554edc4 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -80,140 +80,29 @@
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.DatabaseName;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.common.*;
 import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
-import org.apache.hadoop.hive.metastore.api.SchemaType;
-import org.apache.hadoop.hive.metastore.api.SchemaValidation;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SerdeType;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
 import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
 import org.apache.hadoop.hive.metastore.metrics.Metrics;
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
-import org.apache.hadoop.hive.metastore.model.MCatalog;
-import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
-import org.apache.hadoop.hive.metastore.model.MConstraint;
-import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
-import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
-import org.apache.hadoop.hive.metastore.model.MDatabase;
-import org.apache.hadoop.hive.metastore.model.MDelegationToken;
-import org.apache.hadoop.hive.metastore.model.MFieldSchema;
-import org.apache.hadoop.hive.metastore.model.MFunction;
-import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
-import org.apache.hadoop.hive.metastore.model.MISchema;
-import org.apache.hadoop.hive.metastore.model.MMasterKey;
-import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties;
-import org.apache.hadoop.hive.metastore.model.MNotificationLog;
-import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
-import org.apache.hadoop.hive.metastore.model.MOrder;
-import org.apache.hadoop.hive.metastore.model.MPartition;
-import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
-import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
-import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
-import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
-import org.apache.hadoop.hive.metastore.model.MResourceUri;
-import org.apache.hadoop.hive.metastore.model.MRole;
-import org.apache.hadoop.hive.metastore.model.MRoleMap;
-import org.apache.hadoop.hive.metastore.model.MRuntimeStat;
-import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
-import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
-import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
-import org.apache.hadoop.hive.metastore.model.MStringList;
-import org.apache.hadoop.hive.metastore.model.MTable;
-import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
-import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
-import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
-import org.apache.hadoop.hive.metastore.model.MType;
-import org.apache.hadoop.hive.metastore.model.MVersionTable;
-import org.apache.hadoop.hive.metastore.model.MWMMapping;
+import org.apache.hadoop.hive.metastore.model.*;
 import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType;
-import org.apache.hadoop.hive.metastore.model.MWMPool;
-import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
 import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status;
-import org.apache.hadoop.hive.metastore.model.MWMTrigger;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.FileUtils;
 import org.apache.hadoop.hive.metastore.utils.JavaUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+import org.apache.hive.common.util.TxnIdUtils;
 import org.apache.thrift.TException;
 import org.datanucleus.AbstractNucleusContext;
 import org.datanucleus.ClassLoaderResolver;
@@ -1303,10 +1192,16 @@ public boolean dropType(String typeName) {
   @Override
   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
     boolean commited = false;
+    MTable mtbl = null;
+
     try {
       openTransaction();
-      MTable mtbl = convertToMTable(tbl);
+      mtbl = convertToMTable(tbl);
+      if (TxnUtils.isTransactionalTable(tbl)) {
+        mtbl.setTxnId(tbl.getTxnId());
+        mtbl.setWriteIdList(tbl.getValidWriteIdList());
+      }
       pm.makePersistent(mtbl);

       if (tbl.getCreationMetadata() != null) {
@@ -1417,6 +1312,8 @@ public boolean dropTable(String catName, String dbName, String tableName)
             TableName.getQualified(catName, dbName, tableName));
       }

+      Table table = convertToTable(tbl);
+
       List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
           catName, dbName, tableName, null);
       if (CollectionUtils.isNotEmpty(tabConstraints)) {
@@ -1515,17 +1412,51 @@ private boolean dropCreationMetadata(String catName, String dbName, String table
     return mConstraints;
   }

+  private static String getFullyQualifiedTableName(String dbName, String tblName) {
+    return ((dbName == null || dbName.isEmpty()) ? "" : "\"" + dbName + "\".")
+        + "\"" + tblName + "\"";
+  }
+
   @Override
-  public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+  public Table getTable(String catName, String dbName, String tableName)
+      throws MetaException {
+    return getTable(catName, dbName, tableName, -1, null);
+  }
+
+  @Override
+  public Table getTable(String catName, String dbName, String tableName,
+      long txnId, String writeIdList)
+      throws MetaException {
     boolean commited = false;
     Table tbl = null;
     try {
       openTransaction();
-      tbl = convertToTable(getMTable(catName, dbName, tableName));
+      MTable mtable = getMTable(catName, dbName, tableName);
+      tbl = convertToTable(mtable);
       // Retrieve creation metadata if needed
       if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
         tbl.setCreationMetadata(
             convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
       }
+
+      // If this is a transactional, non-partitioned table, check whether the
+      // current version of the table statistics in the metastore complies with
+      // the client query's snapshot isolation.
+      // Note: a partitioned table keeps its stats and snapshot in MPartition.
+      if (writeIdList != null) {
+        if (tbl != null
+            && TxnUtils.isTransactionalTable(tbl)
+            && tbl.getPartitionKeysSize() == 0) {
+          if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList, -1, false)) {
+            tbl.setIsStatsCompliant(IsolationLevelCompliance.YES);
+          } else {
+            tbl.setIsStatsCompliant(IsolationLevelCompliance.NO);
+            // Do not make the following state persistent since it is query specific (not global).
+            StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
+            LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
+          }
+        }
+      }
       commited = commitTransaction();
     } finally {
@@ -2049,12 +1980,17 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException,
     String ownerType = (ownerPrincipalType == null) ? PrincipalType.USER.name() : ownerPrincipalType.name();

     // A new table is always created with a new column descriptor
-    return new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
+    MTable mtable = new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
         convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), ownerType, tbl
         .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
         convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
         tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(),
         tableType);
+    if (TxnUtils.isTransactionalTable(tbl)) {
+      mtable.setTxnId(tbl.getTxnId());
+      mtable.setWriteIdList(tbl.getValidWriteIdList());
+    }
+    return mtable;
   }

   private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
@@ -2331,6 +2267,7 @@ public boolean addPartitions(String catName, String dbName, String tblName,
             + dbName + "." + tblName + ": " + part);
       }
       MPartition mpart = convertToMPart(part, table, true);
+
       toPersist.add(mpart);
       int now = (int)(System.currentTimeMillis()/1000);
       if (tabGrants != null) {
@@ -2442,7 +2379,9 @@ public boolean addPartition(Partition part) throws InvalidObjectException,
       MetaException {
     boolean success = false;
     boolean commited = false;
+
     try {
+      openTransaction();
       String catName = part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf);
       MTable table = this.getMTable(catName, part.getDbName(), part.getTableName());
       List<MTablePrivilege> tabGrants = null;
@@ -2452,8 +2391,7 @@ public boolean addPartition(Partition part) throws InvalidObjectException,
         tabColumnGrants = this.listTableAllColumnGrants(
             catName, part.getDbName(), part.getTableName());
       }
-      openTransaction();
-      MPartition mpart = convertToMPart(part, true);
+      MPartition mpart = convertToMPart(part, table, true);
       pm.makePersistent(mpart);

       int now = (int)(System.currentTimeMillis()/1000);
@@ -2495,14 +2433,38 @@ public boolean addPartition(Partition part) throws InvalidObjectException,
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
       List<String> part_vals) throws NoSuchObjectException, MetaException {
+    return getPartition(catName, dbName, tableName, part_vals, -1, null);
+  }
+
+  @Override
+  public Partition getPartition(String catName, String dbName, String tableName,
+      List<String> part_vals,
+      long txnId, String writeIdList)
+      throws NoSuchObjectException, MetaException {
     openTransaction();
-    Partition part = convertToPart(getMPartition(catName, dbName, tableName, part_vals));
+    MTable table = this.getMTable(catName, dbName, tableName);
+    MPartition mpart = getMPartition(catName, dbName, tableName, part_vals);
+    Partition part = convertToPart(mpart);
     commitTransaction();
     if(part == null) {
       throw new NoSuchObjectException("partition values=" + part_vals.toString());
     }

     part.setValues(part_vals);
+    // If this is a transactional table's partition, check whether the current
+    // version of the partition statistics in the metastore complies with the
+    // client query's snapshot isolation.
+    if (writeIdList != null) {
+      if (TxnUtils.isTransactionalTable(table.getParameters())) {
+        if (isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, -1, false)) {
+          part.setIsStatsCompliant(IsolationLevelCompliance.YES);
+        } else {
+          part.setIsStatsCompliant(IsolationLevelCompliance.NO);
+          // Do not make the following state persistent since it is query specific (not global).
+          StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
+          LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters.");
+        }
+      }
+    }
     return part;
   }
@@ -2601,26 +2563,6 @@ private MPartition getMPartition(String catName, String dbName, String tableName
    * is true, then this partition's storage descriptor's column descriptor will point
    * to the same one as the table's storage descriptor.
    * @param part the partition to convert
-   * @param useTableCD whether to try to use the parent table's column descriptor.
-   * @return the model partition object, and null if the input partition is null.
-   * @throws InvalidObjectException
-   * @throws MetaException
-   */
-  private MPartition convertToMPart(Partition part, boolean useTableCD)
-      throws InvalidObjectException, MetaException {
-    if (part == null) {
-      return null;
-    }
-    MTable mt = getMTable(part.getCatName(), part.getDbName(), part.getTableName());
-    return convertToMPart(part, mt, useTableCD);
-  }
-
-  /**
-   * Convert a Partition object into an MPartition, which is an object backed by the db
-   * If the Partition's set of columns is the same as the parent table's AND useTableCD
-   * is true, then this partition's storage descriptor's column descriptor will point
-   * to the same one as the table's storage descriptor.
-   * @param part the partition to convert
    * @param mt the parent table object
    * @param useTableCD whether to try to use the parent table's column descriptor.
    * @return the model partition object, and null if the input partition is null.
@@ -2652,10 +2594,15 @@ private MPartition convertToMPart(Partition part, MTable mt, boolean useTableCD)
       msd = convertToMStorageDescriptor(part.getSd());
     }

-    return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt
+    MPartition mpart = new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt
         .getPartitionKeys()), part.getValues()), mt, part.getValues(), part
         .getCreateTime(), part.getLastAccessTime(), msd, part.getParameters());
+
+    if (TxnUtils.isTransactionalTable(mt.getParameters())) {
+      mpart.setTxnId(part.getTxnId());
+      mpart.setWriteIdList(part.getValidWriteIdList());
+    }
+    return mpart;
   }

   private Partition convertToPart(MPartition mpart) throws MetaException {
@@ -3077,7 +3024,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter(
         TableName.getQualified(catName, dbName, tableName), filter, cols);
     List<String> partitionNames = null;
     List<Partition> partitions = null;
-    Table tbl = getTable(catName, dbName, tableName);
+    Table tbl = getTable(catName, dbName, tableName, -1, null);
     try {
       // Get partitions by name - ascending or descending
       partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending,
@@ -3210,7 +3157,8 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(
     if (applyDistinct) {
       partValuesSelect.append("DISTINCT ");
     }
-    List<FieldSchema> partitionKeys = getTable(catName, dbName, tableName).getPartitionKeys();
+    List<FieldSchema> partitionKeys =
+        getTable(catName, dbName, tableName, -1, null).getPartitionKeys();
     for (FieldSchema key : cols) {
       partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", ");
     }
@@ -3292,7 +3240,7 @@ private Collection<String> getPartitionPsQueryResults(String catName, String dbName,
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tableName = normalizeIdentifier(tableName);
-    Table table = getTable(catName, dbName, tableName);
+    Table table = getTable(catName, dbName, tableName, -1, null);
     if (table == null) {
       throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tableName)
           + " table not found");
@@ -3668,7 +3616,8 @@ private void dropPartitionsNoTxn(String catName, String dbName, String tblName,
     protected T results = null;

     public GetHelper(String catalogName, String dbName, String tblName,
-        boolean allowSql, boolean allowJdo) throws MetaException {
+        boolean allowSql, boolean allowJdo)
+        throws MetaException {
       assert allowSql || allowJdo;
       this.allowJdo = allowJdo;
       this.catName = (catalogName != null) ?
normalizeIdentifier(catalogName) : null; @@ -3886,7 +3835,7 @@ protected String describeResult() { private abstract class GetStatHelper extends GetHelper { public GetStatHelper(String catalogName, String dbName, String tblName, boolean allowSql, - boolean allowJdo) throws MetaException { + boolean allowJdo, String writeIdList) throws MetaException { super(catalogName, dbName, tblName, allowSql, allowJdo); } @@ -4186,6 +4135,21 @@ public void alterTable(String catName, String dbname, String name, Table newTabl oldt.setViewExpandedText(newt.getViewExpandedText()); oldt.setRewriteEnabled(newt.isRewriteEnabled()); + // If transactional, update the MTable to carry the txnId and writeIdList + // of the current stats updater query. + if (newTable.getValidWriteIdList() != null && + TxnUtils.isTransactionalTable(newTable)) { + // Check for a concurrent INSERT and, if one is detected, invalidate the basic stats. + if (!isCurrentStatsValidForTheQuery(oldt, newt.getTxnId(), newt.getWriteIdList(), + -1, true)) { + StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + + dbname + "." + name + ". This state change will be made persistent."); + } + oldt.setTxnId(newTable.getTxnId()); + oldt.setWriteIdList(newTable.getValidWriteIdList()); + } + // commit the changes success = commitTransaction(); } finally { @@ -4238,8 +4202,9 @@ private MColumnDescriptor alterPartitionNoTxn(String catName, String dbname, Str catName = normalizeIdentifier(catName); name = normalizeIdentifier(name); dbname = normalizeIdentifier(dbname); + MTable table = this.getMTable(newPart.getCatName(), newPart.getDbName(), newPart.getTableName()); MPartition oldp = getMPartition(catName, dbname, name, part_vals); - MPartition newp = convertToMPart(newPart, false); + MPartition newp = convertToMPart(newPart, table, false); MColumnDescriptor oldCD = null; MStorageDescriptor oldSD = oldp.getSd(); if (oldSD != null) { @@ -4260,6 +4225,20 @@ private MColumnDescriptor alterPartitionNoTxn(String catName, String dbname, Str if (newp.getLastAccessTime() != oldp.getLastAccessTime()) { oldp.setLastAccessTime(newp.getLastAccessTime()); } + // If transactional, update the MPartition to carry the txnId and writeIdList + // of the current stats updater query. + if (newPart.getValidWriteIdList() != null && + TxnUtils.isTransactionalTable(table.getParameters())) { + // Check for a concurrent INSERT and, if one is detected, invalidate the basic stats. + if (!isCurrentStatsValidForTheQuery(oldp, newp.getTxnId(), newp.getWriteIdList(), + -1, true)) { + StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + + dbname + "." + name + "."
+ oldp.getPartitionName() + ". This state change will be made persistent."); + } + oldp.setTxnId(newPart.getTxnId()); + oldp.setWriteIdList(newPart.getValidWriteIdList()); + } return oldCD; } @@ -4291,7 +4270,8 @@ public void alterPartition(String catName, String dbname, String name, List> part_vals, List newParts) + List> part_vals, List newParts, + long txnId, String writeIdList) throws InvalidObjectException, MetaException { boolean success = false; Exception e = null; @@ -4301,6 +4281,10 @@ public void alterPartitions(String catName, String dbname, String name, Set oldCds = new HashSet<>(); for (Partition tmpPart: newParts) { List tmpPartVals = part_val_itr.next(); + if (txnId > 0) { + tmpPart.setTxnId(txnId); + tmpPart.setValidWriteIdList(writeIdList); + } MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, tmpPartVals, tmpPart); if (oldCd != null) { oldCds.add(oldCd); @@ -6177,7 +6161,9 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { boolean found = false; - Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject.getObjectName()); + Table tabObj = + this.getTable(catName, hiveObject.getDbName(), + hiveObject.getObjectName(), -1, null); String partName = null; if (hiveObject.getPartValues() != null) { partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); @@ -6211,7 +6197,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject - .getObjectName()); + .getObjectName(), -1, null); String partName = null; if (hiveObject.getPartValues() != null) { partName = Warehouse.makePartName(tabObj.getPartitionKeys(), @@ -7733,7 +7719,7 @@ public boolean isPartitionMarkedForEvent(String catName, String dbName, String t query .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4," + "java.lang.String t5"); - Table tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid. + Table tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid. if (null == tbl) { throw new UnknownTableException("Table: " + tblName + " is not found."); } @@ -7759,7 +7745,7 @@ public Table markPartitionForEvent(String catName, String dbName, String tblName Table tbl = null; try{ openTransaction(); - tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid. + tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid.
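Both alter paths above defer the staleness decision to isCurrentStatsValidForTheQuery, defined near the end of ObjectStore in this patch. The following is a minimal standalone sketch of the underlying snapshot comparison, not code from the patch; it assumes the usual string form of ValidReaderWriteIdList, `<table>:<highWatermark>:<minOpenWriteId>:<openWriteIds>:<abortedWriteIds>`, and the table name and write ids are illustrative only:

```java
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hive.common.util.TxnIdUtils;

public class WriteIdSnapshotSketch {
  public static void main(String[] args) {
    // Snapshot recorded alongside the stats: writes up to writeId 5 are visible.
    ValidWriteIdList statsSnapshot =
        new ValidReaderWriteIdList("db.tbl:5:" + Long.MAX_VALUE + "::");
    // Snapshot of the incoming reader: it can already see writeIds 6 and 7.
    ValidWriteIdList querySnapshot =
        new ValidReaderWriteIdList("db.tbl:7:" + Long.MAX_VALUE + "::");

    // Reader path (checkConcurrentWrites == false): the stored stats are only
    // usable when both snapshots are at an equivalent commit point, which is
    // not the case here, so this is expected to print false.
    System.out.println(
        TxnIdUtils.checkEquivalentWriteIds(statsSnapshot, querySnapshot));
  }
}
```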
if(null == tbl) { throw new UnknownTableException("Table: "+ tblName + " is not found."); } @@ -8488,7 +8474,10 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List getMTableColumnStatistics(Table table, List colNames, QueryWrapper queryWrapper) + private List getMTableColumnStatistics( + Table table, + List colNames, + QueryWrapper queryWrapper) throws MetaException { if (colNames == null || colNames.isEmpty()) { return Collections.emptyList(); @@ -8563,9 +8552,40 @@ public void validateTableCols(Table table, List colNames) throws MetaExc } @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + public ColumnStatistics getTableColumnStatistics( + String catName, + String dbName, + String tableName, List colNames) throws MetaException, NoSuchObjectException { - return getTableColumnStatisticsInternal(catName, dbName, tableName, colNames, true, true); + return getTableColumnStatisticsInternal( + catName, dbName, tableName, colNames, true, true); + } + + @Override + public ColumnStatistics getTableColumnStatistics( + String catName, + String dbName, + String tableName, + List colNames, + long txnId, + String writeIdList) throws MetaException, NoSuchObjectException { + IsolationLevelCompliance compliance = IsolationLevelCompliance.UNKNOWN; + // If the current stats in the metastore don't comply with + // the query's snapshot isolation, set the compliance flag to NO. + if (writeIdList != null) { + MTable table = this.getMTable(catName, dbName, tableName); + if (!isCurrentStatsValidForTheQuery(table, txnId, writeIdList, -1, false)) { + compliance = IsolationLevelCompliance.NO; + } else { + compliance = IsolationLevelCompliance.YES; + } + } + ColumnStatistics stats = getTableColumnStatisticsInternal( + catName, dbName, tableName, colNames, true, true); + if (stats != null) { + stats.setIsStatsCompliant(compliance); + } + return stats; } protected ColumnStatistics getTableColumnStatisticsInternal( @@ -8573,7 +8593,7 @@ protected ColumnStatistics getTableColumnStatisticsInternal( boolean allowJdo) throws MetaException, NoSuchObjectException { final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); return new GetStatHelper(normalizeIdentifier(catName), normalizeIdentifier(dbName), - normalizeIdentifier(tableName), allowSql, allowJdo) { + normalizeIdentifier(tableName), allowSql, allowJdo, null) { @Override protected ColumnStatistics getSqlResult(GetHelper ctx) throws MetaException { return directSql.getTableStats(catName, dbName, tblName, colNames, enableBitVector); @@ -8584,7 +8604,8 @@ protected ColumnStatistics getJdoResult( QueryWrapper queryWrapper = new QueryWrapper(); try { - List mStats = getMTableColumnStatistics(getTable(), colNames, queryWrapper); + List mStats = + getMTableColumnStatistics(getTable(), colNames, queryWrapper); if (mStats.isEmpty()) { return null; } @@ -8614,6 +8635,35 @@ protected ColumnStatistics getJdoResult( catName, dbName, tableName, partNames, colNames, true, true); } + @Override + public List getPartitionColumnStatistics( + String catName, String dbName, String tableName, + List partNames, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { + // If any of the current partition stats in the metastore don't comply with + // the query's snapshot isolation, return null.
+ if (writeIdList != null) { + if (partNames == null || partNames.isEmpty()) { + LOG.warn("The given partNames list is empty."); + return null; + } + // Loop through the given "partNames" list + // checking isolation-level-compliance of each partition's column stats. + for (String partName : partNames) { + MPartition mpart = getMPartition(catName, dbName, tableName, Warehouse.getPartValuesFromPartName(partName)); + if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, -1, false)) { + LOG.debug("The current metastore transactional partition column statistics " + + "for " + dbName + "." + tableName + "." + mpart.getPartitionName() + " are not valid " + + "for the current query."); + return null; + } + } + } + return getPartitionColumnStatisticsInternal( + catName, dbName, tableName, partNames, colNames, true, true); + } + protected List getPartitionColumnStatisticsInternal( String catName, String dbName, String tableName, final List partNames, final List colNames, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { @@ -8662,10 +8712,36 @@ protected ColumnStatistics getJdoResult( }.run(true); } + @Override + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, + final List partNames, final List colNames, + long txnId, String writeIdList) throws MetaException, NoSuchObjectException { + // If the current stats in the metastore don't comply with + // the query's snapshot isolation, return null. + if (writeIdList != null) { + if (partNames == null || partNames.isEmpty()) { + LOG.warn("The given partNames list is empty."); + return null; + } + // Loop through the given "partNames" list + // checking isolation-level-compliance of each partition's column stats. + for (String partName : partNames) { + MPartition mpart = getMPartition(catName, dbName, tblName, Warehouse.getPartValuesFromPartName(partName)); + if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, -1, false)) { + LOG.debug("The current metastore transactional partition column statistics " + + "for " + dbName + "." + tblName + "." + mpart.getPartitionName() + " are not valid " + + "for the current query."); + return null; + } + } + } + return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + } @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, - final List partNames, final List colNames) throws MetaException, NoSuchObjectException { + final List partNames, final List colNames) + throws MetaException, NoSuchObjectException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); @@ -8697,7 +8773,8 @@ protected String describeResult() { throws MetaException, NoSuchObjectException { final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); - return new GetHelper>(catName, dbName, null, true, false) { + return new GetHelper>( + catName, dbName, null, true, false) { @Override protected List getSqlResult( GetHelper> ctx) throws MetaException { @@ -12155,4 +12232,93 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException { return ret; } + /** + * Return true if the current statistics in the Metastore are valid + * for the query of the given "txnId" and "queryValidWriteIdList".
+ * + * Note that a statistics entity is valid iff + * the stats were written by the current query or + * both of the following are true: + * ~ COLUMN_STATS_ACCURATE (CSA) state is true + * ~ Isolation-level (snapshot) compliant with the query + * @param tbl MTable of the stats entity + * @param txnId transaction id of the query + * @param queryValidWriteIdList valid writeId list of the query + * @param statsWriteId writeId of the stats writer; used only when checkConcurrentWrites is set + * @param checkConcurrentWrites true to detect concurrent writes, false to check snapshot equivalence + * @Precondition "tbl" should be retrieved from the TBLS table. + */ + private boolean isCurrentStatsValidForTheQuery( + MTable tbl, long txnId, String queryValidWriteIdList, + long statsWriteId, boolean checkConcurrentWrites) + throws MetaException { + return isCurrentStatsValidForTheQuery(tbl.getTxnId(), tbl.getParameters(), tbl.getWriteIdList(), + txnId, queryValidWriteIdList, statsWriteId, checkConcurrentWrites); + } + + /** + * Return true if the current statistics in the Metastore are valid + * for the query of the given "txnId" and "queryValidWriteIdList". + * + * Note that a statistics entity is valid iff + * the stats were written by the current query or + * both of the following are true: + * ~ COLUMN_STATS_ACCURATE (CSA) state is true + * ~ Isolation-level (snapshot) compliant with the query + * @param part MPartition of the stats entity + * @param txnId transaction id of the query + * @param queryValidWriteIdList valid writeId list of the query + * @param statsWriteId writeId of the stats writer; used only when checkConcurrentWrites is set + * @param checkConcurrentWrites true to detect concurrent writes, false to check snapshot equivalence + * @Precondition "part" should be retrieved from the PARTITIONS table. + */ + private boolean isCurrentStatsValidForTheQuery( + MPartition part, long txnId, String queryValidWriteIdList, + long statsWriteId, boolean checkConcurrentWrites) + throws MetaException { + return isCurrentStatsValidForTheQuery(part.getTxnId(), part.getParameters(), part.getWriteIdList(), + txnId, queryValidWriteIdList, statsWriteId, checkConcurrentWrites); + } + + private boolean isCurrentStatsValidForTheQuery( + long statsTxnId, Map statsParams, String statsWriteIdList, + long queryTxnId, String queryValidWriteIdList, + long statsWriteId, boolean checkConcurrentWrites) + throws MetaException { + // If statsWriteIdList is null, + // return true since the stats do not seem to be transactional. + if (statsWriteIdList == null) { + return true; + } + // If the current query is the stats updater itself, we can return true + // without going through TxnIdUtils.checkEquivalentWriteIds(). + if (statsTxnId == queryTxnId) { + return true; + } + + // If the writer transaction of the Metastore stats is still open or was aborted, + // we should return false. + try { + if (TxnDbUtil.isOpenOrAbortedTransaction(conf, statsTxnId)) { + return false; + } + } catch (Exception e) { + throw new MetaException("Cannot check transaction state: " + e.getMessage()); + } + + // This COLUMN_STATS_ACCURATE (CSA) state check also covers the case where the stats were + // written by an aborted transaction but TXNS no longer has an entry for that transaction + // after compaction. + if (!StatsSetupConst.areBasicStatsUptoDate(statsParams)) { + return false; + } + + // If the NUM_FILES of the table/partition is 0, return 'true' from this method, + // since a newly initialized empty table has 0 for this parameter. + if (Long.parseLong(statsParams.get(StatsSetupConst.NUM_FILES)) == 0) { + return true; + } + + ValidWriteIdList list4Stats = new ValidReaderWriteIdList(statsWriteIdList); + ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList(queryValidWriteIdList); + + return !checkConcurrentWrites ?
TxnIdUtils.checkEquivalentWriteIds(list4Stats, list4TheQuery) : + !TxnIdUtils.areTheseConcurrentWrites(list4Stats, list4TheQuery, statsWriteId); + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index c8905c8698..8cc9d2c586 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -19,10 +19,7 @@ package org.apache.hadoop.hive.metastore; import org.apache.hadoop.hive.common.TableName; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.ISchemaName; -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.*; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; @@ -34,59 +31,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.ISchema; -import org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.RuntimeStat; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import 
org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SchemaVersion; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMPool; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo; import org.apache.thrift.TException; @@ -266,6 +210,20 @@ boolean dropTable(String catalogName, String dbName, String tableName) Table getTable(String catalogName, String dbName, String tableName) throws MetaException; /** + * Get a table object. + * @param catalogName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return table object, or null if no such table exists (wow it would be nice if we either + * consistently returned null or consistently threw NoSuchObjectException). + * @throws MetaException something went wrong in the RDBMS + */ + Table getTable(String catalogName, String dbName, String tableName, + long txnId, String writeIdList) throws MetaException; + + /** * Add a partition. * @param part partition to add * @return true if the partition was successfully added. @@ -317,6 +275,22 @@ boolean addPartitions(String catName, String dbName, String tblName, */ Partition getPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; + /** + * Get a partition. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param part_vals partition values for this table. + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return the partition. + * @throws MetaException error reading from RDBMS. + * @throws NoSuchObjectException no partition matching this specification exists. + */ + Partition getPartition(String catName, String dbName, String tableName, + List part_vals, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException; /** * Check whether a partition exists. 
@@ -540,11 +514,14 @@ void alterPartition(String catName, String db_name, String tbl_name, List> part_vals_list, List new_parts) + List> part_vals_list, List new_parts, + long txnId, String writeIdList) throws InvalidObjectException, MetaException; /** @@ -916,6 +893,25 @@ ColumnStatistics getTableColumnStatistics(String catName, String dbName, String List colName) throws MetaException, NoSuchObjectException; /** + * Returns the relevant column statistics for a given column in a given table in a given database + * if such statistics exist. + * @param catName catalog name. + * @param dbName name of the database, defaults to current database + * @param tableName name of the table + * @param colName names of the columns for which statistics is requested + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return Relevant column statistics for the column for the given table + * @throws NoSuchObjectException No such table + * @throws MetaException error accessing the RDBMS + * + */ + ColumnStatistics getTableColumnStatistics( + String catName, String dbName, String tableName, + List colName, long txnId, String writeIdList) + throws MetaException, NoSuchObjectException; + + /** * Get statistics for a partition for a set of columns. * @param catName catalog name. * @param dbName database name. @@ -931,6 +927,25 @@ ColumnStatistics getTableColumnStatistics(String catName, String dbName, String throws MetaException, NoSuchObjectException; /** + * Get statistics for a partition for a set of columns. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...] + * @param colNames list of columns to get stats for + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return list of statistics objects + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such partition. + */ + List getPartitionColumnStatistics( + String catName, String dbName, String tblName, + List partNames, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException; + + /** * Deletes column statistics if present associated with a given db, table, partition and col. If * null is passed instead of a colName, stats when present for all columns associated * with a given db, table and partition are deleted. @@ -1174,6 +1189,25 @@ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; /** + * Get aggregated stats for a table or partition(s). + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are the names of the partitions, not + * values. 
+ * @param colNames list of column names + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return aggregated stats + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException no such table or partition + */ + AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, + List partNames, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException; + + /** * Get column stats for all partitions of all tables in the database * @param catName catalog name * @param dbName database name diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 1da9798093..e4894fa12b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -49,68 +49,10 @@ import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.ISchema; -import org.apache.hadoop.hive.metastore.api.ISchemaName; -import org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import 
org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.RuntimeStat; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SchemaVersion; -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; @@ -879,20 +821,29 @@ public boolean dropTable(String catName, String dbName, String tblName) @Override public Table getTable(String catName, String dbName, String tblName) throws MetaException { + return getTable(catName, dbName, tblName, -1, null); + } + + // TODO: if writeIdList is not null, check isolation level compliance for SVS, + // possibly with getTableFromCache() with table snapshot in cache. 
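Until that TODO is implemented, the new overloads below answer any snapshot-carrying request from the raw store. Condensed to its essentials, the read-through pattern looks like the following sketch; it is illustrative only, reusing CachedStore's rawStore and sharedCache fields, and is not code from the patch:

```java
// Sketch only: a non-null writeIdList means the caller needs a snapshot check
// that the cache cannot perform yet, so the raw store stays authoritative.
private Table readThrough(String catName, String dbName, String tblName,
    long txnId, String writeIdList) throws MetaException {
  Table cached = sharedCache.getTableFromCache(catName, dbName, tblName);
  if (cached == null || writeIdList != null) {
    return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
  }
  return cached;
}
```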
+ @Override + public Table getTable(String catName, String dbName, String tblName, + long txnId, String writeIdList) + throws MetaException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.getTable(catName, dbName, tblName); + return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList); } Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); - if (tbl == null) { + if (tbl == null || writeIdList != null) { // This table is not yet loaded in cache // If the prewarm thread is working on this table's database, // let's move this table to the top of tblNamesBeingPrewarmed stack, // so that it gets loaded to the cache faster and is available for subsequent requests tblsPendingPrewarm.prioritizeTableForPrewarm(tblName); - return rawStore.getTable(catName, dbName, tblName); + return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList); } if (tbl != null) { tbl.unsetPrivileges(); @@ -955,16 +906,26 @@ public boolean addPartitions(String catName, String dbName, String tblName, Part @Override public Partition getPartition(String catName, String dbName, String tblName, List part_vals) throws MetaException, NoSuchObjectException { + return getPartition(catName, dbName, tblName, part_vals, -1, null); + } + + // TODO: the same as getTable() + @Override + public Partition getPartition(String catName, String dbName, String tblName, + List part_vals, long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.getPartition(catName, dbName, tblName, part_vals); + return rawStore.getPartition( + catName, dbName, tblName, part_vals, txnId, writeIdList); } Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals); - if (part == null) { + if (part == null || writeIdList != null) { // The table containing the partition is not yet loaded in cache - return rawStore.getPartition(catName, dbName, tblName, part_vals); + return rawStore.getPartition( + catName, dbName, tblName, part_vals, txnId, writeIdList); } return part; } @@ -1210,15 +1171,17 @@ public void alterPartition(String catName, String dbName, String tblName, List> partValsList, List newParts) + List> partValsList, List newParts, + long txnId, String writeIdList) throws InvalidObjectException, MetaException { - rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts); + rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList); catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tblName = normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { return; } + // TODO: modify the following method for the case when writeIdList != null.
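One possible shape for that TODO, purely as a hedged illustration; it assumes SharedCache can evict a table entry (as its drop-table path suggests), and none of this is in the patch:

```java
// Hypothetical sketch: mutate the cached copy only for snapshot-free calls;
// otherwise evict, so subsequent readers fall through to the raw store.
if (writeIdList == null) {
  sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts);
} else {
  sharedCache.removeTableFromCache(catName, dbName, tblName);
}
```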
sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts); } @@ -1662,16 +1625,27 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, List colNames) throws MetaException, NoSuchObjectException { + return getTableColumnStatistics(catName, dbName, tblName, colNames, -1, null); + } + + // TODO: the same as getTable() + @Override + public ColumnStatistics getTableColumnStatistics( + String catName, String dbName, String tblName, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); + return rawStore.getTableColumnStatistics( + catName, dbName, tblName, colNames, txnId, writeIdList); } Table table = sharedCache.getTableFromCache(catName, dbName, tblName); - if (table == null) { + if (table == null || writeIdList != null) { // The table is not yet loaded in cache - return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); + return rawStore.getTableColumnStatistics( + catName, dbName, tblName, colNames, txnId, writeIdList); } ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName); List colStatObjs = @@ -1729,6 +1703,15 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List getPartitionColumnStatistics( + String catName, String dbName, String tblName, List partNames, + List colNames, long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { + return rawStore.getPartitionColumnStatistics( + catName, dbName, tblName, partNames, colNames, txnId, writeIdList); + } + + @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { @@ -1749,17 +1732,28 @@ public boolean deletePartitionColumnStatistics(String catName, String dbName, St @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException { + return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, -1, null); + } + + @Override + // TODO: the same as getTable() for transactional stats. 
+ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, + List partNames, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { List colStats; catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + return rawStore.get_aggr_stats_for( + catName, dbName, tblName, partNames, colNames, txnId, writeIdList); } Table table = sharedCache.getTableFromCache(catName, dbName, tblName); - if (table == null) { + if (table == null || writeIdList != null) { // The table is not yet loaded in cache - return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + return rawStore.get_aggr_stats_for( + catName, dbName, tblName, partNames, colNames, txnId, writeIdList); } List allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); if (partNames.size() == allPartNames.size()) { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java index 4a97f891fe..56f904835e 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java @@ -30,7 +30,8 @@ private int lastAccessTime; private MStorageDescriptor sd; private Map parameters; - + private long txnId; + private String writeIdList; public MPartition() {} @@ -152,4 +153,19 @@ public void setCreateTime(int createTime) { this.createTime = createTime; } + public long getTxnId() { + return txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + } + + public String getWriteIdList() { + return writeIdList; + } + + public void setWriteIdList(String writeIdList) { + this.writeIdList = writeIdList; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java index 38ad47915b..7ef1ef65d5 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java @@ -1,3 +1,4 @@ + /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file @@ -37,6 +38,8 @@ private String viewExpandedText; private boolean rewriteEnabled; private String tableType; + private long txnId; + private String writeIdList; public MTable() {} @@ -270,4 +273,20 @@ public void setTableType(String tableType) { public String getTableType() { return tableType; } + + public long getTxnId() { + return txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + } + + public String getWriteIdList() { + return writeIdList; + } + + public void setWriteIdList(String writeIdList) { + this.writeIdList = writeIdList; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index 4e3068d7eb..7d8f1647d6 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.metastore.txn; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -576,8 +577,8 @@ public void cleanEmptyAbortedTxns() throws MetaException { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); String s = "select txn_id from TXNS where " + - "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " + - "txn_state = '" + TXN_ABORTED + "'"; + "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " + + "txn_state = '" + TXN_ABORTED + "'"; LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); List txnids = new ArrayList<>(); @@ -587,10 +588,54 @@ public void cleanEmptyAbortedTxns() throws MetaException { return; } Collections.sort(txnids);//easier to read logs + List queries = new ArrayList<>(); StringBuilder prefix = new StringBuilder(); StringBuilder suffix = new StringBuilder(); + // Turn off COLUMN_STATS_ACCURATE for txnids' components in TBLS and PARTITIONS + prefix.append("select tbl_id from TBLS where "); + suffix.append(""); + TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "txn_id", true, false); + + // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from TABLE_PARAMS for the txnids. + List finalCommands = new ArrayList<>(queries.size()); + for (int i = 0; i < queries.size(); i++) { + String query = queries.get(i); + finalCommands.add(i, new StringBuilder("delete from TABLE_PARAMS " + + " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and tbl_id in (")); + finalCommands.get(i).append(query + ")"); + LOG.debug("Going to execute update <" + finalCommands.get(i) + ">"); + int rc = stmt.executeUpdate(finalCommands.get(i).toString()); + LOG.info("Turned off " + rc + " COLUMN_STATS_ACCURATE.BASIC_STATS states from TBLS"); + } + + queries.clear(); + prefix.setLength(0); + suffix.setLength(0); + finalCommands.clear(); + + // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from PARTITION_PARAMS for the txnids.
+ prefix.append("select part_id from PARTITIONS where "); + suffix.append(""); + TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "txn_id", true, false); + + for (int i = 0; i < queries.size(); i++) { + String query = queries.get(i); + finalCommands.add(i, new StringBuilder("delete from PARTITION_PARAMS " + + " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and part_id in (")); + finalCommands.get(i).append(query + ")"); + LOG.debug("Going to execute update <" + finalCommands.get(i) + ">"); + int rc = stmt.executeUpdate(finalCommands.get(i).toString()); + LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS states from PARTITIONS"); + } + + queries.clear(); + prefix.setLength(0); + suffix.setLength(0); + finalCommands.clear(); + + // Delete from TXNS. prefix.append("delete from TXNS where "); suffix.append(""); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index 50bfca3885..bfbd928b0b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -28,9 +28,12 @@ import java.util.Properties; import com.google.common.annotations.VisibleForTesting; +import jline.internal.Log; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.zookeeper.txn.TxnHeader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -195,6 +198,68 @@ public static void prepDb(Configuration conf) throws Exception { ); try { + stmt.execute("CREATE TABLE \"APP\".\"TBLS\" (\"TBL_ID\" BIGINT NOT NULL, " + + " \"CREATE_TIME\" INTEGER NOT NULL, \"DB_ID\" BIGINT, \"LAST_ACCESS_TIME\" INTEGER NOT NULL, " + + " \"OWNER\" VARCHAR(767), \"OWNER_TYPE\" VARCHAR(10), \"RETENTION\" INTEGER NOT NULL, " + + " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " + + " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " + + " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', \"TXN_ID\" BIGINT DEFAULT 0, " + + " \"WRITEID_LIST\" CLOB, " + + " PRIMARY KEY (TBL_ID))" + ); + } catch (SQLException e) { + if (e.getMessage() != null && e.getMessage().contains("already exists")) { + LOG.info("TBLS table already exist, ignoring"); + } else { + throw e; + } + } + + try { + stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" + + " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " + + " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " + + " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, \"TXN_ID\" BIGINT DEFAULT 0, " + + " \"WRITEID_LIST\" CLOB, " + + " PRIMARY KEY (PART_ID))" + ); + } catch (SQLException e) { + if (e.getMessage() != null && e.getMessage().contains("already exists")) { + LOG.info("PARTITIONS table already exist, ignoring"); + } else { + throw e; + } + } + + try { + stmt.execute("CREATE TABLE \"APP\".\"TABLE_PARAMS\" (" + + " \"TBL_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " + + " \"PARAM_VALUE\" CLOB, " + + " PRIMARY KEY (TBL_ID, PARAM_KEY))" + ); + } catch (SQLException e) { + if (e.getMessage() != null && e.getMessage().contains("already exists")) { + LOG.info("TABLE_PARAMS table already exist, ignoring"); + } else { + throw e; + } + } + 
+ try { + stmt.execute("CREATE TABLE \"APP\".\"PARTITION_PARAMS\" (" + + " \"PART_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " + + " \"PARAM_VALUE\" VARCHAR(4000), " + + " PRIMARY KEY (PART_ID, PARAM_KEY))" + ); + } catch (SQLException e) { + if (e.getMessage() != null && e.getMessage().contains("already exists")) { + LOG.info("PARTITION_PARAMS table already exists, ignoring"); + } else { + throw e; + } + } + + try { stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " + "NULL, \"NEXT_VAL\" BIGINT NOT NULL)" @@ -376,6 +441,35 @@ public static int countLockComponents(Configuration conf, long lockId) throws Ex } /** + * Return true if the transaction of the given txnId is open or aborted, + * i.e. it still has an entry in the TXNS table. + * @param conf Configuration used to obtain the database connection + * @param txnId transaction id to search for + * @return true if TXNS contains the transaction, false otherwise + * @throws Exception + */ + public static boolean isOpenOrAbortedTransaction(Configuration conf, long txnId) throws Exception { + Connection conn = null; + PreparedStatement stmt = null; + ResultSet rs = null; + try { + conn = getConnection(conf); + conn.setAutoCommit(false); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + + stmt = conn.prepareStatement("SELECT txn_id FROM TXNS WHERE txn_id = ?"); + stmt.setLong(1, txnId); + rs = stmt.executeQuery(); + return rs.next(); + } finally { + closeResources(conn, stmt, rs); + } + } + + /** * Utility method used to run COUNT queries like "select count(*) from ..." against metastore tables * @param countQuery countQuery text * @return count countQuery result diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index fa291d5f20..aac58110f9 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -25,11 +25,7 @@ import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableValidWriteIds; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.utils.JavaUtils; @@ -46,6 +42,12 @@ public class TxnUtils { private static final Logger LOG = LoggerFactory.getLogger(TxnUtils.class); + // Transactional stats states + public static final char STAT_OPEN = 'o'; + public static final char STAT_INVALID = 'i'; + public static final char STAT_COMMITTED = 'c'; + public static final char STAT_OBSOLETE = 's'; + /** * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a * {@link org.apache.hadoop.hive.common.ValidTxnList}.
This assumes that the caller intends to @@ -223,6 +225,14 @@ public static boolean isTransactionalTable(Table table) { return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true"); } + public static boolean isTransactionalTable(Map parameters) { + if (parameters == null) { + return false; + } + String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL); + return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true"); + } + /** * Should produce the same result as * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}. diff --git standalone-metastore/src/main/resources/package.jdo standalone-metastore/src/main/resources/package.jdo index 1be3e986a5..92b803f350 100644 --- standalone-metastore/src/main/resources/package.jdo +++ standalone-metastore/src/main/resources/package.jdo @@ -210,6 +210,12 @@ + <field name="txnId"> + <column name="TXN_ID"/> + </field> + <field name="writeIdList"> + <column name="WRITEID_LIST" jdbc-type="LONGVARCHAR" allows-null="true"/> + </field> @@ -489,6 +495,12 @@ + <field name="txnId"> + <column name="TXN_ID"/> + </field> + <field name="writeIdList"> + <column name="WRITEID_LIST" jdbc-type="LONGVARCHAR" allows-null="true"/> + </field> diff --git standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql index bb691053a5..062e374ac0 100644 --- standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); -CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT); +CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB); CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER); @@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), " CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N'); +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB); CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); diff --git
diff --git standalone-metastore/src/main/resources/package.jdo standalone-metastore/src/main/resources/package.jdo
index 1be3e986a5..92b803f350 100644
--- standalone-metastore/src/main/resources/package.jdo
+++ standalone-metastore/src/main/resources/package.jdo
@@ -210,6 +210,12 @@
+      <field name="txnId">
+        <column name="TXN_ID"/>
+      </field>
+      <field name="writeIdList">
+        <column name="WRITEID_LIST"/>
+      </field>
@@ -489,6 +495,12 @@
+      <field name="txnId">
+        <column name="TXN_ID"/>
+      </field>
+      <field name="writeIdList">
+        <column name="WRITEID_LIST"/>
+      </field>
diff --git standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index bb691053a5..062e374ac0 100644
--- standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT
 CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
 
-CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
 
 CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
@@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "
 CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
 
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N');
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
 
 CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
diff --git standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
index a51137636f..38eecd970a 100644
--- standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
+++ standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
@@ -1,5 +1,9 @@
 -- Upgrade MetaStore schema from 3.1.0 to 4.0.0
-
+-- HIVE-19416
+ALTER TABLE "APP"."TBLS" ADD WRITEID_LIST CLOB;
+ALTER TABLE "APP"."TBLS" ADD TXN_ID bigint DEFAULT 0;
+ALTER TABLE "APP"."PARTITIONS" ADD WRITEID_LIST CLOB;
+ALTER TABLE "APP"."PARTITIONS" ADD TXN_ID bigint DEFAULT 0;
 
 -- This needs to be the last thing done. Insert any changes above this line.
 UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
diff --git standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 922e8fef38..1f31341113 100644
--- standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -145,7 +145,9 @@ CREATE TABLE PARTITIONS
     LAST_ACCESS_TIME int NOT NULL,
     PART_NAME nvarchar(767) NULL,
     SD_ID bigint NULL,
-    TBL_ID bigint NULL
+    TBL_ID bigint NULL,
+    TXN_ID bigint NULL,
+    WRITEID_LIST text NULL
 );
 
 ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -377,7 +379,9 @@ CREATE TABLE TBLS
     TBL_TYPE nvarchar(128) NULL,
     VIEW_EXPANDED_TEXT text NULL,
     VIEW_ORIGINAL_TEXT text NULL,
-    IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0
+    IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0,
+    TXN_ID bigint NULL,
+    WRITEID_LIST text NULL
 );
 
 ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
diff --git standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
index 27b7026bbc..594d165ef2 100644
--- standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
+++ standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
@@ -1,5 +1,11 @@
 SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
 
+-- HIVE-19416
+ALTER TABLE TBLS ADD WRITEID_LIST text NULL;
+ALTER TABLE TBLS ADD TXN_ID bigint NULL;
+ALTER TABLE PARTITIONS ADD WRITEID_LIST text NULL;
+ALTER TABLE PARTITIONS ADD TXN_ID bigint NULL;
+
 -- These lines need to be last. Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
diff --git standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index 6c40e6e4f6..90f45ac224 100644
--- standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -224,6 +224,8 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
   `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `SD_ID` bigint(20) DEFAULT NULL,
   `TBL_ID` bigint(20) DEFAULT NULL,
+  `TXN_ID` bigint(20) DEFAULT 0,
+  `WRITEID_LIST` text DEFAULT NULL,
   PRIMARY KEY (`PART_ID`),
   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
   KEY `PARTITIONS_N49` (`TBL_ID`),
@@ -629,6 +631,8 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
   `VIEW_EXPANDED_TEXT` mediumtext,
   `VIEW_ORIGINAL_TEXT` mediumtext,
   `IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0,
+  `TXN_ID` bigint(20) DEFAULT 0,
+  `WRITEID_LIST` text DEFAULT NULL,
   PRIMARY KEY (`TBL_ID`),
   UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
   KEY `TBLS_N50` (`SD_ID`),
diff --git standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
index 9b87563b8a..5877c93fab 100644
--- standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
+++ standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
@@ -323,4 +323,4 @@ ALTER TABLE TXN_COMPONENTS MODIFY COLUMN TC_TABLE varchar(128) NULL;
 UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' ';
 
-ALTER TABLE `TBLS` ADD COLUMN `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
\ No newline at end of file
+ALTER TABLE `TBLS` ADD COLUMN `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
diff --git standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
index b3789f9822..dc011c245d 100644
--- standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
+++ standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
@@ -1,5 +1,11 @@
 SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
 
+-- HIVE-19416
+ALTER TABLE TBLS ADD TXN_ID bigint DEFAULT 0;
+ALTER TABLE TBLS ADD WRITEID_LIST text;
+ALTER TABLE PARTITIONS ADD TXN_ID bigint DEFAULT 0;
+ALTER TABLE PARTITIONS ADD WRITEID_LIST text;
+
 -- These lines need to be last. Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
diff --git standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index e12150a438..cc08dc1db9 100644
--- standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -163,7 +163,9 @@ CREATE TABLE PARTITIONS
     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
     PART_NAME VARCHAR2(767) NULL,
     SD_ID NUMBER NULL,
-    TBL_ID NUMBER NULL
+    TBL_ID NUMBER NULL,
+    TXN_ID NUMBER NULL,
+    WRITEID_LIST CLOB NULL
 );
 
 ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -398,7 +400,9 @@ CREATE TABLE TBLS
     TBL_TYPE VARCHAR2(128) NULL,
     VIEW_EXPANDED_TEXT CLOB NULL,
     VIEW_ORIGINAL_TEXT CLOB NULL,
-    IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
+    IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
+    TXN_ID NUMBER NULL,
+    WRITEID_LIST CLOB NULL
 );
 
 ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
diff --git standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index ce3437f723..5b767bc285 100644
--- standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@ -339,4 +339,4 @@ UPDATE COMPLETED_TXN_COMPONENTS SET CTC_WRITEID = CTC_TXNID;
 UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
 
-ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL;
\ No newline at end of file
+ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL;
diff --git standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
index 6fa5e2dadc..9e1e6cb539 100644
--- standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
+++ standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
@@ -1,5 +1,10 @@
 SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
 
+ALTER TABLE TBLS ADD TXN_ID number NULL;
+ALTER TABLE TBLS ADD WRITEID_LIST CLOB NULL;
+ALTER TABLE PARTITIONS ADD TXN_ID number NULL;
+ALTER TABLE PARTITIONS ADD WRITEID_LIST CLOB NULL;
+
 -- These lines need to be last. Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
diff --git standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index b73e1d19f6..c7add637e1 100644
--- standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -168,7 +168,9 @@ CREATE TABLE "PARTITIONS" (
     "LAST_ACCESS_TIME" bigint NOT NULL,
     "PART_NAME" character varying(767) DEFAULT NULL::character varying,
     "SD_ID" bigint,
-    "TBL_ID" bigint
+    "TBL_ID" bigint,
+    "TXN_ID" bigint,
+    "WRITEID_LIST" text
 );
 
 
@@ -392,7 +394,9 @@ CREATE TABLE "TBLS" (
     "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
     "VIEW_EXPANDED_TEXT" text,
     "VIEW_ORIGINAL_TEXT" text,
-    "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false
+    "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
+    "TXN_ID" bigint,
+    "WRITEID_LIST" text
 );
 
 --
diff --git standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
index 40d2795e91..0692db1976 100644
--- standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
+++ standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
@@ -1,5 +1,11 @@
 SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0';
 
+-- HIVE-19416
+ALTER TABLE "TBLS" ADD "TXN_ID" bigint;
+ALTER TABLE "TBLS" ADD "WRITEID_LIST" text;
+ALTER TABLE "PARTITIONS" ADD "TXN_ID" bigint;
+ALTER TABLE "PARTITIONS" ADD "WRITEID_LIST" text;
+
 -- These lines need to be last. Insert any changes above.
 UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0';
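A quick way to sanity-check any of the upgrade scripts above is a throwaway JDBC probe like this sketch (the Derby URL and credentials are placeholders; adjust the connection string per dialect):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class WriteIdColumnsSmokeTest {
      public static void main(String[] args) throws Exception {
        // Placeholder connection info; point this at the upgraded metastore DB.
        try (Connection conn = DriverManager.getConnection("jdbc:derby:;databaseName=metastore_db");
             Statement stmt = conn.createStatement();
             // Selecting the new columns fails fast if the upgrade script did not run.
             ResultSet rs = stmt.executeQuery("SELECT TBL_ID, TXN_ID, WRITEID_LIST FROM TBLS")) {
          while (rs.next()) {
            System.out.println(rs.getLong(1) + " txn=" + rs.getLong(2) + " writeIds=" + rs.getString(3));
          }
        }
      }
    }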
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0'; diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift index 6e503eb908..ecd2001e11 100644 --- standalone-metastore/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -233,6 +233,12 @@ enum SchemaVersionState { DELETED = 8 } +enum IsolationLevelCompliance { + YES = 1, + NO = 2, + UNKNOWN = 3 +} + struct HiveObjectRef{ 1: HiveObjectType objectType, 2: string dbName, @@ -430,7 +436,10 @@ struct Table { 15: optional bool rewriteEnabled, // rewrite enabled or not 16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation 17: optional string catName, // Name of the catalog the table is in - 18: optional PrincipalType ownerType = PrincipalType.USER // owner type of this table (default to USER for backward compatibility) + 18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility) + 19: optional i64 txnId=-1, + 20: optional string validWriteIdList, + 21: optional IsolationLevelCompliance isStatsCompliant } struct Partition { @@ -442,7 +451,10 @@ struct Partition { 6: StorageDescriptor sd, 7: map parameters, 8: optional PrincipalPrivilegeSet privileges, - 9: optional string catName + 9: optional string catName, + 10: optional i64 txnId=-1, + 11: optional string validWriteIdList, + 12: optional IsolationLevelCompliance isStatsCompliant } struct PartitionWithoutSD { @@ -469,7 +481,10 @@ struct PartitionSpec { 3: string rootPath, 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec, 5: optional PartitionListComposingSpec partitionList, - 6: optional string catName + 6: optional string catName, + 7: optional i64 txnId=-1, + 8: optional string validWriteIdList, + 9: optional IsolationLevelCompliance isStatsCompliant } // column statistics @@ -564,17 +579,24 @@ struct ColumnStatisticsDesc { struct ColumnStatistics { 1: required ColumnStatisticsDesc statsDesc, -2: required list statsObj; +2: required list statsObj, +3: optional i64 txnId=-1, // transaction id of the query that sends this structure +4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent +5: optional IsolationLevelCompliance isStatsCompliant // Are the stats isolation-level-compliant with the + // the calling query? } struct AggrStats { 1: required list colStats, -2: required i64 partsFound // number of partitions for which stats were found +2: required i64 partsFound, // number of partitions for which stats were found +3: optional IsolationLevelCompliance isStatsCompliant } struct SetPartitionsStatsRequest { 1: required list colStats, -2: optional bool needMerge //stats need to be merged with the existing stats +2: optional bool needMerge, //stats need to be merged with the existing stats +3: optional i64 txnId=-1, // transaction id of the query that sends this structure +4: optional string validWriteIdList // valid write id list for the table for which this struct is being sent } // schema of the table/query results etc. 
@@ -703,18 +725,22 @@ struct PartitionsByExprRequest {
 }
 
 struct TableStatsResult {
-  1: required list<ColumnStatisticsObj> tableStats
+  1: required list<ColumnStatisticsObj> tableStats,
+  2: optional IsolationLevelCompliance isStatsCompliant
 }
 
 struct PartitionsStatsResult {
-  1: required map<string, list<ColumnStatisticsObj>> partStats
+  1: required map<string, list<ColumnStatisticsObj>> partStats,
+  2: optional IsolationLevelCompliance isStatsCompliant
 }
 
 struct TableStatsRequest {
  1: required string dbName,
  2: required string tblName,
  3: required list<string> colNames
- 4: optional string catName
+ 4: optional string catName,
+ 5: optional i64 txnId=-1,            // transaction id of the query that sends this structure
+ 6: optional string validWriteIdList  // valid write id list for the table for which this struct is being sent
 }
 
 struct PartitionsStatsRequest {
@@ -722,12 +748,15 @@ struct PartitionsStatsRequest {
  2: required string tblName,
  3: required list<string> colNames,
  4: required list<string> partNames,
- 5: optional string catName
+ 5: optional string catName,
+ 6: optional i64 txnId=-1,            // transaction id of the query that sends this structure
+ 7: optional string validWriteIdList  // valid write id list for the table for which this struct is being sent
 }
 
 // Return type for add_partitions_req
 struct AddPartitionsResult {
   1: optional list<Partition> partitions,
+  2: optional IsolationLevelCompliance isStatsCompliant
 }
 
 // Request type for add_partitions_req
@@ -737,7 +766,9 @@ struct AddPartitionsRequest {
   3: required list<Partition> parts,
   4: required bool ifNotExists,
   5: optional bool needResult=true,
-  6: optional string catName
+  6: optional string catName,
+  7: optional i64 txnId=-1,
+  8: optional string validWriteIdList
 }
 
 // Return type for drop_partitions_req
@@ -1209,11 +1240,14 @@ struct GetTableRequest {
   1: required string dbName,
   2: required string tblName,
   3: optional ClientCapabilities capabilities,
-  4: optional string catName
+  4: optional string catName,
+  5: optional i64 txnId=-1,
+  6: optional string validWriteIdList
 }
 
 struct GetTableResult {
-  1: required Table table
+  1: required Table table,
+  2: optional IsolationLevelCompliance isStatsCompliant
 }
 
 struct GetTablesRequest {
@@ -1544,6 +1578,18 @@ struct GetRuntimeStatsRequest {
   2: required i32 maxCreateTime
 }
 
+struct AlterPartitionsRequest {
+  1: required string dbName,
+  2: required string tableName,
+  3: required list<Partition> partitions,
+  4: required EnvironmentContext environmentContext,
+  5: optional i64 txnId=-1,
+  6: optional string validWriteIdList
+}
+
+struct AlterPartitionsResponse {
+}
+
 // Exceptions.
 
 exception MetaException {
@@ -1874,7 +1920,9 @@ service ThriftHiveMetastore extends fb303.FacebookService
   // prehooks are fired together followed by all post hooks
   void alter_partitions(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts)
                        throws (1:InvalidOperationException o1, 2:MetaException o2)
-  void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+  AlterPartitionsResponse alter_partitions_with_environment_context(1:AlterPartitionsRequest req)
+      throws (1:InvalidOperationException o1, 2:MetaException o2)
   void alter_partition_with_environment_context(1:string db_name, 2:string tbl_name,
       3:Partition new_part,
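On the Java side, the generated Thrift API for the new request can be driven roughly like this (a sketch; 'client' is a ThriftHiveMetastore.Iface and the db/table names are made up; the setters follow from the IDL fields above):

    // Sketch: caller-side construction of the new request via the generated API.
    AlterPartitionsRequest req = new AlterPartitionsRequest();
    req.setDbName("mydb");                         // hypothetical db/table names
    req.setTableName("mytable");
    req.setPartitions(newParts);                   // List<Partition> to write back
    req.setEnvironmentContext(new EnvironmentContext());
    req.setTxnId(txnId);                           // txn of the calling query, -1 if none
    req.setValidWriteIdList(writeIdListStr);       // serialized ValidWriteIdList
    client.alter_partitions_with_environment_context(req);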
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 8e195d04fe..001c3edcff 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -19,24 +19,12 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
@@ -85,6 +73,18 @@
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.api.WMMapping;
 import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.ISchemaName;
+import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
 import org.apache.thrift.TException;
@@ -247,6 +247,12 @@ public Table getTable(String catName, String dbName, String tableName) throws Me
   }
 
   @Override
+  public Table getTable(String catName, String dbName, String tableName,
+      long txnId, String writeIdList) throws MetaException {
+    return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
+  }
+
+  @Override
   public boolean addPartition(Partition part)
       throws InvalidObjectException, MetaException {
     return objectStore.addPartition(part);
@@ -259,6 +265,13 @@ public Partition getPartition(String catName, String dbName, String tableName, L
   }
 
   @Override
+  public Partition getPartition(String catName, String dbName, String tableName,
+      List<String> partVals, long txnId, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
+  }
+
+  @Override
   public boolean dropPartition(String catName, String dbName, String tableName,
      List<String> partVals)
      throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
@@ -349,9 +362,11 @@ public void alterPartition(String catName, String dbName, String tblName, List
 
   @Override
   public void alterPartitions(String catName, String dbName, String tblName,
-      List<List<String>> partValsList, List<Partition> newParts)
+      List<List<String>> partValsList, List<Partition> newParts,
+      long txnId, String writeIdList)
       throws InvalidObjectException, MetaException {
-    objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
+    objectStore.alterPartitions(
+        catName, dbName, tblName, partValsList, newParts, txnId, writeIdList);
   }
 
   @Override
@@ -653,6 +668,15 @@ public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
   }
 
   @Override
+  public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
+      String tableName, List<String> colNames,
+      long txnId, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return objectStore.getTableColumnStatistics(
+        catName, dbName, tableName, colNames, txnId, writeIdList);
+  }
+
+  @Override
   public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
      String colName)
      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -745,6 +769,15 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro
   }
 
   @Override
+  public List<ColumnStatistics> getPartitionColumnStatistics(
+      String catName, String dbName, String tblName, List<String> partNames,
+      List<String> colNames, long txnId, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return objectStore.getPartitionColumnStatistics(
+        catName, dbName, tblName, colNames, partNames, txnId, writeIdList);
+  }
+
+  @Override
   public boolean doesPartitionExist(String catName, String dbName, String tableName,
       List<FieldSchema> partKeys, List<String> partVals)
       throws MetaException, NoSuchObjectException {
@@ -813,6 +846,15 @@ public AggrStats get_aggr_stats_for(String catName, String dbName,
   }
 
   @Override
+  public AggrStats get_aggr_stats_for(String catName, String dbName,
+      String tblName, List<String> partNames,
+      List<String> colNames,
+      long txnId, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return null;
+  }
+
+  @Override
   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
     return objectStore.getNextNotification(rqst);
   }
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 85eb6d554b..d6a882e8e9 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -243,6 +243,12 @@ public Table getTable(String catName, String dbName, String tableName) throws Me
   }
 
   @Override
+  public Table getTable(String catalogName, String dbName, String tableName,
+      long txnid, String writeIdList) throws MetaException {
+    return null;
+  }
+
+  @Override
   public boolean addPartition(Partition part)
       throws InvalidObjectException, MetaException {
     return false;
@@ -256,6 +262,13 @@ public Partition getPartition(String catName, String dbName, String tableName, L
   }
 
   @Override
+  public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals,
+      long txnid, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return null;
+  }
+
+  @Override
   public boolean dropPartition(String catName, String dbName, String tableName,
       List<String> part_vals)
       throws MetaException {
@@ -350,10 +363,10 @@ public void alterPartition(String catName, String db_name, String tbl_name, List
 
   @Override
   public void alterPartitions(String catName, String db_name, String tbl_name,
-      List<List<String>> part_vals_list, List<Partition> new_parts)
+      List<List<String>> part_vals_list, List<Partition> new_parts,
+      long txnId, String writeIdList)
       throws InvalidObjectException, MetaException {
-
   }
 
   @Override
@@ -706,6 +719,14 @@ public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
   }
 
   @Override
+  public ColumnStatistics getTableColumnStatistics(
+      String catName, String dbName, String tableName, List<String> colName,
+      long txnid, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return null;
+  }
+
+  @Override
   public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
       String colName)
       throws NoSuchObjectException, MetaException, InvalidObjectException {
@@ -755,6 +776,14 @@ public void setMetaStoreSchemaVersion(String version, String comment) throws Met
   }
 
   @Override
+  public List<ColumnStatistics> getPartitionColumnStatistics(
+      String catName, String dbName, String tblName, List<String> partNames,
+      List<String> colNames, long txnid, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return Collections.emptyList();
+  }
+
+  @Override
   public boolean doesPartitionExist(String catName, String dbName, String tableName,
       List<FieldSchema> partKeys, List<String> partVals)
       throws MetaException, NoSuchObjectException {
@@ -818,6 +847,14 @@ public AggrStats get_aggr_stats_for(String catName, String dbName,
   }
 
   @Override
+  public AggrStats get_aggr_stats_for(
+      String catName, String dbName, String tblName, List<String> partNames,
+      List<String> colNames, long txnid, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return null;
+  }
+
+  @Override
   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
     return null;
   }
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 2d87a2fa96..3899f03aba 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -20,6 +20,7 @@
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
@@ -1429,6 +1430,17 @@ public Table getTable(String dbname, String name) throws MetaException,
     return fastpath ? t : deepCopy(filterHook.filterTable(t));
   }
 
+  @Override
+  public Table getTable(String dbName, String tableName, long txnId, String validWriteIdList)
+      throws MetaException, TException, NoSuchObjectException {
+    GetTableRequest req = new GetTableRequest(dbName, tableName);
+    req.setCapabilities(version);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(validWriteIdList);
+    Table t = client.get_table_req(req).getTable();
+    return fastpath ? t : deepCopy(filterHook.filterTable(t));
+  }
+
   /** {@inheritDoc} */
   @Override
   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
@@ -1612,13 +1624,33 @@ public void alter_partition(String dbName, String tblName, Partition newPart, En
   @Override
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
      throws InvalidOperationException, MetaException, TException {
-    client.alter_partitions_with_environment_context(dbName, tblName, newParts, null);
+    client.alter_partitions(dbName, tblName, newParts);
   }
 
   @Override
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
                                EnvironmentContext environmentContext)
       throws InvalidOperationException, MetaException, TException {
-    client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext);
+    AlterPartitionsRequest req = new AlterPartitionsRequest();
+    req.setDbName(dbName);
+    req.setTableName(tblName);
+    req.setPartitions(newParts);
+    req.setEnvironmentContext(environmentContext);
+    client.alter_partitions_with_environment_context(req);
+  }
+
+  @Override
+  public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+                               EnvironmentContext environmentContext,
+                               long txnId, String writeIdList)
+      throws InvalidOperationException, MetaException, TException {
+    AlterPartitionsRequest req = new AlterPartitionsRequest();
+    req.setDbName(dbName);
+    req.setTableName(tblName);
+    req.setPartitions(newParts);
+    req.setEnvironmentContext(environmentContext);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(writeIdList);
+    client.alter_partitions_with_environment_context(req);
   }
 
   @Override
@@ -1727,6 +1759,17 @@ public void flushCache() {
         new TableStatsRequest(dbName, tableName, colNames)).getTableStats();
   }
 
+  @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(
+      String dbName, String tableName, List<String> colNames, long txnId, String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames);
+    tsr.setTxnId(txnId);
+    tsr.setValidWriteIdList(validWriteIdList);
+
+    return client.get_table_statistics_req(tsr).getTableStats();
+  }
+
   /** {@inheritDoc} */
   @Override
   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
@@ -1736,6 +1779,18 @@ public void flushCache() {
         new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats();
   }
 
+  @Override
+  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+      String dbName, String tableName, List<String> partNames,
+      List<String> colNames, long txnId, String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames);
+    psr.setTxnId(txnId);
+    psr.setValidWriteIdList(validWriteIdList);
+    return client.get_partitions_statistics_req(
+        psr).getPartStats();
+  }
+
   /** {@inheritDoc} */
   @Override
   public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
@@ -2593,6 +2648,21 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName,
   }
 
   @Override
+  public AggrStats getAggrColStatsFor(
+      String dbName, String tblName, List<String> colNames,
+      List<String> partName, long txnId, String writeIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    if (colNames.isEmpty() || partName.isEmpty()) {
+      LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side.");
+      return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate
+    }
+    PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partName);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(writeIdList);
+    return client.get_aggr_stats_for(req);
+  }
+
+  @Override
   public Iterable<Entry<Long, ByteBuffer>> getFileMetadata(
       final List<Long> fileIds) throws TException {
     return new MetastoreMapIterable<Long, ByteBuffer>() {
@@ -3000,6 +3070,12 @@ public Table getTable(String catName, String dbName, String tableName) throws Me
   }
 
   @Override
+  public Table getTable(String catName, String dbName, String tableName,
+                        long txnId, String validWriteIdList) throws TException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public List<Table>
      getTableObjectsByName(String catName, String dbName, List<String> tableNames)
      throws MetaException, InvalidOperationException, UnknownDBException, TException {
@@ -3226,7 +3302,8 @@ public void alter_partition(String catName, String dbName, String tblName, Parti
   @Override
   public void alter_partitions(String catName, String dbName, String tblName,
                                List<Partition> newParts,
-                               EnvironmentContext environmentContext) throws
+                               EnvironmentContext environmentContext,
+                               long txnId, String writeIdList) throws
       InvalidOperationException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
@@ -3259,6 +3336,14 @@ public void renamePartition(String catName, String dbname, String tableName,
   }
 
   @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(
+      String catName, String dbName, String tableName, List<String> colNames,
+      long txnId, String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String catName,
                                                                              String dbName,
                                                                              String tableName,
@@ -3269,6 +3354,14 @@ public void renamePartition(String catName, String dbname, String tableName,
   }
 
   @Override
+  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+      String catName, String dbName, String tableName, List<String> partNames,
+      List<String> colNames, long txnId, String validWriteIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
                                                  String partName, String colName)
       throws NoSuchObjectException, MetaException, InvalidObjectException, TException,
@@ -3316,6 +3409,14 @@ public AggrStats getAggrColStatsFor(String catName, String dbName, String tblNam
   }
 
   @Override
+  public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
+                                      List<String> colNames, List<String> partNames,
+                                      long txnId, String writeIdList)
+      throws NoSuchObjectException, MetaException, TException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public void dropConstraint(String catName, String dbName, String tableName,
                              String constraintName)
       throws MetaException, NoSuchObjectException, TException {
@@ -3420,4 +3521,5 @@ public void addRuntimeStat(RuntimeStat stat) throws TException {
   public List<RuntimeStat> getRuntimeStats(int maxWeight, int maxCreateTime) throws TException {
     throw new UnsupportedOperationException();
   }
+
 }
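Putting the new client overloads together, a caller that wants snapshot-consistent metadata might look roughly like this (a sketch; msClient, txnId and writeIds are assumed to come from the query's transaction state, not from this patch):

    // Rough sketch: read a table and its stats as-of a writer's snapshot.
    String writeIdList = writeIds.writeToString();   // ValidWriteIdList serialized form
    Table t = msClient.getTable("mydb", "mytable", txnId, writeIdList);
    List<ColumnStatisticsObj> stats = msClient.getTableColumnStatistics(
        "mydb", "mytable", Arrays.asList("id", "name"), txnId, writeIdList);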
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 54bf3d7e25..0a62ac1cc4 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.metastore.client;
 
+import java.net.ProtocolException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -41,6 +42,7 @@
 import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
 import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
 import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TProtocolException;
 import org.apache.thrift.transport.TTransportException;
 
 import com.google.common.collect.Lists;
@@ -692,11 +694,16 @@ public void testAlterPartitionsNoTblName() throws Exception {
     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part));
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionsNullTblName() throws Exception {
     createTable4PartColsParts(client);
     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
-    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
+    try {
+      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
+      Assert.fail("didn't throw");
+    } catch (TProtocolException | MetaException e) {
+      // By design
+    }
   }
 
   @Test(expected = NullPointerException.class)
@@ -720,7 +727,7 @@ public void testAlterPartitionsNullPartitionList() throws Exception {
       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
       client.alter_partitions(DB_NAME, TABLE_NAME, null);
       fail("Should have thrown exception");
-    } catch (NullPointerException | TTransportException e) {
+    } catch (NullPointerException | TTransportException | TProtocolException e) {
       //TODO: should not throw different exceptions for different HMS deployment types
     }
   }
@@ -786,7 +793,7 @@ public void testAlterPartitionsWithEnvironmentCtx() throws Exception {
     assertPartitionsHaveCorrectValues(newParts, testValues);
 
     client.alter_partitions(DB_NAME, TABLE_NAME, newParts, new EnvironmentContext());
-    client.alter_partitions(DB_NAME, TABLE_NAME, newParts, null);
+    client.alter_partitions(DB_NAME, TABLE_NAME, newParts);
 
     for (int i = 0; i < testValues.size(); ++i) {
       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
@@ -835,7 +842,8 @@ public void testAlterPartitionsWithEnvironmentCtxMissingPartitionVals() throws E
   public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception {
     createTable4PartColsParts(client);
     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
-    client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
+    client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext(),
+        -1, null);
   }
 
   @Test(expected = InvalidOperationException.class)
@@ -859,11 +867,16 @@ public void testAlterPartitionsWithEnvironmentCtxNoTblName() throws Exception {
     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part), new EnvironmentContext());
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionsWithEnvironmentCtxNullTblName() throws Exception {
     createTable4PartColsParts(client);
     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
-    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
+    try {
+      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
+      Assert.fail("didn't throw");
+    } catch (MetaException | TProtocolException ex) {
+      // By design.
+    }
   }
 
   @Test(expected = NullPointerException.class)
@@ -889,7 +902,7 @@ public void testAlterPartitionsWithEnvironmentCtxNullPartitionList() throws Exce
       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
       client.alter_partitions(DB_NAME, TABLE_NAME, null, new EnvironmentContext());
       fail("Should have thrown exception");
-    } catch (NullPointerException | TTransportException e) {
+    } catch (NullPointerException | TTransportException | TProtocolException e) {
      //TODO: should not throw different exceptions for different HMS deployment types
     }
   }
diff --git storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
index 9867a81a7b..cfe01feed0 100644
--- storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
+++ storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
@@ -63,6 +63,10 @@ public ValidWriteIdList getTableValidWriteIdList(String fullTableName) {
     return null;
   }
 
+  public boolean isEmpty() {
+    return tablesValidWriteIdList.isEmpty();
+  }
+
   // Each ValidWriteIdList is separated with "$" and each one maps to one table
   // Format <tableName>:<highWatermark>:<minOpenWriteId>:<openWriteIds>:<abortedWriteIds>$<tableName>:...
   private void readFromString(String src) {
diff --git storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
index 17f3777fbd..dc50f1b554 100644
--- storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
+++ storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
@@ -36,8 +36,24 @@ public static boolean checkEquivalentWriteIds(ValidWriteIdList a, ValidWriteIdLi
     }
 
     return checkEquivalentCommittedIds(
-            older.getHighWatermark(), older.getInvalidWriteIds(),
-            newer.getHighWatermark(), newer.getInvalidWriteIds());
+        older.getHighWatermark(), older.getInvalidWriteIds(),
+        newer.getHighWatermark(), newer.getInvalidWriteIds());
+  }
+
+  /**
+   * Check if the given two write id lists are for concurrent writes
+   * on the table.
+   */
+  public static boolean areTheseConcurrentWrites(
+      ValidWriteIdList older, ValidWriteIdList newer, long statsWriteId) {
+    if (!older.getTableName().equalsIgnoreCase(newer.getTableName())) {
+      return false;
+    }
+
+    assert(older.getHighWatermark() <= newer.getHighWatermark());
+
+    // TODO: Just return false for now.
+    return false;
   }
 
   /**
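For orientation, a sketch of how these two storage-api additions are meant to compose (assumes the String-based ValidTxnWriteIdList constructor; note that areTheseConcurrentWrites is still a stub that always returns false, per the TODO above):

    // Sketch only: parse a per-query txn write-id list and probe one table's entry.
    ValidTxnWriteIdList txnWriteIds = new ValidTxnWriteIdList(validTxnWriteIdListStr);
    if (!txnWriteIds.isEmpty()) {
      ValidWriteIdList newer = txnWriteIds.getTableValidWriteIdList("db.tbl");
      // Compare against a previously captured list ('older' and 'statsWriteId'
      // are assumed inputs); currently this always yields false.
      boolean concurrent = TxnIdUtils.areTheseConcurrentWrites(older, newer, statsWriteId);
    }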