diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index b4b118eb70..cd036e6563 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -267,6 +267,12 @@ public Table getTable(String catName, String dbName, String tableName) throws Me } @Override + public Table getTable(String catName, String dbName, String tableName, + long txnId, String writeIdList) throws MetaException { + return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList); + } + + @Override public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { return objectStore.addPartition(part); @@ -279,6 +285,13 @@ public Partition getPartition(String catName, String dbName, String tableName, L } @Override + public Partition getPartition(String catName, String dbName, String tableName, + List partVals, long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { + return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList); + } + + @Override public boolean dropPartition(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { @@ -308,10 +321,10 @@ public void updateCreationMetadata(String catName, String dbname, String tablena } @Override - public void alterTable(String catName, String dbName, String name, Table newTable) + public void alterTable(String catName, String dbName, String name, Table newTable, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { if (shouldEventSucceed) { - objectStore.alterTable(catName, dbName, name, newTable); + objectStore.alterTable(catName, dbName, name, newTable, queryTxnId, queryValidWriteIds); } else { throw new RuntimeException("Event failed."); } @@ -373,9 +386,9 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam @Override public void alterPartition(String catName, String dbName, String tblName, List partVals, - Partition newPart) throws InvalidObjectException, MetaException { + Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { if (shouldEventSucceed) { - objectStore.alterPartition(catName, dbName, tblName, partVals, newPart); + objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds); } else { throw new RuntimeException("Event failed."); } @@ -383,9 +396,10 @@ public void alterPartition(String catName, String dbName, String tblName, List> partValsList, List newParts) + List> partValsList, List newParts, + long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { - objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts); + objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, queryTxnId, queryValidWriteIds); } @Override @@ -692,6 +706,14 @@ public ColumnStatistics getTableColumnStatistics(String catName, String dbName, } @Override + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException 
{ + return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, txnId, writeIdList); + } + + @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { @@ -785,6 +807,17 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } @Override + public List getPartitionColumnStatistics(String catName, String dbName, + String tblName, List colNames, + List partNames, + long txnId, + String writeIdList) + throws MetaException, NoSuchObjectException { + return objectStore.getPartitionColumnStatistics( + catName, dbName, tblName , colNames, partNames, txnId, writeIdList); + } + + @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, List partKeys, List partVals) throws MetaException, NoSuchObjectException { @@ -862,6 +895,14 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, } @Override + public AggrStats get_aggr_stats_for(String catName, String dbName, + String tblName, List partNames, List colNames, + long txnId, String writeIdList) + throws MetaException { + return null; + } + + @Override public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { return objectStore.getNextNotification(rqst); } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 73ac6d8658..f0098bfe57 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -152,6 +152,7 @@ public static void setUpBeforeClass() throws Exception { hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); hconf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true"); + hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true); hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hconf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"); diff --git pom.xml pom.xml index 28ad152545..dc1adab6b5 100644 --- pom.xml +++ pom.xml @@ -66,7 +66,7 @@ - 3.1.0 + 4.0.0 UTF-8 diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index b7babd623d..71b9587697 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -1301,7 +1301,7 @@ private int alterMaterializedView(Hive db, AlterMaterializedViewDesc alterMVDesc throw new AssertionError("Unsupported alter materialized view type! : " + alterMVDesc.getOp()); } - db.alterTable(mv, environmentContext); + db.alterTable(mv, false, environmentContext, true); return 0; } @@ -1452,7 +1452,7 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD tbl.getTTable().setPartitionKeys(newPartitionKeys); - db.alterTable(tbl, null); + db.alterTable(tbl, false, null, true); work.getInputs().add(new ReadEntity(tbl)); // We've already locked the table as the input, don't relock it as the output. 
@@ -1478,7 +1478,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc) environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); if (touchDesc.getPartSpec() == null) { - db.alterTable(tbl, environmentContext); + db.alterTable(tbl, false, environmentContext, true); work.getInputs().add(new ReadEntity(tbl)); addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } else { @@ -1487,7 +1487,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc) throw new HiveException("Specified partition does not exist"); } try { - db.alterPartition(touchDesc.getTableName(), part, environmentContext); + db.alterPartition(touchDesc.getTableName(), part, environmentContext, true); } catch (InvalidOperationException e) { throw new HiveException(e); } @@ -1836,7 +1836,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc, authority.toString(), harPartitionDir.getPath()); // make in Path to ensure no slash at the end setArchived(p, harPath, partSpecInfo.values.size()); - db.alterPartition(simpleDesc.getTableName(), p, null); + db.alterPartition(simpleDesc.getTableName(), p, null, true); } } catch (Exception e) { throw new HiveException("Unable to change the partition info for HAR", e); @@ -2042,7 +2042,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc) for(Partition p: partitions) { setUnArchived(p); try { - db.alterPartition(simpleDesc.getTableName(), p, null); + db.alterPartition(simpleDesc.getTableName(), p, null, true); } catch (InvalidOperationException e) { throw new HiveException(e); } @@ -3731,7 +3731,8 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, } else { cols = Hive.getFieldsFromDeserializer(colPath, deserializer); List parts = db.getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), (short) -1); - AggrStats aggrStats = db.getAggrColStatsFor(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts); + AggrStats aggrStats = db.getAggrColStatsFor( + dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false); colStats = aggrStats.getColStats(); if (parts.size() == aggrStats.getPartsFound()) { StatsSetupConst.setColumnStatsState(tblProps, colNames); @@ -3742,13 +3743,15 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, tbl.setParameters(tblProps); } else { cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - colStats = db.getTableColumnStatistics(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames); + colStats = db.getTableColumnStatistics( + dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, false); } } else { List partitions = new ArrayList(); partitions.add(part.getName()); cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - colStats = db.getPartitionColumnStatistics(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), partitions, colNames).get(part.getName()); + colStats = db.getPartitionColumnStatistics(dbTab[0].toLowerCase(), + dbTab[1].toLowerCase(), partitions, colNames, false).get(part.getName()); } } else { cols = Hive.getFieldsFromDeserializer(colPath, deserializer); @@ -3966,10 +3969,14 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { environmentContext = new EnvironmentContext(); } environmentContext.putToProperties(HiveMetaHook.ALTER_TABLE_OPERATION_TYPE, alterTbl.getOp().name()); + // Note: in the old default overloads that I've removed, "transactional" was true for tables, + // but false for partitions. 
Seems to be ok here because we are not updating + // partition-stats-related stuff from this call (alterTable). if (allPartitions == null) { - db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade(), environmentContext); + db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade(), environmentContext, true); } else { - db.alterPartitions(Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext); + db.alterPartitions( + Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext, false); } // Add constraints if necessary addConstraints(db, alterTbl); @@ -4944,7 +4951,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { // create the table if (crtTbl.getReplaceMode()) { // replace-mode creates are really alters using CreateTableDesc. - db.alterTable(tbl, null); + db.alterTable(tbl, false, null, true); } else { if ((foreignKeys != null && foreignKeys.size() > 0) || (primaryKeys != null && primaryKeys.size() > 0) || @@ -5174,7 +5181,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { oldview.setOutputFormatClass(crtView.getOutputFormat()); } oldview.checkValidity(null); - db.alterTable(crtView.getViewName(), oldview, null); + db.alterTable(crtView.getViewName(), oldview, false, null, true); addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK)); } else { // We create new view diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 267d6029ac..9593975c6c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -1208,7 +1208,7 @@ protected FSPaths getDynOutPaths(List row, String lbDir) throws HiveExce } else if (prevFsp.updaters[0] != null) { stats = prevFsp.updaters[0].getStats(); } - if (stats != null) { + if (stats != null && !conf.isFullAcidTable()) { prevFsp.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize()); prevFsp.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index bf7749df9b..259d95166e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -797,7 +797,7 @@ private void updatePartitionBucketSortColumns(Hive db, Table table, Partition pa } if (updateBucketCols || updateSortCols) { - db.alterPartition(table.getDbName(), table.getTableName(), partn, null); + db.alterPartition(table.getDbName(), table.getTableName(), partn, null, true); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java index fc56a8be3b..4cf7c25a82 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java @@ -63,7 +63,7 @@ public void run(HookContext hookContext) throws Exception { String tblName = re.getTable().getTableName(); Table t = db.getTable(dbName, tblName); t.setLastAccessTime(lastAccessTime); - db.alterTable(dbName + "." + tblName, t, null); + db.alterTable(dbName + "." 
+ tblName, t, false, null, true); break; } case PARTITION: { @@ -73,9 +73,9 @@ public void run(HookContext hookContext) throws Exception { Table t = db.getTable(dbName, tblName); p = db.getPartition(t, p.getSpec(), false); p.setLastAccessTime(lastAccessTime); - db.alterPartition(dbName, tblName, p, null); + db.alterPartition(dbName, tblName, p, null, true); t.setLastAccessTime(lastAccessTime); - db.alterTable(dbName + "." + tblName, t, null); + db.alterTable(dbName + "." + tblName, t, false, null, true); break; } default: diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 16ba82ef6d..e54afc4362 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -32,6 +32,7 @@ import java.util.Set; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -39,13 +40,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hive.common.HiveStatsUtils; -import org.apache.hadoop.hive.common.ValidReaderWriteIdList; -import org.apache.hadoop.hive.common.ValidTxnWriteIdList; -import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.common.*; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -56,9 +55,12 @@ import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater; import org.apache.hadoop.hive.ql.io.orc.Reader; import org.apache.hadoop.hive.ql.io.orc.Writer; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId; import org.apache.hadoop.hive.shims.ShimLoader; @@ -1606,6 +1608,133 @@ public static void setValidWriteIdList(Configuration conf, TableScanDesc tsDesc) } } + public static class TableSnapshot { + private long txnId; + private long writeId; + private String validWriteIdList; + + public TableSnapshot() { + } + + public TableSnapshot(long txnId, long writeId, String validWriteIdList) { + this.txnId = txnId; + this.writeId = writeId; + this.validWriteIdList = validWriteIdList; + } + + public long getTxnId() { + return txnId; + } + + public String getValidWriteIdList() { + return validWriteIdList; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + } + + public long getWriteId() { + return writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + @Override + public String toString() { + return "[txnId=" + txnId + ", writeId=" + writeId + ", validWriteIdList=" + validWriteIdList + "]"; + } + } + + public static 
TableSnapshot getTableSnapshot( + Configuration conf, + Table tbl) throws LockException { + return getTableSnapshot(conf, tbl, false); + } + + public static TableSnapshot getTableSnapshot( + Configuration conf, Table tbl, boolean isStatsUpdater) throws LockException { + if (!isTransactionalTable(tbl)) { + return null; + } else { + long txnId = -1; + long writeId = -1; + ValidWriteIdList validWriteIdList = null; + + HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr(); + if (sessionTxnMgr != null) { + txnId = sessionTxnMgr.getCurrentTxnId(); + } + String fullTableName = getFullTableName(tbl.getDbName(), tbl.getTableName()); + if (txnId > 0) { + validWriteIdList = getTableValidWriteIdList(conf, fullTableName); + if (isStatsUpdater) { + writeId = sessionTxnMgr != null ? + sessionTxnMgr.getAllocatedTableWriteId( + tbl.getDbName(), tbl.getTableName()) : -1; + if (writeId < 1) { + // TODO: this is not ideal... stats updater that doesn't have write ID is currently + // "create table"; writeId would be 0/-1 here. No need to call this w/true. + LOG.debug("Stats updater for {}.{} doesn't have a write ID", + tbl.getDbName(), tbl.getTableName()); + } + } + + if (HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) + && conf.get(ValidTxnList.VALID_TXNS_KEY) == null) { + return null; + } + if (validWriteIdList == null) { + validWriteIdList = getTableValidWriteIdListWithTxnList( + conf, tbl.getDbName(), tbl.getTableName()); + } + if (validWriteIdList == null) { + throw new AssertionError("Cannot find valid write ID list for " + tbl.getTableName()); + } + } + return new TableSnapshot(txnId, writeId, + validWriteIdList != null ? validWriteIdList.toString() : null); + } + } + + /** + * Returns ValidWriteIdList for the table with the given "dbName" and "tableName". + * This is called when HiveConf has no list for the table. + * Otherwise use getTableSnapshot(). + * @param conf Configuration + * @param dbName database name + * @param tableName table name + * @return ValidWriteIdList on success, null on failure to get a list. + * @throws LockException + */ + public static ValidWriteIdList getTableValidWriteIdListWithTxnList( + Configuration conf, String dbName, String tableName) throws LockException { + HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr(); + if (sessionTxnMgr == null) { + return null; + } + String validTxnList = conf.get(ValidTxnList.VALID_TXNS_KEY); + List tablesInput = new ArrayList<>(); + String fullTableName = getFullTableName(dbName, tableName); + tablesInput.add(fullTableName); + + ValidTxnWriteIdList validTxnWriteIdList = + sessionTxnMgr.getValidWriteIds(tablesInput, validTxnList); + return validTxnWriteIdList != null ? + validTxnWriteIdList.getTableValidWriteIdList(fullTableName) : null; + } + public static String getFullTableName(String dbName, String tableName) { return dbName.toLowerCase() + "." 
+ tableName.toLowerCase(); } @@ -1915,8 +2044,8 @@ public static String getFirstLevelAcidDirPath(Path dataPath, FileSystem fileSyst } public static boolean isAcidEnabled(HiveConf hiveConf) { - String txnMgr = hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER); - boolean concurrency = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); + String txnMgr = hiveConf.getVar(ConfVars.HIVE_TXN_MANAGER); + boolean concurrency = hiveConf.getBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY); String dbTxnMgr = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"; if (txnMgr.equals(dbTxnMgr) && concurrency) { return true; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 78980fad93..a05ae0c374 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -56,6 +56,7 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; +import org.apache.hadoop.hive.ql.session.*; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; import org.apache.thrift.TException; @@ -1023,12 +1024,30 @@ public int getCurrentStmtId() { @Override public long getTableWriteId(String dbName, String tableName) throws LockException { assert isTxnOpen(); + return getTableWriteId(dbName, tableName, true); + } + + @Override + public long getAllocatedTableWriteId(String dbName, String tableName) throws LockException { + assert isTxnOpen(); + // Calls getTableWriteId() with allocateIfNotYet set to false, + // to return 0 if the dbName:tableName's writeId is not yet allocated. + // This happens when the current context is before + // Driver.acquireLocks() is called. 
+ return getTableWriteId(dbName, tableName, false); + } + + private long getTableWriteId( + String dbName, String tableName, boolean allocateIfNotYet) throws LockException { String fullTableName = AcidUtils.getFullTableName(dbName, tableName); if (tableWriteIds.containsKey(fullTableName)) { return tableWriteIds.get(fullTableName); + } else if (!allocateIfNotYet) { + return 0; } try { long writeId = getMS().allocateTableWriteId(txnId, dbName, tableName); + LOG.debug("Allocated write ID {} for {}.{}", writeId, dbName, tableName); tableWriteIds.put(fullTableName, writeId); return writeId; } catch (TException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index 1feddebf2b..03f2ff31be 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -82,6 +82,12 @@ public int getCurrentStmtId() { public long getTableWriteId(String dbName, String tableName) throws LockException { return 0L; } + + @Override + public long getAllocatedTableWriteId(String dbName, String tableName) throws LockException { + return 0L; + } + @Override public void replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy, List srcTxnToWriteIdList) throws LockException { diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index 9575552ff0..6a01abc9fa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -278,7 +278,18 @@ void replTableWriteIdState(String validWriteIdList, String dbName, String tableN */ long getTableWriteId(String dbName, String tableName) throws LockException; - /** + /** + * If {@code isTxnOpen()}, returns the already allocated table write ID of the table with + * the given "dbName.tableName" for the current active transaction. + * If not allocated, then returns 0. + * @param dbName database name + * @param tableName table name + * @return the allocated write ID, or 0 if none has been allocated yet + * @throws LockException + */ + long getAllocatedTableWriteId(String dbName, String tableName) throws LockException; + + /** * Allocates write id for each transaction in the list. 
* @param dbName database name * @param tableName the name of the table to allocate the write id diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index cd62d496a3..07fe43fc5e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -18,14 +18,15 @@ package org.apache.hadoop.hive.ql.metadata; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Splitter; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW; +import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer.makeBinaryPredicate; @@ -62,6 +63,7 @@ import javax.jdo.JDODataStoreException; import com.google.common.collect.ImmutableList; + import org.apache.calcite.plan.RelOptMaterialization; import org.apache.calcite.plan.hep.HepPlanner; import org.apache.calcite.plan.hep.HepProgramBuilder; @@ -80,11 +82,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.HiveStatsUtils; -import org.apache.hadoop.hive.common.ObjectPair; -import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.*; import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable; import org.apache.hadoop.hive.common.log.InPlaceUpdate; @@ -105,61 +103,7 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.ReplChangeManager; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CompactionResponse; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FireEventRequest; -import org.apache.hadoop.hive.metastore.api.FireEventRequestData; -import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; -import 
org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.InsertEventRequestData; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.Materialization; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMPool; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; -import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator; @@ -637,9 +581,11 @@ public void createTable(String tableName, List columns, List par createTable(tbl); } - public void alterTable(Table newTbl, EnvironmentContext environmentContext) - throws HiveException { - alterTable(newTbl.getDbName(), newTbl.getTableName(), newTbl, false, environmentContext); + + public void alterTable(Table newTbl, boolean cascade, EnvironmentContext environmentContext, + boolean transactional) throws HiveException { + alterTable(newTbl.getDbName(), + newTbl.getTableName(), newTbl, cascade, environmentContext, transactional); } /** @@ -649,23 +595,28 @@ public void alterTable(Table newTbl, EnvironmentContext environmentContext) * name of the existing table * @param newTbl * new name of the table. could be the old name + * @param transactional + * Need to generate and save a table snapshot into the metastore? 
* @throws InvalidOperationException * if the changes in metadata is not acceptable * @throws TException */ - public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext) + public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext, + boolean transactional) throws HiveException { - alterTable(fullyQlfdTblName, newTbl, false, environmentContext); + String[] names = Utilities.getDbTableName(fullyQlfdTblName); + alterTable(names[0], names[1], newTbl, false, environmentContext, transactional); } - public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext) + public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, + EnvironmentContext environmentContext, boolean transactional) throws HiveException { String[] names = Utilities.getDbTableName(fullyQlfdTblName); - alterTable(names[0], names[1], newTbl, cascade, environmentContext); + alterTable(names[0], names[1], newTbl, cascade, environmentContext, transactional); } public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade, - EnvironmentContext environmentContext) + EnvironmentContext environmentContext, boolean transactional) throws HiveException { try { @@ -680,7 +631,14 @@ public void alterTable(String dbName, String tblName, Table newTbl, boolean casc if (cascade) { environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); } - getMSC().alter_table_with_environmentContext(dbName, tblName, newTbl.getTTable(), environmentContext); + + // Take a table snapshot and set it to newTbl. + if (transactional) { + setTableSnapshotForTransactionalTable(environmentContext, conf, newTbl, true); + } + + getMSC().alter_table_with_environmentContext( + dbName, tblName, newTbl.getTTable(), environmentContext); } catch (MetaException e) { throw new HiveException("Unable to alter table. 
" + e.getMessage(), e); } catch (TException e) { @@ -708,10 +666,11 @@ public void updateCreationMetadata(String dbName, String tableName, CreationMeta * if the changes in metadata is not acceptable * @throws TException */ - public void alterPartition(String tblName, Partition newPart, EnvironmentContext environmentContext) + public void alterPartition(String tblName, Partition newPart, + EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException { String[] names = Utilities.getDbTableName(tblName); - alterPartition(names[0], names[1], newPart, environmentContext); + alterPartition(names[0], names[1], newPart, environmentContext, transactional); } /** @@ -723,11 +682,16 @@ public void alterPartition(String tblName, Partition newPart, EnvironmentContext * name of the existing table * @param newPart * new partition + * @param environmentContext + * environment context for the method + * @param transactional + * indicates this call is for transaction stats * @throws InvalidOperationException * if the changes in metadata is not acceptable * @throws TException */ - public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) + public void alterPartition(String dbName, String tblName, Partition newPart, + EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException { try { validatePartition(newPart); @@ -736,7 +700,14 @@ public void alterPartition(String dbName, String tblName, Partition newPart, Env location = Utilities.getQualifiedPath(conf, new Path(location)); newPart.setLocation(location); } - getSynchronizedMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext); + if (environmentContext == null) { + environmentContext = new EnvironmentContext(); + } + if (transactional) { + setTableSnapshotForTransactionalPartition(environmentContext, conf, newPart, true); + } + getSynchronizedMSC().alter_partition( + dbName, tblName, newPart.getTPartition(), environmentContext); } catch (MetaException e) { throw new HiveException("Unable to alter partition. " + e.getMessage(), e); @@ -760,16 +731,23 @@ private void validatePartition(Partition newPart) throws HiveException { * name of the existing table * @param newParts * new partitions + * @param transactional + * Need to generate and save a table snapshot into the metastore? * @throws InvalidOperationException * if the changes in metadata is not acceptable * @throws TException */ - public void alterPartitions(String tblName, List newParts, EnvironmentContext environmentContext) + public void alterPartitions(String tblName, List newParts, + EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException { String[] names = Utilities.getDbTableName(tblName); List newTParts = new ArrayList(); try { + AcidUtils.TableSnapshot tableSnapshot = null; + if (transactional) { + tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable(), true); + } // Remove the DDL time so that it gets refreshed for (Partition tmpPart: newParts) { if (tmpPart.getParameters() != null) { @@ -782,7 +760,10 @@ public void alterPartitions(String tblName, List newParts, Environmen } newTParts.add(tmpPart.getTPartition()); } - getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext); + getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext, + tableSnapshot != null ? 
tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null, + tableSnapshot != null ? tableSnapshot.getWriteId() : -1 ); } catch (MetaException e) { throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } catch (TException e) { @@ -913,6 +894,8 @@ public void createTable(Table tbl, boolean ifNotExists, tTbl.setPrivileges(principalPrivs); } } + // Set table snapshot to api.Table to make it persistent. + setTableSnapshotForTransactionalTable(null, conf, tbl, true); if (primaryKeys == null && foreignKeys == null && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null && checkConstraints == null) { @@ -1115,7 +1098,27 @@ public Table getTable(final String dbName, final String tableName) throws HiveEx * @throws HiveException */ public Table getTable(final String dbName, final String tableName, - boolean throwException) throws HiveException { + boolean throwException) throws HiveException { + return this.getTable(dbName, tableName, throwException, false); + } + + /** + * Returns metadata of the table. + * + * @param dbName + * the name of the database + * @param tableName + * the name of the table + * @param throwException + * controls whether an exception is thrown or null is returned + * @param checkTransactional + * checks whether the metadata table stats are valid for (i.e., compliant + * with the snapshot isolation of) the current transaction. + * @return the table, or null if throwException is false. + * @throws HiveException + */ + public Table getTable(final String dbName, final String tableName, + boolean throwException, boolean checkTransactional) throws HiveException { if (tableName == null || tableName.equals("")) { throw new HiveException("empty table creation??"); @@ -1124,7 +1127,20 @@ public Table getTable(final String dbName, final String tableName, // Get the table from metastore org.apache.hadoop.hive.metastore.api.Table tTable = null; try { - tTable = getMSC().getTable(dbName, tableName); + // Note: this is currently called w/true from StatsOptimizer only. + if (checkTransactional) { + ValidWriteIdList validWriteIdList = null; + long txnId = SessionState.get().getTxnMgr() != null ? + SessionState.get().getTxnMgr().getCurrentTxnId() : 0; + if (txnId > 0) { + validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf, + dbName, tableName); + } + tTable = getMSC().getTable(dbName, tableName, txnId, + validWriteIdList != null ? validWriteIdList.toString() : null); + } else { + tTable = getMSC().getTable(dbName, tableName); + } } catch (NoSuchObjectException e) { if (throwException) { LOG.error("Table " + dbName + "." + tableName + " not found: " + e.getMessage()); @@ -1783,6 +1799,8 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath); alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString()); validatePartition(newTPart); + EnvironmentContext ec = new EnvironmentContext(); + setTableSnapshotForTransactionalPartition(ec, conf, newTPart, true); // If config is set, table is not temporary and partition being inserted exists, capture // the list of files added. 
For not yet existing partitions (insert overwrite to new partition @@ -1855,7 +1873,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par // insert into table T partition (ds) values ('Joe', 'today'); -- will fail with AlreadyExistsException // In that case, we want to retry with alterPartition. LOG.debug("Caught AlreadyExistsException, trying to alter partition instead"); - setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart); + setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, ec); } catch (Exception e) { try { final FileSystem newPathFileSystem = newPartPath.getFileSystem(this.getConf()); @@ -1874,7 +1892,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par addWriteNotificationLog(tbl, partSpec, newFiles, writeId); } } else { - setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart); + setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, ec); } perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION); @@ -1971,15 +1989,13 @@ private void listFilesCreatedByQuery(Path loadPath, long writeId, int stmtId, } private void setStatsPropAndAlterPartition(boolean hasFollowingStatsTask, Table tbl, - Partition newTPart) throws MetaException, TException { - EnvironmentContext environmentContext = null; + Partition newTPart, EnvironmentContext ec) throws MetaException, TException { if (hasFollowingStatsTask) { - environmentContext = new EnvironmentContext(); - environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); + ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); } LOG.debug("Altering existing partition " + newTPart.getSpec()); getSynchronizedMSC().alter_partition(tbl.getDbName(), tbl.getTableName(), - newTPart.getTPartition(), environmentContext); + newTPart.getTPartition(), ec); } /** @@ -2414,7 +2430,7 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); } - alterTable(tbl, environmentContext); + alterTable(tbl, false, environmentContext, true); if (AcidUtils.isTransactionalTable(tbl)) { addWriteNotificationLog(tbl, null, newFiles, writeId); @@ -2436,10 +2452,14 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType * @throws HiveException * if table doesn't exist or partition already exists */ + @VisibleForTesting public Partition createPartition(Table tbl, Map partSpec) throws HiveException { try { - return new Partition(tbl, getMSC().add_partition( - Partition.createMetaPartitionObject(tbl, partSpec, null))); + org.apache.hadoop.hive.metastore.api.Partition part = + Partition.createMetaPartitionObject(tbl, partSpec, null); + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); + part.setWriteId(tableSnapshot != null ? 
tableSnapshot.getWriteId() : 0); + return new Partition(tbl, getMSC().add_partition(part)); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); @@ -2451,8 +2471,15 @@ public Partition createPartition(Table tbl, Map partSpec) throws int size = addPartitionDesc.getPartitionCount(); List in = new ArrayList(size); + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(conf, tbl); for (int i = 0; i < size; ++i) { - in.add(convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf)); + org.apache.hadoop.hive.metastore.api.Partition tmpPart = + convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf); + if (tmpPart != null && tableSnapshot != null && tableSnapshot.getWriteId() > 0) { + tmpPart.setWriteId(tableSnapshot.getWriteId()); + } + in.add(tmpPart); } List out = new ArrayList(); try { @@ -2488,7 +2515,8 @@ public Partition createPartition(Table tbl, Map partSpec) throws : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) { out.add(new Partition(tbl, outPart)); } - getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), partsToAlter, null); + getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), + partsToAlter, new EnvironmentContext()); for ( org.apache.hadoop.hive.metastore.api.Partition outPart : getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),part_names)){ @@ -2647,7 +2675,7 @@ private void alterPartitionSpec(Table tbl, if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) { fullName = tbl.getFullyQualifiedName(); } - alterPartition(fullName, new Partition(tbl, tpart), null); + alterPartition(fullName, new Partition(tbl, tpart), null, true); } private void alterPartitionSpecInMemory(Table tbl, @@ -4460,8 +4488,17 @@ private static String getUserName() { } } - public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException { + public boolean setPartitionColumnStatistics( + SetPartitionsStatsRequest request) throws HiveException { try { + ColumnStatistics colStat = request.getColStats().get(0); + ColumnStatisticsDesc statsDesc = colStat.getStatsDesc(); + Table tbl = getTable(statsDesc.getDbName(), statsDesc.getTableName()); + + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true); + request.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0); + request.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + request.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0); return getMSC().setPartitionColumnStatistics(request); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); @@ -4470,19 +4507,45 @@ public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) t } public List getTableColumnStatistics( - String dbName, String tableName, List colNames) throws HiveException { + String dbName, String tableName, List colNames, boolean checkTransactional) + throws HiveException { + + List retv = null; try { - return getMSC().getTableColumnStatistics(dbName, tableName, colNames); + if (checkTransactional) { + Table tbl = getTable(dbName, tableName); + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); + if (tableSnapshot != null && tableSnapshot.getTxnId() > 0) { + retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames, + tableSnapshot != null ? 
tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + } + } else { + retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames); + } + return retv; } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); } } - public Map> getPartitionColumnStatistics(String dbName, - String tableName, List partNames, List colNames) throws HiveException { - try { - return getMSC().getPartitionColumnStatistics(dbName, tableName, partNames, colNames); + public Map> getPartitionColumnStatistics( + String dbName, String tableName, List partNames, List colNames, + boolean checkTransactional) + throws HiveException { + long txnId = -1; + String writeIdList = null; + try { + if (checkTransactional) { + Table tbl = getTable(dbName, tableName); + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); + txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1; + writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null; + } + + return getMSC().getPartitionColumnStatistics(dbName, tableName, partNames, colNames, + txnId, writeIdList); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); @@ -4490,9 +4553,18 @@ public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) t } public AggrStats getAggrColStatsFor(String dbName, String tblName, - List colNames, List partName) { - try { - return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName); + List colNames, List partName, boolean checkTransactional) { + long txnId = -1; + String writeIdList = null; + try { + if (checkTransactional) { + Table tbl = getTable(dbName, tblName); + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); + txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1; + writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null; + } + return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName, + txnId, writeIdList); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); return new AggrStats(new ArrayList(),0); @@ -5290,4 +5362,43 @@ public StorageHandlerInfo getStorageHandlerInfo(Table table) throw new HiveException(e); } } + + private void setTableSnapshotForTransactionalTable(EnvironmentContext ec, HiveConf conf, + Table newTbl, boolean isStatsUpdater) throws LockException { + + org.apache.hadoop.hive.metastore.api.Table newTTbl = newTbl.getTTable(); + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(conf, newTbl, isStatsUpdater); + if (tableSnapshot == null) return; + if (ec != null) { // Can be null for create table case; we don't need to verify txn stats. 
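+ // (The metastore is expected to use these properties to decide whether existing txn stats remain valid.)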
+ ec.putToProperties(StatsSetupConst.TXN_ID, Long.toString(tableSnapshot.getTxnId())); + if (tableSnapshot.getValidWriteIdList() != null) { + ec.putToProperties(StatsSetupConst.VALID_WRITE_IDS, tableSnapshot.getValidWriteIdList()); + } else { + LOG.warn("Table snapshot has null write IDs for " + newTbl); + } + } + + if (isStatsUpdater) { + newTTbl.setWriteId(tableSnapshot.getWriteId()); + } + } + + private void setTableSnapshotForTransactionalPartition(EnvironmentContext ec, HiveConf conf, + Partition partition, boolean isStatsUpdater) throws LockException { + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(conf, partition.getTable(), isStatsUpdater); + org.apache.hadoop.hive.metastore.api.Partition tpartition = partition.getTPartition(); + if (tableSnapshot == null) return; + ec.putToProperties(StatsSetupConst.TXN_ID, Long.toString(tableSnapshot.getTxnId())); + if (tableSnapshot.getValidWriteIdList() != null) { + ec.putToProperties(StatsSetupConst.VALID_WRITE_IDS, tableSnapshot.getValidWriteIdList()); + } else { + LOG.warn("Table snapshot has null write IDs for " + partition); + } + + if (isStatsUpdater) { + tpartition.setWriteId(tableSnapshot.getWriteId()); + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java index f3ec70926f..6c922eed26 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -282,7 +283,17 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // limit. In order to be safe, we do not use it now. return null; } + + Hive hive = Hive.get(pctx.getConf()); Table tbl = tsOp.getConf().getTableMetadata(); + boolean isTransactionalTable = AcidUtils.isTransactionalTable(tbl); + + // If the table is transactional, get stats state by calling getTable() with + // transactional flag on to check the validity of table stats. + if (isTransactionalTable) { + tbl = hive.getTable(tbl.getDbName(), tbl.getTableName(), true, true); + } + if (MetaStoreUtils.isExternalTable(tbl.getTTable())) { Logger.info("Table " + tbl.getTableName() + " is external. Skip StatsOptimizer."); return null; @@ -291,11 +302,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Logger.info("Table " + tbl.getTableName() + " is non Native table. Skip StatsOptimizer."); return null; } - if (AcidUtils.isTransactionalTable(tbl)) { - //todo: should this be OK for MM table? - Logger.info("Table " + tbl.getTableName() + " is ACID table. Skip StatsOptimizer."); - return null; - } + Long rowCnt = getRowCnt(pctx, tsOp, tbl); // if we can not have correct table stats, then both the table stats and column stats are not useful. 
if (rowCnt == null) { @@ -375,7 +382,8 @@ else if (getGbyKeyType(cgbyOp) == GbyKeyType.CONSTANT && rowCnt == 0) { List oneRow = new ArrayList(); - Hive hive = Hive.get(pctx.getConf()); + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(pctx.getConf(), tbl); for (AggregationDesc aggr : pgbyOp.getConf().getAggregators()) { if (aggr.getDistinct()) { @@ -462,8 +470,13 @@ else if (udaf instanceof GenericUDAFCount) { + " are not up to date."); return null; } - List stats = hive.getMSC().getTableColumnStatistics( - tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName)); + + List stats = + hive.getMSC().getTableColumnStatistics( + tbl.getDbName(), tbl.getTableName(), + Lists.newArrayList(colName), + tableSnapshot != null ? tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); if (stats.isEmpty()) { Logger.debug("No stats for " + tbl.getTableName() + " column " + colName); return null; @@ -523,8 +536,13 @@ else if (udaf instanceof GenericUDAFCount) { + " are not up to date."); return null; } - List stats = hive.getMSC().getTableColumnStatistics( - tbl.getDbName(),tbl.getTableName(), Lists.newArrayList(colName)); + + List stats = + hive.getMSC().getTableColumnStatistics( + tbl.getDbName(), tbl.getTableName(), + Lists.newArrayList(colName), + tableSnapshot != null ? tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); if (stats.isEmpty()) { Logger.debug("No stats for " + tbl.getTableName() + " column " + colName); return null; @@ -664,9 +682,12 @@ else if (udaf instanceof GenericUDAFCount) { + " are not up to date."); return null; } - ColumnStatisticsData statData = hive.getMSC().getTableColumnStatistics( - tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName)) - .get(0).getStatsData(); + ColumnStatisticsData statData = + hive.getMSC().getTableColumnStatistics( + tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName), + tableSnapshot != null ? tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null) + .get(0).getStatsData(); String name = colDesc.getTypeString().toUpperCase(); switch (type) { case Integer: { @@ -887,7 +908,7 @@ private ColumnStatisticsData validateSingleColStat(List sta } private Collection> verifyAndGetPartColumnStats( - Hive hive, Table tbl, String colName, Set parts) throws TException { + Hive hive, Table tbl, String colName, Set parts) throws TException, LockException { List partNames = new ArrayList(parts.size()); for (Partition part : parts) { if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(part.getTable(), part.getParameters(), colName)) { @@ -897,8 +918,13 @@ private ColumnStatisticsData validateSingleColStat(List sta } partNames.add(part.getName()); } + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(hive.getConf(), tbl); + Map> result = hive.getMSC().getPartitionColumnStatistics( - tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName)); + tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName), + tableSnapshot != null ? tableSnapshot.getTxnId() : -1, + tableSnapshot != null ? 
tableSnapshot.getValidWriteIdList() : null); if (result.size() != parts.size()) { Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions"); return null; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java index 6cc6d02b14..f66f47a838 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java @@ -593,6 +593,7 @@ private void updateColStats(Set projIndxLst, boolean allowMissingStats) return getColStat(projIndxLst, HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_STATS_ESTIMATE_STATS)); } + /** Note: DOES NOT CHECK txn stats. */ public List getColStat(List projIndxLst, boolean allowMissingStats) { List colStatsBldr = Lists.newArrayList(); Set projIndxSet = new HashSet(projIndxLst); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java index e77fc3eac8..80f77b9f0c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java @@ -186,7 +186,7 @@ public void setLbCtx(ListBucketingCtx lbCtx) { } } } else { - Utilities.FILE_OP_LOGGER.info("Resolver returning movetask for " + dirPath, new Exception()); + Utilities.FILE_OP_LOGGER.info("Resolver returning movetask for " + dirPath); resTsks.add(mvTask); } } catch (IOException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java index d4d46a3671..9a271a2431 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java @@ -344,12 +344,12 @@ private int updatePartitions(Hive db, List scs, Table table } if (values.get(0).result instanceof Table) { - db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext); + db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext, true); LOG.debug("Updated stats for {}.", tableFullName); } else { if (values.get(0).result instanceof Partition) { List results = Lists.transform(values, FooterStatCollector.EXTRACT_RESULT_FUNCTION); - db.alterPartitions(tableFullName, results, environmentContext); + db.alterPartitions(tableFullName, results, environmentContext, true); LOG.debug("Bulk updated {} partitions of {}.", results.size(), tableFullName); } else { throw new RuntimeException("inconsistent"); diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index f31c170a30..978a7a2372 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -127,10 +127,7 @@ public BasicStatsProcessor(Partish partish, BasicStatsWork work, HiveConf conf, public Object process(StatsAggregator statsAggregator) throws HiveException, MetaException { Partish p = partish; Map parameters = p.getPartParameters(); - if (p.isTransactionalTable()) { - // TODO: this should also happen on any error. Right now this task will just fail. 
- StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE); - } else if (work.isTargetRewritten()) { + if (work.isTargetRewritten()) { StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE); } @@ -266,7 +263,7 @@ private int aggregateStats(Hive db) { if (res == null) { return 0; } - db.alterTable(tableFullName, res, environmentContext); + db.alterTable(tableFullName, res, environmentContext, true); if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) { console.printInfo("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']'); @@ -334,7 +331,7 @@ public Void call() throws Exception { } if (!updates.isEmpty()) { - db.alterPartitions(tableFullName, updates, environmentContext); + db.alterPartitions(tableFullName, updates, environmentContext, true); } if (work.isStatsReliable() && updates.size() != processors.size()) { LOG.info("Stats should be reliable... however it seems like there were some issues => ret 1"); } diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java index d4cfd0ad62..39209b3bb3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java @@ -34,12 +34,15 @@ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.exec.FetchOperator; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc; import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; @@ -176,6 +179,14 @@ public int persistColumnStats(Hive db, Table tbl) throws HiveException, MetaExce } SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats); request.setNeedMerge(colStatDesc.isNeedMerge()); + HiveTxnManager txnMgr = AcidUtils.isTransactionalTable(tbl) + ? SessionState.get().getTxnMgr() : null; + if (txnMgr != null) { + request.setTxnId(txnMgr.getCurrentTxnId()); + request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf, + AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString()); + request.setWriteId(txnMgr.getAllocatedTableWriteId(tbl.getDbName(), tbl.getTableName())); + } db.setPartitionColumnStatistics(request); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java index ddca70497a..bb181a192a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java @@ -409,6 +409,8 @@ private String buildPartColStr(Table table) { List allCols) throws MetaException { ColumnStatistics existingStats = null; try { + // Note: this should NOT do txn verification - we want to get outdated stats, to + // see if we need to update anything.
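+      // (The getTableColumnStatistics overloads that take txnId/writeIdList are the ones that perform that verification.)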
existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols); } catch (NoSuchObjectException e) { LOG.error("Cannot retrieve existing stats, skipping " + fullTableName, e); diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index 494939a799..cb6913e131 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -128,6 +128,7 @@ /** * Collect table, partition and column level statistics + * Note: DOES NOT CHECK txn stats. * @param conf * - hive configuration * @param partList @@ -226,6 +227,7 @@ private static void estimateStatsForMissingCols(List neededColumns, List } } + /** Note: DOES NOT CHECK txn stats. */ public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList, Table table, List schema, List neededColumns, ColumnStatsList colStatsCache, List referencedColumns, boolean fetchColStats) @@ -262,7 +264,10 @@ private static Statistics collectStatistics(HiveConf conf, PrunedPartitionList p List colStats = Lists.newArrayList(); if (fetchColStats) { - colStats = getTableColumnStats(table, schema, neededColumns, colStatsCache); + // Note: this is currently called from two notable places (w/false for checkTxn) + // 1) StatsRulesProcFactory.TableScanStatsRule via collectStatistics + // 2) RelOptHiveTable via getColStats and updateColStats. + colStats = getTableColumnStats(table, schema, neededColumns, colStatsCache, false); if(colStats == null) { colStats = Lists.newArrayList(); } @@ -378,8 +383,11 @@ private static Statistics collectStatistics(HiveConf conf, PrunedPartitionList p // size is 0, aggrStats is null after several retries. Thus, we can // skip the step to connect to the metastore. if (neededColsToRetrieve.size() > 0 && partNames.size() > 0) { + // Note: this is currently called from two notable places (w/false for checkTxn) + // 1) StatsRulesProcFactory.TableScanStatsRule via collectStatistics + // 2) RelOptHiveTable via getColStats and updateColStats. 
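+        // Both are planner-side estimate paths, so possibly-stale stats are acceptable there; the txn-verified lookups happen on the query-answering paths instead.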
aggrStats = Hive.get().getAggrColStatsFor(table.getDbName(), table.getTableName(), - neededColsToRetrieve, partNames); + neededColsToRetrieve, partNames, false); } boolean statsRetrieved = aggrStats != null && @@ -990,7 +998,7 @@ else if(colTypeLowerCase.equals(serdeConstants.SMALLINT_TYPE_NAME)){ */ public static List getTableColumnStats( Table table, List schema, List neededColumns, - ColumnStatsList colStatsCache) { + ColumnStatsList colStatsCache, boolean checkTransactional) { if (table.isMaterializedTable()) { LOG.debug("Materialized table does not contain table statistics"); return null; @@ -1019,7 +1027,7 @@ else if(colTypeLowerCase.equals(serdeConstants.SMALLINT_TYPE_NAME)){ List stats = null; try { List colStat = Hive.get().getTableColumnStatistics( - dbName, tabName, colStatsToRetrieve); + dbName, tabName, colStatsToRetrieve, checkTransactional); stats = convertColStats(colStat, tabName); } catch (HiveException e) { LOG.error("Failed to retrieve table statistics: ", e); diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index a24b6423ba..d30bbde071 100755 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -324,6 +324,10 @@ private void validateTable(Table tbl, String tableName) throws MetaException { tbl.setCreateTime(ft.getTTable().getCreateTime()); tbl.getParameters().put(hive_metastoreConstants.DDL_TIME, ft.getParameters().get(hive_metastoreConstants.DDL_TIME)); + // Txn stuff set by metastore + if (tbl.getTTable().isSetWriteId()) { + ft.getTTable().setWriteId(tbl.getTTable().getWriteId()); + } assertTrue("Tables don't match: " + tableName + " (" + ft.getTTable() + "; " + tbl.getTTable() + ")", ft.getTTable().equals(tbl.getTTable())); assertEquals("SerializationLib is not set correctly", tbl @@ -593,7 +597,7 @@ public void testAutoPurgeTablesAndPartitions() throws Throwable { Table table = createPartitionedTable(dbName, tableName); table.getParameters().put("auto.purge", "true"); - hm.alterTable(tableName, table, null); + hm.alterTable(tableName, table, false, null, true); Map partitionSpec = new ImmutableMap.Builder() .put("ds", "20141216") diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java index ffd0445db0..257a6adf8e 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java @@ -231,6 +231,7 @@ public void setup() { conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); + conf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true); } public void cleanupTables() throws HiveException { diff --git ql/src/test/queries/clientpositive/acid_stats.q ql/src/test/queries/clientpositive/acid_stats.q new file mode 100644 index 0000000000..1e1c9b005a --- /dev/null +++ ql/src/test/queries/clientpositive/acid_stats.q @@ -0,0 +1,46 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; +set 
hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.fetch.task.conversion=none; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +-- test simple partition case + +create table stats_part(key int,value string) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only"); + +insert into table stats_part partition(p=101) values (1, "foo"); +explain select count(key) from stats_part; +insert into table stats_part partition(p=102) values (1, "bar"); +explain select count(key) from stats_part; + +alter table stats_part drop partition (p=102); +explain select count(key) from stats_part; + +drop table stats_part; + +-- test the case where we insert without updating stats... just in case + +create table stats2(key int,value string) tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +insert into table stats2 values (1, "foo"); +explain select count(*) from stats2; +insert into table stats2 values (1, "bar"); +explain select count(*) from stats2; + +set hive.stats.autogather=false; +set hive.stats.column.autogather=false; +insert into table stats2 values (1, "baz"); +explain select count(*) from stats2; + +drop table stats2; diff --git ql/src/test/queries/clientpositive/acid_stats2.q ql/src/test/queries/clientpositive/acid_stats2.q new file mode 100644 index 0000000000..cf96731985 --- /dev/null +++ ql/src/test/queries/clientpositive/acid_stats2.q @@ -0,0 +1,42 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.fetch.task.conversion=none; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +create table stats3(key int,value string) stored as orc tblproperties ("transactional"="true"); +insert into table stats3 values (1, "foo"); +explain select count(*) from stats3; +select count(*) from stats3; +insert into table stats3 values (2, "bar"); +explain select count(*) from stats3; +select count(*) from stats3; +update stats3 set value = "baz" where key = 4; +explain select count(*) from stats3; +select count(*) from stats3; +update stats3 set value = "baz" where key = 1; +explain select count(*) from stats3; +select count(*) from stats3; +delete from stats3 where key = 3; +explain select count(*) from stats3; +select count(*) from stats3; +delete from stats3 where key = 1; +explain select count(*) from stats3; +select count(*) from stats3; +delete from stats3 where key = 2; +explain select count(*) from stats3; +select count(*) from stats3; + +drop table stats3; \ No newline at end of file diff --git ql/src/test/queries/clientpositive/stats_nonpart.q ql/src/test/queries/clientpositive/stats_nonpart.q new file mode 100644 index 0000000000..f6019cc497 --- /dev/null +++ ql/src/test/queries/clientpositive/stats_nonpart.q @@ -0,0 +1,53 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set 
hive.stats.column.autogather=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +-- create source. +drop table if exists mysource; +create table mysource (p int,key int); +insert into mysource values (100,20), (101,40), (102,50); +insert into mysource values (100,30), (101,50), (102,60); + +-- test nonpartitioned table +drop table if exists stats_nonpartitioned; + +--create table stats_nonpartitioned(key int, value int) stored as orc; +create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true"); +--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); + + +explain select count(*) from stats_nonpartitioned; +select count(*) from stats_nonpartitioned; +desc formatted stats_nonpartitioned; + +explain insert into table stats_nonpartitioned select * from mysource where p == 100; +insert into table stats_nonpartitioned select * from mysource where p == 100; + +desc formatted stats_nonpartitioned; + +explain select count(*) from stats_nonpartitioned; +select count(*) from stats_nonpartitioned; +explain select count(key) from stats_nonpartitioned; +select count(key) from stats_nonpartitioned; + +--analyze table stats_nonpartitioned compute statistics; +analyze table stats_nonpartitioned compute statistics for columns key, value; + +explain select count(*) from stats_nonpartitioned; +select count(*) from stats_nonpartitioned; +explain select count(key) from stats_nonpartitioned; +select count(key) from stats_nonpartitioned; + diff --git ql/src/test/queries/clientpositive/stats_part.q ql/src/test/queries/clientpositive/stats_part.q new file mode 100644 index 0000000000..d0812e1007 --- /dev/null +++ ql/src/test/queries/clientpositive/stats_part.q @@ -0,0 +1,98 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +-- create source.
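+-- mysource just provides rows for the transactional table under test.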
+drop table if exists mysource; +create table mysource (p int, key int, value int); +insert into mysource values (100,20,201), (101,40,401), (102,50,501); +insert into mysource values (100,21,211), (101,41,411), (102,51,511); + +--explain select count(*) from mysource; +--select count(*) from mysource; + +-- Gather col stats manually +--analyze table mysource compute statistics for columns p, key; + +--explain select count(*) from mysource; +--select count(*) from mysource; +--explain select count(key) from mysource; +--select count(key) from mysource; + +-- test partitioned table +drop table if exists stats_part; + +--create table stats_part(key int,value string) partitioned by (p int) stored as orc; +create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true"); +--create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); + +explain select count(key) from stats_part; +--select count(*) from stats_part; +--explain select count(*) from stats_part where p = 100; +--select count(*) from stats_part where p = 100; +explain select count(key) from stats_part where p > 100; +--select count(*) from stats_part where p > 100; +desc formatted stats_part; + +--explain insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100; +insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100; +insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101; +insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102; + +desc formatted stats_part; + +insert into table mysource values (103,20,200), (103,83,832), (103,53,530); +insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102; + +desc formatted stats_part; +show partitions stats_part; + +explain select count(*) from stats_part; +select count(*) from stats_part; +explain select count(key) from stats_part; +select count(key) from stats_part; +explain select count(key) from stats_part where p > 100; +select count(key) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; +select max(key) from stats_part where p > 100; + +--update stats_part set key = key + 100 where key in(-50,40) and p > 100; +desc formatted stats_part; +explain select max(key) from stats_part where p > 100; +select max(key) from stats_part where p > 100; + +select count(value) from stats_part; +--update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100; +select count(value) from stats_part; + +--delete from stats_part where key in (20, 41); +desc formatted stats_part; + +explain select count(*) from stats_part where p = 100; +select count(*) from stats_part where p = 100; +explain select count(*) from stats_part where p > 100; +select count(*) from stats_part where p > 100; +explain select count(key) from stats_part; +select count(key) from stats_part; +explain select count(*) from stats_part where p > 100; +select count(*) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; +select max(key) from stats_part where p > 100; + +describe extended stats_part partition (p=101); +describe extended stats_part; + + diff --git ql/src/test/queries/clientpositive/stats_part2.q 
ql/src/test/queries/clientpositive/stats_part2.q new file mode 100644 index 0000000000..24be2185d0 --- /dev/null +++ ql/src/test/queries/clientpositive/stats_part2.q @@ -0,0 +1,100 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +-- create source. +drop table if exists mysource; +create table mysource (p int, key int, value string); +insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50'); +insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51'); + +-- test partitioned table +drop table if exists stats_part; + +--create table stats_part(key int,value string) partitioned by (p int) stored as orc; +create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true"); +--create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); + +--explain select count(*) from stats_part; +--select count(*) from stats_part; +--explain select count(*) from stats_part where p = 100; +--select count(*) from stats_part where p = 100; +explain select count(*) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; +--select count(*) from stats_part where p > 100; +desc formatted stats_part; + +--explain insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100; +insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100; +insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101; +insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102; + +desc formatted stats_part; +explain select count(key) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; + +insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53'); +insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102; + +desc formatted stats_part; +show partitions stats_part; + +explain select count(*) from stats_part; +select count(*) from stats_part; +explain select count(key) from stats_part; +select count(key) from stats_part; +explain select count(key) from stats_part where p > 100; +select count(key) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; +select max(key) from stats_part where p > 100; + +desc formatted stats_part partition(p = 100); +desc formatted stats_part partition(p = 101); +desc formatted stats_part partition(p = 102); +update stats_part set key = key + 100 where key in(-50,40) and p > 100; +explain select max(key) from stats_part where p > 100; +select max(key) from stats_part where p > 100; +desc formatted stats_part partition(p = 100); +desc formatted stats_part partition(p = 101); +desc formatted stats_part partition(p = 102); + +select count(value) from stats_part; +update stats_part set value = 
concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100; +desc formatted stats_part partition(p = 100); +desc formatted stats_part partition(p = 101); +desc formatted stats_part partition(p = 102); +select count(value) from stats_part; + +delete from stats_part where key in (20, 41); +desc formatted stats_part partition(p = 100); +desc formatted stats_part partition(p = 101); +desc formatted stats_part partition(p = 102); + +explain select count(*) from stats_part where p = 100; +select count(*) from stats_part where p = 100; +explain select count(*) from stats_part where p > 100; +select count(*) from stats_part where p > 100; +explain select count(key) from stats_part; +select count(key) from stats_part; +explain select count(*) from stats_part where p > 100; +select count(*) from stats_part where p > 100; +explain select max(key) from stats_part where p > 100; +select max(key) from stats_part where p > 100; + +describe extended stats_part partition (p=101); +describe extended stats_part; + + diff --git ql/src/test/queries/clientpositive/stats_sizebug.q ql/src/test/queries/clientpositive/stats_sizebug.q new file mode 100644 index 0000000000..7108766e34 --- /dev/null +++ ql/src/test/queries/clientpositive/stats_sizebug.q @@ -0,0 +1,37 @@ +set hive.stats.dbclass=fs; +set hive.stats.fetch.column.stats=true; +set datanucleus.cache.collections=false; + +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; + +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.query.results.cache.enabled=false; + +-- create source. 
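+-- note: the two inserts below load identical row sets, so only the row count changes between them.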
+drop table if exists mysource; +create table mysource (p int,key int); +insert into mysource values (100,20), (101,40), (102,50); +insert into mysource values (100,20), (101,40), (102,50); + +-- test nonpartitioned table +drop table if exists stats_nonpartitioned; + +--create table stats_nonpartitioned(key int, value int) stored as orc; +create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true"); +--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +explain insert into table stats_nonpartitioned select * from mysource where p == 100; +insert into table stats_nonpartitioned select * from mysource where p == 100; + +desc formatted stats_nonpartitioned; +analyze table mysource compute statistics for columns p, key; +desc formatted stats_nonpartitioned; + + diff --git ql/src/test/results/clientpositive/acid_nullscan.q.out ql/src/test/results/clientpositive/acid_nullscan.q.out index 19fcc8c457..b7d7dd86b9 100644 --- ql/src/test/results/clientpositive/acid_nullscan.q.out +++ ql/src/test/results/clientpositive/acid_nullscan.q.out @@ -69,6 +69,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true","b":"true"}} bucket_count 2 bucket_field_name a bucketing_version 2 @@ -93,6 +94,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true","b":"true"}} bucket_count 2 bucket_field_name a bucketing_version 2 diff --git ql/src/test/results/clientpositive/acid_stats.q.out ql/src/test/results/clientpositive/acid_stats.q.out new file mode 100644 index 0000000000..fd4ebe75d4 --- /dev/null +++ ql/src/test/results/clientpositive/acid_stats.q.out @@ -0,0 +1,207 @@ +PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats_part +POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats_part +PREHOOK: query: insert into table stats_part partition(p=101) values (1, "foo") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@stats_part@p=101 +POSTHOOK: query: insert into table stats_part partition(p=101) values (1, "foo") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Lineage: stats_part PARTITION(p=101).key SCRIPT [] +POSTHOOK: Lineage: stats_part PARTITION(p=101).value SCRIPT [] +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: insert into table stats_part partition(p=102) values (1, "bar") +PREHOOK: type: QUERY +PREHOOK: Input: 
_dummy_database@_dummy_table +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: insert into table stats_part partition(p=102) values (1, "bar") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@stats_part@p=102 +POSTHOOK: Lineage: stats_part PARTITION(p=102).key SCRIPT [] +POSTHOOK: Lineage: stats_part PARTITION(p=102).value SCRIPT [] +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: alter table stats_part drop partition (p=102) +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@stats_part +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: alter table stats_part drop partition (p=102) +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@stats_part +POSTHOOK: Output: default@stats_part@p=102 +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: drop table stats_part +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@stats_part +PREHOOK: Output: default@stats_part +POSTHOOK: query: drop table stats_part +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@stats_part +POSTHOOK: Output: default@stats_part +PREHOOK: query: create table stats2(key int,value string) tblproperties ("transactional"="true", "transactional_properties"="insert_only") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats2 +POSTHOOK: query: create table stats2(key int,value string) tblproperties ("transactional"="true", "transactional_properties"="insert_only") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats2 +PREHOOK: query: insert into table stats2 values (1, "foo") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@stats2 +POSTHOOK: query: insert into table stats2 values (1, "foo") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@stats2 +POSTHOOK: Lineage: stats2.key SCRIPT [] +POSTHOOK: Lineage: stats2.value SCRIPT [] +PREHOOK: query: explain select count(*) from stats2 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: insert into table stats2 values (1, "bar") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@stats2 +POSTHOOK: query: insert into table stats2 values (1, "bar") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@stats2 +POSTHOOK: Lineage: stats2.key SCRIPT [] +POSTHOOK: Lineage: stats2.value SCRIPT [] +PREHOOK: query: explain select count(*) from stats2 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: insert into table 
stats2 values (1, "baz") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@stats2 +POSTHOOK: query: insert into table stats2 values (1, "baz") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@stats2 +POSTHOOK: Lineage: stats2.key SCRIPT [] +POSTHOOK: Lineage: stats2.value SCRIPT [] +PREHOOK: query: explain select count(*) from stats2 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats2 + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: drop table stats2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@stats2 +PREHOOK: Output: default@stats2 +POSTHOOK: query: drop table stats2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@stats2 +POSTHOOK: Output: default@stats2 diff --git ql/src/test/results/clientpositive/acid_stats2.q.out ql/src/test/results/clientpositive/acid_stats2.q.out new file mode 100644 index 0000000000..5fc0505462 --- /dev/null +++ ql/src/test/results/clientpositive/acid_stats2.q.out @@ -0,0 +1,237 @@ +PREHOOK: query: create table stats3(key int,value string) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats3 +POSTHOOK: query: create table stats3(key int,value string) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats3 +PREHOOK: query: insert into table stats3 values (1, "foo") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@stats3 +POSTHOOK: query: insert into table stats3 values (1, "foo") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@stats3 +POSTHOOK: Lineage: stats3.key SCRIPT [] +POSTHOOK: Lineage: stats3.value SCRIPT [] +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from 
stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +1 +PREHOOK: query: insert into table stats3 values (2, "bar") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@stats3 +POSTHOOK: query: insert into table stats3 values (2, "bar") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@stats3 +POSTHOOK: Lineage: stats3.key SCRIPT [] +POSTHOOK: Lineage: stats3.value SCRIPT [] +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +2 +PREHOOK: query: update stats3 set value = "baz" where key = 4 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: update stats3 set value = "baz" where key = 4 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +2 +PREHOOK: query: update stats3 set value = "baz" where key = 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: update stats3 set value = "baz" where key = 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +2 +PREHOOK: query: delete from stats3 where key = 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: delete from stats3 where key = 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: 
select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +2 +PREHOOK: query: delete from stats3 where key = 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: delete from stats3 where key = 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +1 +PREHOOK: query: delete from stats3 where key = 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: delete from stats3 where key = 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 +PREHOOK: query: explain select count(*) from stats3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats3 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats3 +#### A masked pattern was here #### +0 +PREHOOK: query: drop table stats3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@stats3 +PREHOOK: Output: default@stats3 +POSTHOOK: query: drop table stats3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@stats3 +POSTHOOK: Output: default@stats3 diff --git ql/src/test/results/clientpositive/acid_table_stats.q.out ql/src/test/results/clientpositive/acid_table_stats.q.out index 841a5a42ae..fb064f8736 100644 --- ql/src/test/results/clientpositive/acid_table_stats.q.out +++ ql/src/test/results/clientpositive/acid_table_stats.q.out @@ -93,8 +93,9 @@ Database: default Table: acid #### A masked pattern was here #### Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} numFiles 2 - numRows 2000 + numRows 1000 rawDataSize 0 totalSize 4063 #### A masked pattern was here #### @@ -125,58 +126,22 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from acid where ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: acid - filterExpr: (ds = '2008-04-08') (type: boolean) - Statistics: Num rows: 2000 Data size: 40630 Basic stats: COMPLETE Column stats: NONE - Select Operator - Statistics: Num rows: 2000 Data size: 40630 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: 
NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink PREHOOK: query: select count(*) from acid where ds='2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@acid -PREHOOK: Input: default@acid@ds=2008-04-08 #### A masked pattern was here #### POSTHOOK: query: select count(*) from acid where ds='2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@acid -POSTHOOK: Input: default@acid@ds=2008-04-08 #### A masked pattern was here #### 1000 PREHOOK: query: analyze table acid partition(ds='2008-04-08') compute statistics @@ -209,7 +174,7 @@ Database: default Table: acid #### A masked pattern was here #### Partition Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} numFiles 2 numRows 1000 rawDataSize 208000 @@ -293,58 +258,22 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from acid where ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: acid - filterExpr: (ds = '2008-04-08') (type: boolean) - Statistics: Num rows: 1000 Data size: 208000 Basic stats: COMPLETE Column stats: NONE - Select Operator - Statistics: Num rows: 1000 Data size: 208000 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink PREHOOK: query: select count(*) from acid where ds='2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@acid -PREHOOK: Input: default@acid@ds=2008-04-08 #### A masked pattern was here #### POSTHOOK: query: select count(*) from acid where ds='2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@acid -POSTHOOK: Input: default@acid@ds=2008-04-08 #### A masked pattern was here #### 1000 
PREHOOK: query: insert into table acid partition(ds) select key,value,ds from srcpart @@ -388,8 +317,9 @@ Database: default Table: acid #### A masked pattern was here #### Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} numFiles 4 - numRows 3000 + numRows 2000 rawDataSize 208000 totalSize 8118 #### A masked pattern was here #### @@ -434,7 +364,7 @@ Database: default Table: acid #### A masked pattern was here #### Partition Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} numFiles 4 numRows 2000 rawDataSize 416000 @@ -456,58 +386,22 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from acid where ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: acid - filterExpr: (ds = '2008-04-08') (type: boolean) - Statistics: Num rows: 2000 Data size: 416000 Basic stats: COMPLETE Column stats: NONE - Select Operator - Statistics: Num rows: 2000 Data size: 416000 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink PREHOOK: query: select count(*) from acid where ds='2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@acid -PREHOOK: Input: default@acid@ds=2008-04-08 #### A masked pattern was here #### POSTHOOK: query: select count(*) from acid where ds='2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@acid -POSTHOOK: Input: default@acid@ds=2008-04-08 #### A masked pattern was here #### 2000 PREHOOK: query: analyze table acid partition(ds='2008-04-08') compute statistics for columns diff --git ql/src/test/results/clientpositive/autoColumnStats_4.q.out ql/src/test/results/clientpositive/autoColumnStats_4.q.out index 42c7b43132..190686547c 100644 --- ql/src/test/results/clientpositive/autoColumnStats_4.q.out +++ ql/src/test/results/clientpositive/autoColumnStats_4.q.out @@ -199,6 +199,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}} bucketing_version 2 numFiles 2 numRows 10 @@ -243,7 +244,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 numFiles 4 numRows 8 diff --git 
ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out index 29a05aec69..cfb9f1b2e2 100644 --- ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out +++ ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out @@ -45,22 +45,22 @@ STAGE PLANS: alias: acidtbldefault filterExpr: (a = 1) (type: boolean) buckets included: [13,] of 16 - Statistics: Num rows: 9174 Data size: 34868 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 9174 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Filter Operator isSamplingPred: false predicate: (a = 1) (type: boolean) - Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: 1 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### NumFilesPerFileSink: 1 - Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Stats Publishing Key Prefix: hdfs://### HDFS PATH ### table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat @@ -88,6 +88,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true"}} bucket_count 16 bucket_field_name a bucketing_version 2 @@ -113,6 +114,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true"}} bucket_count 16 bucket_field_name a bucketing_version 2 diff --git ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out index 790544174d..2ad9a4d1fb 100644 --- ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out +++ ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out @@ -95,19 +95,19 @@ STAGE PLANS: TableScan alias: acid_part filterExpr: ((key = 'foo') and (ds = '2008-04-08')) (type: boolean) - Statistics: Num rows: 1601 Data size: 150414 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1601 Data size: 139287 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL 
Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -116,10 +116,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -191,7 +191,7 @@ STAGE PLANS: TableScan alias: acid_part filterExpr: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean) - Statistics: Num rows: 1601 Data size: 444998 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1601 Data size: 433871 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL @@ -680,19 +680,19 @@ STAGE PLANS: TableScan alias: acid_2l_part filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean) - Statistics: Num rows: 1601 Data size: 150414 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1601 Data size: 139287 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -701,10 +701,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -777,19 +777,19 @@ STAGE PLANS: TableScan alias: acid_2l_part filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean) - Statistics: Num rows: 3201 Data size: 313458 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 3201 Data size: 291291 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) - 
Statistics: Num rows: 5 Data size: 455 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 910 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct), hr (type: int) outputColumnNames: _col0, _col4 - Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col4 (type: int) Execution mode: llap LLAP IO: may be used (ACID table) @@ -799,10 +799,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col2 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -904,19 +904,19 @@ STAGE PLANS: TableScan alias: acid_2l_part filterExpr: (value = 'bar') (type: boolean) - Statistics: Num rows: 4200 Data size: 1253037 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 4200 Data size: 1171800 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (value = 'bar') (type: boolean) - Statistics: Num rows: 5 Data size: 1375 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3906 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: ROW__ID (type: struct), ds (type: string), hr (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: string), _col2 (type: int) Execution mode: llap LLAP IO: may be used (ACID table) @@ -926,10 +926,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), VALUE._col1 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat diff --git ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out index 5a3f51956a..ecf79ae090 100644 --- ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out +++ ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out @@ -3237,19 +3237,19 @@ STAGE PLANS: TableScan alias: acid_uami_n1 filterExpr: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean) - Statistics: Num rows: 1002 Data size: 312584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1002 Data size: 225450 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean) - Statistics: Num rows: 5 Data size: 1559 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 675 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), i (type: int), vc (type: varchar(128)) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 5 Data size: 1559 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1559 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: int), _col3 (type: varchar(128)) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -3259,10 +3259,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 1559 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1559 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -3331,19 +3331,19 @@ STAGE PLANS: TableScan alias: acid_uami_n1 filterExpr: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean) - Statistics: Num rows: 1002 Data size: 312584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1002 Data size: 225450 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean) - Statistics: Num rows: 2 Data size: 623 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 225 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), i (type: int), vc (type: varchar(128)) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 2 Data size: 623 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: 
UDFToInteger(_col0) (type: int) - Statistics: Num rows: 2 Data size: 623 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: int), _col3 (type: varchar(128)) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -3353,10 +3353,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 623 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 623 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat diff --git ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out index 4c82e0977e..a93593f3ec 100644 --- ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out +++ ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out @@ -1705,19 +1705,19 @@ STAGE PLANS: TableScan alias: insert_into1_n0 filterExpr: (value = 1) (type: boolean) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (value = 1) (type: boolean) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), value (type: string), i (type: int) outputColumnNames: _col0, _col2, _col3 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: string), _col3 (type: int) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -1727,10 +1727,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 1 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1813,19 +1813,19 @@ STAGE PLANS: TableScan alias: insert_into1_n0 filterExpr: (value = 1) (type: boolean) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column 
stats: NONE + Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (value = 1) (type: boolean) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), i (type: int) outputColumnNames: _col0, _col3 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col3 (type: int) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -1835,10 +1835,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 1 (type: int), null (type: string), VALUE._col0 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -2371,15 +2371,15 @@ STAGE PLANS: TableScan alias: t filterExpr: enforce_constraint(key is not null) (type: boolean) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: enforce_constraint(key is not null) (type: boolean) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: key (type: int) sort order: + Map-reduce partition columns: key (type: int) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 5 @@ -2408,18 +2408,18 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col6 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: _col0 is null (type: boolean) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col6 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: Map-reduce partition columns: null (type: string) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column 
stats: COMPLETE value expressions: _col0 (type: int) Reducer 3 Execution mode: llap @@ -2427,10 +2427,10 @@ STAGE PLANS: Select Operator expressions: VALUE._col0 (type: int), 'a1' (type: string), null (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -2440,15 +2440,15 @@ STAGE PLANS: Select Operator expressions: _col0 (type: int), 'a1' (type: string), null (type: string) outputColumnNames: key, a1, value - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(a1, 'hll'), compute_stats(value, 'hll') mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct) Reducer 4 Execution mode: llap @@ -2457,10 +2457,10 @@ STAGE PLANS: aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2569,12 +2569,12 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: key (type: int) sort order: + Map-reduce partition columns: key (type: int) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE value expressions: value (type: string), ROW__ID (type: struct) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -2601,62 +2601,62 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col2, _col5, _col6, _col7 - Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 432 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((_col0 = _col6) and (_col6 < 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column 
stats: NONE + Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((_col0 = _col6) and (_col6 > 3) and (_col6 >= 3) and enforce_constraint(_col0 is not null)) (type: boolean) - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: struct), _col0 (type: int), _col2 (type: string) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: int), _col3 (type: string) Filter Operator predicate: (_col0 = _col6) (type: boolean) - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: struct) outputColumnNames: _col5 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: _col5 (type: struct) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: _col0 (type: struct) - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Filter Operator predicate: (_col0 is null and enforce_constraint(_col6 is not null)) (type: boolean) - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col6 (type: int), _col7 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: Map-reduce partition columns: null (type: string) - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: string) Reducer 3 
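[Note] The Data size figures rewritten throughout the hunks above are Num rows multiplied by an estimated average row width, which the planner can now derive from column statistics instead of a flat per-row default (e.g. "Num rows: 1 Data size: 76" for a lone ROW__ID struct, or "Num rows: 500 Data size: 43500" for a single string key). The standalone Java sketch below illustrates that style of estimate only; the width table is an assumption for illustration and does not claim to match Hive's internal StatsUtils constants.

    import java.util.List;
    import java.util.Map;

    /** Illustrative row-width estimator; the byte widths below are assumptions,
     *  not Hive's actual constants. */
    public class RowSizeEstimate {
        private static final Map<String, Integer> AVG_WIDTH = Map.of(
            "int", 4,
            "bigint", 8,
            "string", 87,      // assumed average: 43500 bytes / 500 rows in the key scans below
            "row__id", 76      // assumed width of the ACID ROW__ID struct, as in the hunks above
        );

        static long dataSize(long numRows, List<String> columns) {
            // Unknown types fall back to a default width of 8 bytes (illustrative).
            long rowWidth = columns.stream().mapToInt(c -> AVG_WIDTH.getOrDefault(c, 8)).sum();
            return numRows * rowWidth;
        }

        public static void main(String[] args) {
            System.out.println(dataSize(1, List.of("row__id")));   // 76
            System.out.println(dataSize(500, List.of("string")));  // 43500
        }
    }

With Column stats: NONE the planner had no per-column widths to sum, which is why the old estimates in these hunks were either raw-file heuristics or a fixed default.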
Execution mode: vectorized, llap @@ -2664,10 +2664,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -2680,10 +2680,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: int), 'a1' (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -2698,17 +2698,17 @@ STAGE PLANS: keys: KEY._col0 (type: struct) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col1 > 1L) (type: boolean) - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: cardinality_violation(_col0) (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2717,19 +2717,19 @@ STAGE PLANS: Select Operator expressions: _col0 (type: int) outputColumnNames: val - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(val, 'hll') mode: complete outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE table: input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2740,10 +2740,10 @@ STAGE PLANS: Select Operator expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), null (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -2753,15 +2753,15 @@ STAGE PLANS: Select Operator expressions: _col0 (type: int), _col1 (type: string), null (type: string) outputColumnNames: key, a1, value - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(a1, 'hll'), compute_stats(value, 'hll') mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct) Reducer 7 Execution mode: llap @@ -2770,10 +2770,10 @@ STAGE PLANS: aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out index 8d432a9d8e..bd88883782 100644 --- ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out +++ ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out @@ -168,6 +168,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 1 numRows 12288 @@ -192,54 +193,12 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from acid_ivot POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage 
- Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: acid_ivot - Statistics: Num rows: 12288 Data size: 2956380 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 12288 Data size: 2956380 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: vectorized, llap - LLAP IO: may be used (ACID table) - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink @@ -376,6 +335,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 1 numRows 2 @@ -400,54 +360,12 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from acid_ivot POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: acid_ivot - Statistics: Num rows: 2 Data size: 16520 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 2 Data size: 16520 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: vectorized, llap - LLAP IO: may be used (ACID table) - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink @@ -511,6 +429,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 2 numRows 4 @@ -535,54 +454,12 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from acid_ivot POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: acid_ivot - Statistics: Num rows: 4 Data size: 33040 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 4 Data size: 33040 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: vectorized, llap - LLAP IO: may be used (ACID table) - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink @@ -642,6 +519,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 3 numRows 12292 @@ -666,54 +544,12 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from acid_ivot POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: acid_ivot - Statistics: Num rows: 12292 Data size: 2989430 Basic stats: COMPLETE Column stats: COMPLETE - Select 
Operator - Statistics: Num rows: 12292 Data size: 2989430 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: vectorized, llap - LLAP IO: may be used (ACID table) - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink diff --git ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out index eda3985d0a..a80463785d 100644 --- ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out +++ ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out @@ -945,6 +945,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MATERIALIZED_VIEW Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 numFiles 3 numRows 3 diff --git ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out index 2c4ee3d79a..fae47575b5 100644 --- ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out +++ ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out @@ -402,6 +402,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MATERIALIZED_VIEW Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"c\":\"true\"}} bucketing_version 2 numFiles 2 numRows 5 diff --git ql/src/test/results/clientpositive/llap/mm_all.q.out ql/src/test/results/clientpositive/llap/mm_all.q.out index 95734b6b4f..500c7fa71f 100644 --- ql/src/test/results/clientpositive/llap/mm_all.q.out +++ ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -1815,6 +1815,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}} bucketing_version 2 numFiles 3 numRows 6 @@ -1865,6 +1866,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}} bucketing_version 2 numFiles 6 numRows 12 @@ -1923,7 +1925,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 numFiles 55 numRows 500 diff --git ql/src/test/results/clientpositive/llap/mm_exim.q.out ql/src/test/results/clientpositive/llap/mm_exim.q.out index 37d3952d37..ee6cf06ea8 100644 
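[Note] The describe-table hunks above add a COLUMN_STATS_ACCURATE table parameter whose JSON value records whether basic stats (numRows and friends) and each column's stats are current. When BASIC_STATS is "true", a query like select count(*) can be answered from metadata alone, which is why the explain output in insert_values_orig_table_use_metadata.q.out collapses the whole Tez DAG into a single Fetch stage with "limit: 1". A minimal Jackson-based sketch of reading that flag; the helper itself is hypothetical, not Hive's actual code path.

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.Map;

    /** Sketch: decide whether numRows in table metadata can answer count(*).
     *  The parameter name comes from the output above; the helper is hypothetical. */
    public class BasicStatsCheck {
        private static final ObjectMapper MAPPER = new ObjectMapper();

        static boolean canAnswerCountFromMetadata(Map<String, String> tableParams) throws Exception {
            String json = tableParams.get("COLUMN_STATS_ACCURATE");
            if (json == null) {
                return false;                                  // stats never computed or invalidated
            }
            JsonNode node = MAPPER.readTree(json);
            return "true".equals(node.path("BASIC_STATS").asText("false"));
        }

        public static void main(String[] args) throws Exception {
            Map<String, String> params = Map.of(
                "COLUMN_STATS_ACCURATE", "{\"BASIC_STATS\":\"true\"}",
                "numRows", "12288");
            if (canAnswerCountFromMetadata(params)) {
                System.out.println("count(*) = " + params.get("numRows")); // metadata-only answer
            }
        }
    }

The per-column map in the same JSON (e.g. {"key":"true"} in the mm_all.q.out hunks) plays the analogous role for column statistics: a column's stats are consulted only while its flag stays true.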
--- ql/src/test/results/clientpositive/llap/mm_exim.q.out +++ ql/src/test/results/clientpositive/llap/mm_exim.q.out @@ -386,6 +386,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 numFiles 3 numPartitions 3 diff --git ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out index 7d96f5b206..d4b55bbf90 100644 --- ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out +++ ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out @@ -58,20 +58,20 @@ STAGE PLANS: TableScan alias: a filterExpr: (UDFToDouble(key) >= 0.0D) (type: boolean) - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (UDFToDouble(key) >= 0.0D) (type: boolean) - Statistics: Num rows: 166 Data size: 29077 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 166 Data size: 29077 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -82,10 +82,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -128,19 +128,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tab2_n5 - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: max(key) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: 
string) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -151,10 +151,10 @@ STAGE PLANS: aggregations: max(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -199,19 +199,19 @@ STAGE PLANS: TableScan alias: tab1_n6 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -219,19 +219,19 @@ STAGE PLANS: TableScan alias: tab2_n5 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -243,15 +243,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 522 Data size: 91524 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data 
size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -260,10 +260,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -397,20 +397,20 @@ STAGE PLANS: TableScan alias: a filterExpr: (UDFToDouble(key) >= 0.0D) (type: boolean) - Statistics: Num rows: 501 Data size: 87768 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (UDFToDouble(key) >= 0.0D) (type: boolean) - Statistics: Num rows: 167 Data size: 29256 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 167 Data size: 14529 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 167 Data size: 29256 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 167 Data size: 14529 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -421,10 +421,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -495,19 +495,19 @@ STAGE PLANS: TableScan alias: tab1_n6 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 501 Data size: 87768 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 476 Data size: 83388 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 476 Data size: 83388 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: 
string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 476 Data size: 83388 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -515,19 +515,19 @@ STAGE PLANS: TableScan alias: tab2_n5 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -539,15 +539,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 523 Data size: 91726 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 792 Data size: 6336 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -556,10 +556,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -640,19 +640,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tab2_n5 - Statistics: Num rows: 501 Data size: 87768 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 501 Data size: 87768 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: max(key) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num 
rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -663,10 +663,10 @@ STAGE PLANS: aggregations: max(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -712,19 +712,19 @@ STAGE PLANS: TableScan alias: tab1_n6 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 501 Data size: 87768 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 476 Data size: 83388 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 476 Data size: 83388 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 476 Data size: 83388 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -732,19 +732,19 @@ STAGE PLANS: TableScan alias: tab2_n5 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 501 Data size: 87768 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 476 Data size: 83388 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 476 Data size: 83388 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 476 Data size: 83388 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -756,15 +756,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 523 Data size: 91726 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 794 Data size: 6352 Basic stats: COMPLETE Column stats: COMPLETE 
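[Note] Two estimate changes repeat through the results_cache hunks: the "key is not null" filter no longer shaves rows off (475 -> 500, because complete column stats record zero nulls for key), and the join output grows from 522 to 791 rows because cardinality is now derived from distinct-value counts rather than a fixed fudge factor. Below is a sketch of the two textbook formulas involved, under the usual independence assumptions; Hive's planner applies refinements beyond this, and the NDV used here is assumed.

    /** Textbook cardinality estimates; the inputs are illustrative. */
    public class CardinalityEstimates {

        /** Selectivity of "col IS NOT NULL" from column stats: 1 - nulls/rows. */
        static long notNullRows(long numRows, long numNulls) {
            if (numRows == 0) return 0;
            return Math.round(numRows * (1.0 - (double) numNulls / numRows));
        }

        /** Equi-join size estimate: |R|*|S| / max(ndv(R.k), ndv(S.k)). */
        static long joinRows(long leftRows, long rightRows, long leftNdv, long rightNdv) {
            return (leftRows * rightRows) / Math.max(leftNdv, rightNdv);
        }

        public static void main(String[] args) {
            // With complete stats recording zero nulls, the filter keeps all 500 rows.
            System.out.println(notNullRows(500, 0));          // 500, as in the hunks above
            // 500 x 500 rows joined on key with ndv(key) = 316 (assumed) gives ~791 rows.
            System.out.println(joinRows(500, 500, 316, 316)); // 791
        }
    }

The 792 and 794 figures in the 501-row variants above fall out of the same formula (501 x 500 / 316 and 501 x 501 / 316, truncated).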
Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -773,10 +773,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out index a01190c433..b57730a2f3 100644 --- ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out +++ ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out @@ -56,19 +56,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tab1_n1 - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: max(key) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -79,10 +79,10 @@ STAGE PLANS: aggregations: max(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -152,19 +152,19 @@ STAGE PLANS: TableScan alias: tab1_n1 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 475 Data size: 
83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -172,19 +172,19 @@ STAGE PLANS: TableScan alias: tab2_n1 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -196,15 +196,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 522 Data size: 91524 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -213,10 +213,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -429,19 +429,19 @@ STAGE PLANS: TableScan alias: tab1_n1 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic 
stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -473,15 +473,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 550 Data size: 47850 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -490,10 +490,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -541,19 +541,19 @@ STAGE PLANS: TableScan alias: tab1_n1 filterExpr: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 475 Data size: 83204 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -585,15 +585,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 
550 Data size: 47850 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Execution mode: vectorized, llap @@ -602,10 +602,10 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git ql/src/test/results/clientpositive/mm_all.q.out ql/src/test/results/clientpositive/mm_all.q.out index e7df4c0a29..e5428bbe07 100644 --- ql/src/test/results/clientpositive/mm_all.q.out +++ ql/src/test/results/clientpositive/mm_all.q.out @@ -1829,6 +1829,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}} bucketing_version 2 numFiles 1 numRows 6 @@ -1879,6 +1880,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}} bucketing_version 2 numFiles 2 numRows 12 @@ -1937,7 +1939,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 numFiles 1 numRows 500 diff --git ql/src/test/results/clientpositive/mm_default.q.out ql/src/test/results/clientpositive/mm_default.q.out index 4ba6aa5223..5a855544bb 100644 --- ql/src/test/results/clientpositive/mm_default.q.out +++ ql/src/test/results/clientpositive/mm_default.q.out @@ -180,7 +180,7 @@ Retention: 0 #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 numFiles 1 numRows 1 diff --git ql/src/test/results/clientpositive/stats_nonpart.q.out ql/src/test/results/clientpositive/stats_nonpart.q.out new file mode 100644 index 0000000000..7df570a121 --- /dev/null +++ ql/src/test/results/clientpositive/stats_nonpart.q.out @@ -0,0 +1,332 @@ +PREHOOK: query: drop table if exists mysource +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists mysource +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table mysource (p int,key int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mysource +POSTHOOK: query: create table mysource (p int,key int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mysource +PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50) 
+PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +PREHOOK: query: insert into mysource values (100,30), (101,50), (102,60) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,30), (101,50), (102,60) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +PREHOOK: query: drop table if exists stats_nonpartitioned +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists stats_nonpartitioned +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats_nonpartitioned +POSTHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats_nonpartitioned +PREHOOK: query: explain select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +0 +PREHOOK: query: desc formatted stats_nonpartitioned +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_nonpartitioned +POSTHOOK: query: desc formatted stats_nonpartitioned +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_nonpartitioned +# col_name data_type comment +key int +value int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + bucketing_version 2 + numFiles 0 + numRows 0 + rawDataSize 0 + totalSize 0 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on 
stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: mysource + filterExpr: (p = 100) (type: boolean) + Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (p = 100) (type: boolean) + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 100 (type: int), key (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.stats_nonpartitioned + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: int) + outputColumnNames: key, value + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.stats_nonpartitioned + Write Type: INSERT + + Stage: Stage-2 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, int + Table: default.stats_nonpartitioned + +PREHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_nonpartitioned +POSTHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_nonpartitioned +POSTHOOK: Lineage: stats_nonpartitioned.key SIMPLE [] +POSTHOOK: Lineage: stats_nonpartitioned.value SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: desc formatted stats_nonpartitioned +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_nonpartitioned +POSTHOOK: query: desc formatted stats_nonpartitioned +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_nonpartitioned +# col_name data_type comment +key int +value int + +# Detailed Table Information +Database: default +#### A masked
pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + bucketing_version 2 + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 719 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +2 +PREHOOK: query: explain select count(key) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +2 +PREHOOK: query: analyze table stats_nonpartitioned compute statistics for columns key, value +PREHOOK: type: ANALYZE_TABLE +PREHOOK: Input: default@stats_nonpartitioned +PREHOOK: Output: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: analyze table stats_nonpartitioned compute statistics for columns key, value +POSTHOOK: type: ANALYZE_TABLE +POSTHOOK: Input: default@stats_nonpartitioned +POSTHOOK: Output: default@stats_nonpartitioned +#### A masked pattern was here #### +PREHOOK: query: explain select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +2 +PREHOOK: query: explain select count(key) from stats_nonpartitioned +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_nonpartitioned +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select 
count(key) from stats_nonpartitioned +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_nonpartitioned +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_nonpartitioned +#### A masked pattern was here #### +2 diff --git ql/src/test/results/clientpositive/stats_part.q.out ql/src/test/results/clientpositive/stats_part.q.out new file mode 100644 index 0000000000..51bdfabacf --- /dev/null +++ ql/src/test/results/clientpositive/stats_part.q.out @@ -0,0 +1,661 @@ +PREHOOK: query: drop table if exists mysource +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists mysource +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table mysource (p int, key int, value int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mysource +POSTHOOK: query: create table mysource (p int, key int, value int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mysource +PREHOOK: query: insert into mysource values (100,20,201), (101,40,401), (102,50,501) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20,201), (101,40,401), (102,50,501) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: insert into mysource values (100,21,211), (101,41,411), (102,51,511) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,21,211), (101,41,411), (102,51,511) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: drop table if exists stats_partitioned +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists stats_partitioned +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats_part +POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats_part +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: explain select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Filter Operator + predicate: (p 
> 100) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + aggregations: count(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 0 + numPartitions 0 + numRows 0 + rawDataSize 0 + totalSize 0 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=100 +POSTHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=100 +POSTHOOK: Lineage: stats_part PARTITION(p=100).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=100).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=101 +POSTHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE 
[(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=101).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=102 +POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=102).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 3 + numPartitions 3 + numRows 6 + rawDataSize 0 + totalSize 2244 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table mysource values (103,20,200), (103,83,832), (103,53,530) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into table mysource values (103,20,200), (103,83,832), (103,53,530) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=102 +POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=102).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# 
Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 4 + numPartitions 3 + numRows 8 + rawDataSize 0 + totalSize 2998 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: show partitions stats_part +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@stats_part +POSTHOOK: query: show partitions stats_part +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@stats_part +p=100 +p=101 +p=102 +PREHOOK: query: explain select count(*) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: explain select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +6 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +51 +PREHOOK: query: desc formatted stats_part +PREHOOK: type: 
DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 4 + numPartitions 3 + numRows 8 + rawDataSize 0 + totalSize 2998 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +51 +PREHOOK: query: select count(value) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(value) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: select count(value) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(value) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 4 + numPartitions 3 + numRows 8 + rawDataSize 0 + totalSize 2998 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select count(*) from stats_part where p = 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p = 100 +POSTHOOK: type: QUERY 
+STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p = 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p = 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +2 +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +6 +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +6 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +51 +PREHOOK: query: describe extended stats_part partition (p=101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: describe extended stats_part partition (p=101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +key int +value string +p int + +# Partition Information +# col_name data_type comment +p int + +#### A masked pattern was here #### +PREHOOK: query: describe extended stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: describe extended stats_part 
+POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +key int +value string +p int + +# Partition Information +# col_name data_type comment +p int + +#### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/stats_part2.q.out ql/src/test/results/clientpositive/stats_part2.q.out new file mode 100644 index 0000000000..9c22ce7702 --- /dev/null +++ ql/src/test/results/clientpositive/stats_part2.q.out @@ -0,0 +1,1265 @@ +PREHOOK: query: drop table if exists mysource +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists mysource +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table mysource (p int, key int, value string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mysource +POSTHOOK: query: create table mysource (p int, key int, value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mysource +PREHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: drop table if exists stats_partitioned +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists stats_partitioned +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats_part +POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats_part +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic 
stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Filter Operator + predicate: (p > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + aggregations: max(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col0 (type: int) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 0 + numPartitions 0 + numRows 0 + rawDataSize 0 + totalSize 0 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=100 +POSTHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=100 +POSTHOOK: Lineage: stats_part PARTITION(p=100).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=100).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=101 +POSTHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=101).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=102 +POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=102).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 3 + numPartitions 3 + numRows 6 + rawDataSize 0 + totalSize 2335 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY 
+STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +POSTHOOK: Lineage: mysource.value SCRIPT [] +PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_part@p=102 +POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: stats_part PARTITION(p=102).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: desc formatted stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 4 + numPartitions 3 + numRows 8 + rawDataSize 0 + totalSize 3124 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: show partitions stats_part +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@stats_part +POSTHOOK: query: show partitions stats_part +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@stats_part +p=100 +p=101 +p=102 +PREHOOK: query: explain select count(*) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part 
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +8 +PREHOOK: query: explain select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +6 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +51 +PREHOOK: query: desc formatted stats_part partition(p = 100) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 100) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [100] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 756 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [101] +Database: default +Table: stats_part +#### A masked pattern was 
here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 789 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 102) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 102) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [102] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 2 + numRows 4 + rawDataSize 0 + totalSize 1579 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: update stats_part set key = key + 100 where key in(-50,40) and p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +PREHOOK: Output: default@stats_part@p=101 +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: update stats_part set key = key + 100 where key in(-50,40) and p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Output: default@stats_part@p=102 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + aggregations: max(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col0 (type: int) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 
Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +140 +PREHOOK: query: desc formatted stats_part partition(p = 100) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 100) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [100] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 756 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [101] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 3 + numRows 2 + rawDataSize 0 + totalSize 2235 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 102) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 102) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [102] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 2 + numRows 4 + rawDataSize 0 + 
totalSize 1579 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select count(value) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=100 +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select count(value) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=100 +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +8 +PREHOOK: query: update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +PREHOOK: Output: default@stats_part@p=101 +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Output: default@stats_part@p=102 +PREHOOK: query: desc formatted stats_part partition(p = 100) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 100) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [100] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 756 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [101] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 3 + numRows 2 + rawDataSize 0 + totalSize 2235 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 102) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 102) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [102] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 2 + numRows 4 + rawDataSize 0 + totalSize 1579 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select count(value) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=100 +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select count(value) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=100 +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +8 +PREHOOK: query: delete from stats_part where key in (20, 41) +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=100 +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +PREHOOK: Output: default@stats_part@p=100 +PREHOOK: Output: default@stats_part@p=101 +PREHOOK: Output: default@stats_part@p=102 +POSTHOOK: query: delete from stats_part where key in (20, 41) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=100 +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +POSTHOOK: Output: default@stats_part@p=100 +POSTHOOK: Output: default@stats_part@p=101 +POSTHOOK: Output: default@stats_part@p=102 +PREHOOK: query: desc formatted stats_part partition(p = 100) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 100) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [100] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 2 + numRows 1 + rawDataSize 0 + totalSize 1453 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: 
desc formatted stats_part partition(p = 101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [101] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 4 + numRows 1 + rawDataSize 0 + totalSize 2929 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted stats_part partition(p = 102) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: desc formatted stats_part partition(p = 102) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +# col_name data_type comment +key int +value string + +# Partition Information +# col_name data_type comment +p int + +# Detailed Partition Information +Partition Value: [102] +Database: default +Table: stats_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + numFiles 2 + numRows 4 + rawDataSize 0 + totalSize 1579 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select count(*) from stats_part where p = 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p = 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p = 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p = 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +1 +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +5 +PREHOOK: query: explain select count(key) from stats_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(key) from stats_part +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 
depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from stats_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=100 +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from stats_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=100 +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +6 +PREHOOK: query: explain select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +#### A masked pattern was here #### +5 +PREHOOK: query: explain select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: stats_part + filterExpr: (p > 100) (type: boolean) + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + aggregations: max(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col0 (type: int) + 
Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select max(key) from stats_part where p > 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_part +PREHOOK: Input: default@stats_part@p=101 +PREHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +POSTHOOK: query: select max(key) from stats_part where p > 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_part +POSTHOOK: Input: default@stats_part@p=101 +POSTHOOK: Input: default@stats_part@p=102 +#### A masked pattern was here #### +140 +PREHOOK: query: describe extended stats_part partition (p=101) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: describe extended stats_part partition (p=101) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +key int +value string +p int + +# Partition Information +# col_name data_type comment +p int + +#### A masked pattern was here #### +PREHOOK: query: describe extended stats_part +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_part +POSTHOOK: query: describe extended stats_part +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_part +key int +value string +p int + +# Partition Information +# col_name data_type comment +p int + +#### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/stats_sizebug.q.out ql/src/test/results/clientpositive/stats_sizebug.q.out new file mode 100644 index 0000000000..648a9fa562 --- /dev/null +++ ql/src/test/results/clientpositive/stats_sizebug.q.out @@ -0,0 +1,217 @@ +PREHOOK: query: drop table if exists mysource +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists mysource +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table mysource (p int,key int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mysource +POSTHOOK: query: create table mysource (p int,key int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mysource +PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@mysource +POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@mysource +POSTHOOK: Lineage: mysource.key SCRIPT [] +POSTHOOK: Lineage: mysource.p SCRIPT [] +PREHOOK: query: drop table if exists stats_nonpartitioned 
+PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists stats_nonpartitioned +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stats_nonpartitioned +POSTHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stats_nonpartitioned +PREHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: mysource + filterExpr: (p = 100) (type: boolean) + Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (p = 100) (type: boolean) + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 100 (type: int), key (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.stats_nonpartitioned + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: int) + outputColumnNames: key, value + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.stats_nonpartitioned + Write Type: INSERT + + Stage: Stage-2 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, int + Table: default.stats_nonpartitioned + +PREHOOK:
query: insert into table stats_nonpartitioned select * from mysource where p == 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@mysource +PREHOOK: Output: default@stats_nonpartitioned +POSTHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@stats_nonpartitioned +POSTHOOK: Lineage: stats_nonpartitioned.key SIMPLE [] +POSTHOOK: Lineage: stats_nonpartitioned.value SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: desc formatted stats_nonpartitioned +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_nonpartitioned +POSTHOOK: query: desc formatted stats_nonpartitioned +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_nonpartitioned +# col_name data_type comment +key int +value int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + bucketing_version 2 + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 718 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: analyze table mysource compute statistics for columns p, key +PREHOOK: type: ANALYZE_TABLE +PREHOOK: Input: default@mysource +PREHOOK: Output: default@mysource +#### A masked pattern was here #### +POSTHOOK: query: analyze table mysource compute statistics for columns p, key +POSTHOOK: type: ANALYZE_TABLE +POSTHOOK: Input: default@mysource +POSTHOOK: Output: default@mysource +#### A masked pattern was here #### +PREHOOK: query: desc formatted stats_nonpartitioned +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@stats_nonpartitioned +POSTHOOK: query: desc formatted stats_nonpartitioned +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@stats_nonpartitioned +# col_name data_type comment +key int +value int + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} + bucketing_version 2 + numFiles 1 + numRows 2 + rawDataSize 0 + totalSize 718 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 diff --git ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out index 7093fc676b..5a50431d26 100644 --- ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out +++ ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out @@ -295,17 +295,17 @@ Stage-3 Reducer 2 File Output Operator [FS_8] 
table:{"name:":"default.acid_uami_n2"} - Select Operator [SEL_4] (rows=4/2 width=328) + Select Operator [SEL_4] (rows=2/2 width=302) Output:["_col0","_col1","_col2","_col3"] <-Map 1 [SIMPLE_EDGE] SHUFFLE [RS_3] PartitionCols:UDFToInteger(_col0) - Select Operator [SEL_2] (rows=4/2 width=328) + Select Operator [SEL_2] (rows=2/2 width=302) Output:["_col0","_col1","_col3"] - Filter Operator [FIL_9] (rows=4/2 width=328) + Filter Operator [FIL_9] (rows=2/2 width=226) predicate:((de = 109.23) or (de = 119.23)) - TableScan [TS_0] (rows=4/4 width=328) - default@acid_uami_n2,acid_uami_n2, ACID table,Tbl:COMPLETE,Col:NONE,Output:["i","de","vc"] + TableScan [TS_0] (rows=4/4 width=226) + default@acid_uami_n2,acid_uami_n2, ACID table,Tbl:COMPLETE,Col:COMPLETE,Output:["i","de","vc"] PREHOOK: query: select * from acid_uami_n2 order by de PREHOOK: type: QUERY diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml index 67d8fb41d1..7369ebbaa2 100644 --- standalone-metastore/pom.xml +++ standalone-metastore/pom.xml @@ -30,7 +30,7 @@ Hive Standalone Metastore - 3.1.0 + 4.0.0 UTF-8 diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index a816ae7566..644e75df6e 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -2334,14 +2334,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1226; - ::apache::thrift::protocol::TType _etype1229; - xfer += iprot->readListBegin(_etype1229, _size1226); - this->success.resize(_size1226); - uint32_t _i1230; - for (_i1230 = 0; _i1230 < _size1226; ++_i1230) + uint32_t _size1236; + ::apache::thrift::protocol::TType _etype1239; + xfer += iprot->readListBegin(_etype1239, _size1236); + this->success.resize(_size1236); + uint32_t _i1240; + for (_i1240 = 0; _i1240 < _size1236; ++_i1240) { - xfer += iprot->readString(this->success[_i1230]); + xfer += iprot->readString(this->success[_i1240]); } xfer += iprot->readListEnd(); } @@ -2380,10 +2380,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1231; - for (_iter1231 = this->success.begin(); _iter1231 != this->success.end(); ++_iter1231) + std::vector<std::string> ::const_iterator _iter1241; + for (_iter1241 = this->success.begin(); _iter1241 != this->success.end(); ++_iter1241) { - xfer += oprot->writeString((*_iter1231)); + xfer += oprot->writeString((*_iter1241)); } xfer += oprot->writeListEnd(); } @@ -2428,14 +2428,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1232; - ::apache::thrift::protocol::TType _etype1235; - xfer += iprot->readListBegin(_etype1235, _size1232); - (*(this->success)).resize(_size1232); - uint32_t _i1236; - for (_i1236 = 0; _i1236 < _size1232; ++_i1236) + uint32_t _size1242; + ::apache::thrift::protocol::TType _etype1245; + xfer += iprot->readListBegin(_etype1245, _size1242); + (*(this->success)).resize(_size1242); + uint32_t _i1246; + for (_i1246 = 0; _i1246 < _size1242; ++_i1246) { - xfer +=
iprot->readString((*(this->success))[_i1236]); + xfer += iprot->readString((*(this->success))[_i1246]); } xfer += iprot->readListEnd(); } @@ -2552,14 +2552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1237; - ::apache::thrift::protocol::TType _etype1240; - xfer += iprot->readListBegin(_etype1240, _size1237); - this->success.resize(_size1237); - uint32_t _i1241; - for (_i1241 = 0; _i1241 < _size1237; ++_i1241) + uint32_t _size1247; + ::apache::thrift::protocol::TType _etype1250; + xfer += iprot->readListBegin(_etype1250, _size1247); + this->success.resize(_size1247); + uint32_t _i1251; + for (_i1251 = 0; _i1251 < _size1247; ++_i1251) { - xfer += iprot->readString(this->success[_i1241]); + xfer += iprot->readString(this->success[_i1251]); } xfer += iprot->readListEnd(); } @@ -2598,10 +2598,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1242; - for (_iter1242 = this->success.begin(); _iter1242 != this->success.end(); ++_iter1242) + std::vector<std::string> ::const_iterator _iter1252; - for (_iter1252 = this->success.begin(); _iter1252 != this->success.end(); ++_iter1252) { - xfer += oprot->writeString((*_iter1242)); + xfer += oprot->writeString((*_iter1252)); } xfer += oprot->writeListEnd(); } @@ -2646,14 +2646,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1243; - ::apache::thrift::protocol::TType _etype1246; - xfer += iprot->readListBegin(_etype1246, _size1243); - (*(this->success)).resize(_size1243); - uint32_t _i1247; - for (_i1247 = 0; _i1247 < _size1243; ++_i1247) + uint32_t _size1253; + ::apache::thrift::protocol::TType _etype1256; + xfer += iprot->readListBegin(_etype1256, _size1253); + (*(this->success)).resize(_size1253); + uint32_t _i1257; + for (_i1257 = 0; _i1257 < _size1253; ++_i1257) { - xfer += iprot->readString((*(this->success))[_i1247]); + xfer += iprot->readString((*(this->success))[_i1257]); } xfer += iprot->readListEnd(); } @@ -3715,17 +3715,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1248; - ::apache::thrift::protocol::TType _ktype1249; - ::apache::thrift::protocol::TType _vtype1250; - xfer += iprot->readMapBegin(_ktype1249, _vtype1250, _size1248); - uint32_t _i1252; - for (_i1252 = 0; _i1252 < _size1248; ++_i1252) + uint32_t _size1258; + ::apache::thrift::protocol::TType _ktype1259; + ::apache::thrift::protocol::TType _vtype1260; + xfer += iprot->readMapBegin(_ktype1259, _vtype1260, _size1258); + uint32_t _i1262; + for (_i1262 = 0; _i1262 < _size1258; ++_i1262) { - std::string _key1253; - xfer += iprot->readString(_key1253); - Type& _val1254 = this->success[_key1253]; - xfer += _val1254.read(iprot); + std::string _key1263; + xfer += iprot->readString(_key1263); + Type& _val1264 = this->success[_key1263]; + xfer += _val1264.read(iprot); } xfer += iprot->readMapEnd(); } @@ -3764,11 +3764,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success",
::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::map<std::string, Type> ::const_iterator _iter1255; - for (_iter1255 = this->success.begin(); _iter1255 != this->success.end(); ++_iter1255) + std::map<std::string, Type> ::const_iterator _iter1265; + for (_iter1265 = this->success.begin(); _iter1265 != this->success.end(); ++_iter1265) { - xfer += oprot->writeString(_iter1255->first); - xfer += _iter1255->second.write(oprot); + xfer += oprot->writeString(_iter1265->first); + xfer += _iter1265->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -3813,17 +3813,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1256; - ::apache::thrift::protocol::TType _ktype1257; - ::apache::thrift::protocol::TType _vtype1258; - xfer += iprot->readMapBegin(_ktype1257, _vtype1258, _size1256); - uint32_t _i1260; - for (_i1260 = 0; _i1260 < _size1256; ++_i1260) + uint32_t _size1266; + ::apache::thrift::protocol::TType _ktype1267; + ::apache::thrift::protocol::TType _vtype1268; + xfer += iprot->readMapBegin(_ktype1267, _vtype1268, _size1266); + uint32_t _i1270; + for (_i1270 = 0; _i1270 < _size1266; ++_i1270) { - std::string _key1261; - xfer += iprot->readString(_key1261); - Type& _val1262 = (*(this->success))[_key1261]; - xfer += _val1262.read(iprot); + std::string _key1271; + xfer += iprot->readString(_key1271); + Type& _val1272 = (*(this->success))[_key1271]; + xfer += _val1272.read(iprot); } xfer += iprot->readMapEnd(); } @@ -3977,14 +3977,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1263; - ::apache::thrift::protocol::TType _etype1266; - xfer += iprot->readListBegin(_etype1266, _size1263); - this->success.resize(_size1263); - uint32_t _i1267; - for (_i1267 = 0; _i1267 < _size1263; ++_i1267) + uint32_t _size1273; + ::apache::thrift::protocol::TType _etype1276; + xfer += iprot->readListBegin(_etype1276, _size1273); + this->success.resize(_size1273); + uint32_t _i1277; + for (_i1277 = 0; _i1277 < _size1273; ++_i1277) { - xfer += this->success[_i1267].read(iprot); + xfer += this->success[_i1277].read(iprot); } xfer += iprot->readListEnd(); } @@ -4039,10 +4039,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1268; - for (_iter1268 = this->success.begin(); _iter1268 != this->success.end(); ++_iter1268) + std::vector<FieldSchema> ::const_iterator _iter1278; + for (_iter1278 = this->success.begin(); _iter1278 != this->success.end(); ++_iter1278) { - xfer += (*_iter1268).write(oprot); + xfer += (*_iter1278).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4095,14 +4095,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1269; - ::apache::thrift::protocol::TType _etype1272; - xfer += iprot->readListBegin(_etype1272, _size1269); - (*(this->success)).resize(_size1269); - uint32_t _i1273; - for (_i1273 = 0; _i1273 < _size1269; ++_i1273) + uint32_t _size1279; +
::apache::thrift::protocol::TType _etype1282; + xfer += iprot->readListBegin(_etype1282, _size1279); + (*(this->success)).resize(_size1279); + uint32_t _i1283; + for (_i1283 = 0; _i1283 < _size1279; ++_i1283) { - xfer += (*(this->success))[_i1273].read(iprot); + xfer += (*(this->success))[_i1283].read(iprot); } xfer += iprot->readListEnd(); } @@ -4288,14 +4288,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1274; - ::apache::thrift::protocol::TType _etype1277; - xfer += iprot->readListBegin(_etype1277, _size1274); - this->success.resize(_size1274); - uint32_t _i1278; - for (_i1278 = 0; _i1278 < _size1274; ++_i1278) + uint32_t _size1284; + ::apache::thrift::protocol::TType _etype1287; + xfer += iprot->readListBegin(_etype1287, _size1284); + this->success.resize(_size1284); + uint32_t _i1288; + for (_i1288 = 0; _i1288 < _size1284; ++_i1288) { - xfer += this->success[_i1278].read(iprot); + xfer += this->success[_i1288].read(iprot); } xfer += iprot->readListEnd(); } @@ -4350,10 +4350,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1279; - for (_iter1279 = this->success.begin(); _iter1279 != this->success.end(); ++_iter1279) + std::vector<FieldSchema> ::const_iterator _iter1289; + for (_iter1289 = this->success.begin(); _iter1289 != this->success.end(); ++_iter1289) { - xfer += (*_iter1279).write(oprot); + xfer += (*_iter1289).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4406,14 +4406,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1280; - ::apache::thrift::protocol::TType _etype1283; - xfer += iprot->readListBegin(_etype1283, _size1280); - (*(this->success)).resize(_size1280); - uint32_t _i1284; - for (_i1284 = 0; _i1284 < _size1280; ++_i1284) + uint32_t _size1290; + ::apache::thrift::protocol::TType _etype1293; + xfer += iprot->readListBegin(_etype1293, _size1290); + (*(this->success)).resize(_size1290); + uint32_t _i1294; + for (_i1294 = 0; _i1294 < _size1290; ++_i1294) { - xfer += (*(this->success))[_i1284].read(iprot); + xfer += (*(this->success))[_i1294].read(iprot); } xfer += iprot->readListEnd(); } @@ -4583,14 +4583,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1285; - ::apache::thrift::protocol::TType _etype1288; - xfer += iprot->readListBegin(_etype1288, _size1285); - this->success.resize(_size1285); - uint32_t _i1289; - for (_i1289 = 0; _i1289 < _size1285; ++_i1289) + uint32_t _size1295; + ::apache::thrift::protocol::TType _etype1298; + xfer += iprot->readListBegin(_etype1298, _size1295); + this->success.resize(_size1295); + uint32_t _i1299; + for (_i1299 = 0; _i1299 < _size1295; ++_i1299) { - xfer += this->success[_i1289].read(iprot); + xfer += this->success[_i1299].read(iprot); } xfer += iprot->readListEnd(); } @@ -4645,10 +4645,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer +=
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1290; - for (_iter1290 = this->success.begin(); _iter1290 != this->success.end(); ++_iter1290) + std::vector<FieldSchema> ::const_iterator _iter1300; + for (_iter1300 = this->success.begin(); _iter1300 != this->success.end(); ++_iter1300) { - xfer += (*_iter1290).write(oprot); + xfer += (*_iter1300).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4701,14 +4701,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1291; - ::apache::thrift::protocol::TType _etype1294; - xfer += iprot->readListBegin(_etype1294, _size1291); - (*(this->success)).resize(_size1291); - uint32_t _i1295; - for (_i1295 = 0; _i1295 < _size1291; ++_i1295) + uint32_t _size1301; + ::apache::thrift::protocol::TType _etype1304; + xfer += iprot->readListBegin(_etype1304, _size1301); + (*(this->success)).resize(_size1301); + uint32_t _i1305; + for (_i1305 = 0; _i1305 < _size1301; ++_i1305) { - xfer += (*(this->success))[_i1295].read(iprot); + xfer += (*(this->success))[_i1305].read(iprot); } xfer += iprot->readListEnd(); } @@ -4894,14 +4894,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1296; - ::apache::thrift::protocol::TType _etype1299; - xfer += iprot->readListBegin(_etype1299, _size1296); - this->success.resize(_size1296); - uint32_t _i1300; - for (_i1300 = 0; _i1300 < _size1296; ++_i1300) + uint32_t _size1306; + ::apache::thrift::protocol::TType _etype1309; + xfer += iprot->readListBegin(_etype1309, _size1306); + this->success.resize(_size1306); + uint32_t _i1310; + for (_i1310 = 0; _i1310 < _size1306; ++_i1310) { - xfer += this->success[_i1300].read(iprot); + xfer += this->success[_i1310].read(iprot); } xfer += iprot->readListEnd(); } @@ -4956,10 +4956,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1301; - for (_iter1301 = this->success.begin(); _iter1301 != this->success.end(); ++_iter1301) + std::vector<FieldSchema> ::const_iterator _iter1311; + for (_iter1311 = this->success.begin(); _iter1311 != this->success.end(); ++_iter1311) { - xfer += (*_iter1301).write(oprot); + xfer += (*_iter1311).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5012,14 +5012,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1302; - ::apache::thrift::protocol::TType _etype1305; - xfer += iprot->readListBegin(_etype1305, _size1302); - (*(this->success)).resize(_size1302); - uint32_t _i1306; - for (_i1306 = 0; _i1306 < _size1302; ++_i1306) + uint32_t _size1312; + ::apache::thrift::protocol::TType _etype1315; + xfer += iprot->readListBegin(_etype1315, _size1312); + (*(this->success)).resize(_size1312); + uint32_t _i1316; + for (_i1316 = 0; _i1316 < _size1312; ++_i1316) { - xfer += (*(this->success))[_i1306].read(iprot); + xfer += (*(this->success))[_i1316].read(iprot); } xfer += iprot->readListEnd(); } @@ -5612,14 +5612,14 @@ uint32_t
ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size1307; - ::apache::thrift::protocol::TType _etype1310; - xfer += iprot->readListBegin(_etype1310, _size1307); - this->primaryKeys.resize(_size1307); - uint32_t _i1311; - for (_i1311 = 0; _i1311 < _size1307; ++_i1311) + uint32_t _size1317; + ::apache::thrift::protocol::TType _etype1320; + xfer += iprot->readListBegin(_etype1320, _size1317); + this->primaryKeys.resize(_size1317); + uint32_t _i1321; + for (_i1321 = 0; _i1321 < _size1317; ++_i1321) { - xfer += this->primaryKeys[_i1311].read(iprot); + xfer += this->primaryKeys[_i1321].read(iprot); } xfer += iprot->readListEnd(); } @@ -5632,14 +5632,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size1312; - ::apache::thrift::protocol::TType _etype1315; - xfer += iprot->readListBegin(_etype1315, _size1312); - this->foreignKeys.resize(_size1312); - uint32_t _i1316; - for (_i1316 = 0; _i1316 < _size1312; ++_i1316) + uint32_t _size1322; + ::apache::thrift::protocol::TType _etype1325; + xfer += iprot->readListBegin(_etype1325, _size1322); + this->foreignKeys.resize(_size1322); + uint32_t _i1326; + for (_i1326 = 0; _i1326 < _size1322; ++_i1326) { - xfer += this->foreignKeys[_i1316].read(iprot); + xfer += this->foreignKeys[_i1326].read(iprot); } xfer += iprot->readListEnd(); } @@ -5652,14 +5652,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size1317; - ::apache::thrift::protocol::TType _etype1320; - xfer += iprot->readListBegin(_etype1320, _size1317); - this->uniqueConstraints.resize(_size1317); - uint32_t _i1321; - for (_i1321 = 0; _i1321 < _size1317; ++_i1321) + uint32_t _size1327; + ::apache::thrift::protocol::TType _etype1330; + xfer += iprot->readListBegin(_etype1330, _size1327); + this->uniqueConstraints.resize(_size1327); + uint32_t _i1331; + for (_i1331 = 0; _i1331 < _size1327; ++_i1331) { - xfer += this->uniqueConstraints[_i1321].read(iprot); + xfer += this->uniqueConstraints[_i1331].read(iprot); } xfer += iprot->readListEnd(); } @@ -5672,14 +5672,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size1322; - ::apache::thrift::protocol::TType _etype1325; - xfer += iprot->readListBegin(_etype1325, _size1322); - this->notNullConstraints.resize(_size1322); - uint32_t _i1326; - for (_i1326 = 0; _i1326 < _size1322; ++_i1326) + uint32_t _size1332; + ::apache::thrift::protocol::TType _etype1335; + xfer += iprot->readListBegin(_etype1335, _size1332); + this->notNullConstraints.resize(_size1332); + uint32_t _i1336; + for (_i1336 = 0; _i1336 < _size1332; ++_i1336) { - xfer += this->notNullConstraints[_i1326].read(iprot); + xfer += this->notNullConstraints[_i1336].read(iprot); } xfer += iprot->readListEnd(); } @@ -5692,14 +5692,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->defaultConstraints.clear(); - uint32_t _size1327; - ::apache::thrift::protocol::TType _etype1330; - xfer += iprot->readListBegin(_etype1330, _size1327); - this->defaultConstraints.resize(_size1327); - uint32_t _i1331; - for 
(_i1331 = 0; _i1331 < _size1327; ++_i1331) + uint32_t _size1337; + ::apache::thrift::protocol::TType _etype1340; + xfer += iprot->readListBegin(_etype1340, _size1337); + this->defaultConstraints.resize(_size1337); + uint32_t _i1341; + for (_i1341 = 0; _i1341 < _size1337; ++_i1341) { - xfer += this->defaultConstraints[_i1331].read(iprot); + xfer += this->defaultConstraints[_i1341].read(iprot); } xfer += iprot->readListEnd(); } @@ -5712,14 +5712,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->checkConstraints.clear(); - uint32_t _size1332; - ::apache::thrift::protocol::TType _etype1335; - xfer += iprot->readListBegin(_etype1335, _size1332); - this->checkConstraints.resize(_size1332); - uint32_t _i1336; - for (_i1336 = 0; _i1336 < _size1332; ++_i1336) + uint32_t _size1342; + ::apache::thrift::protocol::TType _etype1345; + xfer += iprot->readListBegin(_etype1345, _size1342); + this->checkConstraints.resize(_size1342); + uint32_t _i1346; + for (_i1346 = 0; _i1346 < _size1342; ++_i1346) { - xfer += this->checkConstraints[_i1336].read(iprot); + xfer += this->checkConstraints[_i1346].read(iprot); } xfer += iprot->readListEnd(); } @@ -5752,10 +5752,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size())); - std::vector<SQLPrimaryKey> ::const_iterator _iter1337; - for (_iter1337 = this->primaryKeys.begin(); _iter1337 != this->primaryKeys.end(); ++_iter1337) + std::vector<SQLPrimaryKey> ::const_iterator _iter1347; + for (_iter1347 = this->primaryKeys.begin(); _iter1347 != this->primaryKeys.end(); ++_iter1347) { - xfer += (*_iter1337).write(oprot); + xfer += (*_iter1347).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5764,10 +5764,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size())); - std::vector<SQLForeignKey> ::const_iterator _iter1338; - for (_iter1338 = this->foreignKeys.begin(); _iter1338 != this->foreignKeys.end(); ++_iter1338) + std::vector<SQLForeignKey> ::const_iterator _iter1348; + for (_iter1348 = this->foreignKeys.begin(); _iter1348 != this->foreignKeys.end(); ++_iter1348) { - xfer += (*_iter1338).write(oprot); + xfer += (*_iter1348).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5776,10 +5776,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size())); - std::vector<SQLUniqueConstraint> ::const_iterator _iter1339; - for (_iter1339 = this->uniqueConstraints.begin(); _iter1339 != this->uniqueConstraints.end(); ++_iter1339) + std::vector<SQLUniqueConstraint> ::const_iterator _iter1349; + for (_iter1349 = this->uniqueConstraints.begin(); _iter1349 != this->uniqueConstraints.end(); ++_iter1349) { - xfer += (*_iter1339).write(oprot); + xfer += (*_iter1349).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5788,10 +5788,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer +=
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size())); - std::vector<SQLNotNullConstraint> ::const_iterator _iter1340; - for (_iter1340 = this->notNullConstraints.begin(); _iter1340 != this->notNullConstraints.end(); ++_iter1340) + std::vector<SQLNotNullConstraint> ::const_iterator _iter1350; + for (_iter1350 = this->notNullConstraints.begin(); _iter1350 != this->notNullConstraints.end(); ++_iter1350) { - xfer += (*_iter1340).write(oprot); + xfer += (*_iter1350).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5800,10 +5800,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->defaultConstraints.size())); - std::vector<SQLDefaultConstraint> ::const_iterator _iter1341; - for (_iter1341 = this->defaultConstraints.begin(); _iter1341 != this->defaultConstraints.end(); ++_iter1341) + std::vector<SQLDefaultConstraint> ::const_iterator _iter1351; + for (_iter1351 = this->defaultConstraints.begin(); _iter1351 != this->defaultConstraints.end(); ++_iter1351) { - xfer += (*_iter1341).write(oprot); + xfer += (*_iter1351).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5812,10 +5812,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->checkConstraints.size())); - std::vector<SQLCheckConstraint> ::const_iterator _iter1342; - for (_iter1342 = this->checkConstraints.begin(); _iter1342 != this->checkConstraints.end(); ++_iter1342) + std::vector<SQLCheckConstraint> ::const_iterator _iter1352; + for (_iter1352 = this->checkConstraints.begin(); _iter1352 != this->checkConstraints.end(); ++_iter1352) { - xfer += (*_iter1342).write(oprot); + xfer += (*_iter1352).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5843,10 +5843,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size())); - std::vector<SQLPrimaryKey> ::const_iterator _iter1343; - for (_iter1343 = (*(this->primaryKeys)).begin(); _iter1343 != (*(this->primaryKeys)).end(); ++_iter1343) + std::vector<SQLPrimaryKey> ::const_iterator _iter1353; + for (_iter1353 = (*(this->primaryKeys)).begin(); _iter1353 != (*(this->primaryKeys)).end(); ++_iter1353) { - xfer += (*_iter1343).write(oprot); + xfer += (*_iter1353).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5855,10 +5855,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size())); - std::vector<SQLForeignKey> ::const_iterator _iter1344; - for (_iter1344 = (*(this->foreignKeys)).begin(); _iter1344 != (*(this->foreignKeys)).end(); ++_iter1344) + std::vector<SQLForeignKey> ::const_iterator _iter1354; + for (_iter1354 = (*(this->foreignKeys)).begin(); _iter1354 != (*(this->foreignKeys)).end(); ++_iter1354) { - xfer += (*_iter1344).write(oprot); + xfer += (*_iter1354).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5867,10 +5867,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer +=
oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->uniqueConstraints)).size())); - std::vector<SQLUniqueConstraint> ::const_iterator _iter1345; - for (_iter1345 = (*(this->uniqueConstraints)).begin(); _iter1345 != (*(this->uniqueConstraints)).end(); ++_iter1345) + std::vector<SQLUniqueConstraint> ::const_iterator _iter1355; + for (_iter1355 = (*(this->uniqueConstraints)).begin(); _iter1355 != (*(this->uniqueConstraints)).end(); ++_iter1355) { - xfer += (*_iter1345).write(oprot); + xfer += (*_iter1355).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5879,10 +5879,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->notNullConstraints)).size())); - std::vector<SQLNotNullConstraint> ::const_iterator _iter1346; - for (_iter1346 = (*(this->notNullConstraints)).begin(); _iter1346 != (*(this->notNullConstraints)).end(); ++_iter1346) + std::vector<SQLNotNullConstraint> ::const_iterator _iter1356; + for (_iter1356 = (*(this->notNullConstraints)).begin(); _iter1356 != (*(this->notNullConstraints)).end(); ++_iter1356) { - xfer += (*_iter1346).write(oprot); + xfer += (*_iter1356).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5891,10 +5891,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->defaultConstraints)).size())); - std::vector<SQLDefaultConstraint> ::const_iterator _iter1347; - for (_iter1347 = (*(this->defaultConstraints)).begin(); _iter1347 != (*(this->defaultConstraints)).end(); ++_iter1347) + std::vector<SQLDefaultConstraint> ::const_iterator _iter1357; + for (_iter1357 = (*(this->defaultConstraints)).begin(); _iter1357 != (*(this->defaultConstraints)).end(); ++_iter1357) { - xfer += (*_iter1347).write(oprot); + xfer += (*_iter1357).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5903,10 +5903,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->checkConstraints)).size())); - std::vector<SQLCheckConstraint> ::const_iterator _iter1348; - for (_iter1348 = (*(this->checkConstraints)).begin(); _iter1348 != (*(this->checkConstraints)).end(); ++_iter1348) + std::vector<SQLCheckConstraint> ::const_iterator _iter1358; + for (_iter1358 = (*(this->checkConstraints)).begin(); _iter1358 != (*(this->checkConstraints)).end(); ++_iter1358) { - xfer += (*_iter1348).write(oprot); + xfer += (*_iter1358).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8074,14 +8074,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size1349; - ::apache::thrift::protocol::TType _etype1352; - xfer += iprot->readListBegin(_etype1352, _size1349); - this->partNames.resize(_size1349); - uint32_t _i1353; - for (_i1353 = 0; _i1353 < _size1349; ++_i1353) + uint32_t _size1359; + ::apache::thrift::protocol::TType _etype1362; + xfer += iprot->readListBegin(_etype1362, _size1359); + this->partNames.resize(_size1359); + uint32_t _i1363; + for (_i1363 = 0; _i1363 < _size1359; ++_i1363) { - xfer +=
iprot->readString(this->partNames[_i1353]); + xfer += iprot->readString(this->partNames[_i1363]); } xfer += iprot->readListEnd(); } @@ -8118,10 +8118,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter1354; - for (_iter1354 = this->partNames.begin(); _iter1354 != this->partNames.end(); ++_iter1354) + std::vector ::const_iterator _iter1364; + for (_iter1364 = this->partNames.begin(); _iter1364 != this->partNames.end(); ++_iter1364) { - xfer += oprot->writeString((*_iter1354)); + xfer += oprot->writeString((*_iter1364)); } xfer += oprot->writeListEnd(); } @@ -8153,10 +8153,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->partNames)).size())); - std::vector ::const_iterator _iter1355; - for (_iter1355 = (*(this->partNames)).begin(); _iter1355 != (*(this->partNames)).end(); ++_iter1355) + std::vector ::const_iterator _iter1365; + for (_iter1365 = (*(this->partNames)).begin(); _iter1365 != (*(this->partNames)).end(); ++_iter1365) { - xfer += oprot->writeString((*_iter1355)); + xfer += oprot->writeString((*_iter1365)); } xfer += oprot->writeListEnd(); } @@ -8400,14 +8400,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1356; - ::apache::thrift::protocol::TType _etype1359; - xfer += iprot->readListBegin(_etype1359, _size1356); - this->success.resize(_size1356); - uint32_t _i1360; - for (_i1360 = 0; _i1360 < _size1356; ++_i1360) + uint32_t _size1366; + ::apache::thrift::protocol::TType _etype1369; + xfer += iprot->readListBegin(_etype1369, _size1366); + this->success.resize(_size1366); + uint32_t _i1370; + for (_i1370 = 0; _i1370 < _size1366; ++_i1370) { - xfer += iprot->readString(this->success[_i1360]); + xfer += iprot->readString(this->success[_i1370]); } xfer += iprot->readListEnd(); } @@ -8446,10 +8446,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1361; - for (_iter1361 = this->success.begin(); _iter1361 != this->success.end(); ++_iter1361) + std::vector ::const_iterator _iter1371; + for (_iter1371 = this->success.begin(); _iter1371 != this->success.end(); ++_iter1371) { - xfer += oprot->writeString((*_iter1361)); + xfer += oprot->writeString((*_iter1371)); } xfer += oprot->writeListEnd(); } @@ -8494,14 +8494,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1362; - ::apache::thrift::protocol::TType _etype1365; - xfer += iprot->readListBegin(_etype1365, _size1362); - (*(this->success)).resize(_size1362); - uint32_t _i1366; - for (_i1366 = 0; _i1366 < _size1362; ++_i1366) + uint32_t _size1372; + ::apache::thrift::protocol::TType _etype1375; + xfer += iprot->readListBegin(_etype1375, _size1372); + 
(*(this->success)).resize(_size1372); + uint32_t _i1376; + for (_i1376 = 0; _i1376 < _size1372; ++_i1376) { - xfer += iprot->readString((*(this->success))[_i1366]); + xfer += iprot->readString((*(this->success))[_i1376]); } xfer += iprot->readListEnd(); } @@ -8671,14 +8671,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1367; - ::apache::thrift::protocol::TType _etype1370; - xfer += iprot->readListBegin(_etype1370, _size1367); - this->success.resize(_size1367); - uint32_t _i1371; - for (_i1371 = 0; _i1371 < _size1367; ++_i1371) + uint32_t _size1377; + ::apache::thrift::protocol::TType _etype1380; + xfer += iprot->readListBegin(_etype1380, _size1377); + this->success.resize(_size1377); + uint32_t _i1381; + for (_i1381 = 0; _i1381 < _size1377; ++_i1381) { - xfer += iprot->readString(this->success[_i1371]); + xfer += iprot->readString(this->success[_i1381]); } xfer += iprot->readListEnd(); } @@ -8717,10 +8717,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1372; - for (_iter1372 = this->success.begin(); _iter1372 != this->success.end(); ++_iter1372) + std::vector ::const_iterator _iter1382; + for (_iter1382 = this->success.begin(); _iter1382 != this->success.end(); ++_iter1382) { - xfer += oprot->writeString((*_iter1372)); + xfer += oprot->writeString((*_iter1382)); } xfer += oprot->writeListEnd(); } @@ -8765,14 +8765,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1373; - ::apache::thrift::protocol::TType _etype1376; - xfer += iprot->readListBegin(_etype1376, _size1373); - (*(this->success)).resize(_size1373); - uint32_t _i1377; - for (_i1377 = 0; _i1377 < _size1373; ++_i1377) + uint32_t _size1383; + ::apache::thrift::protocol::TType _etype1386; + xfer += iprot->readListBegin(_etype1386, _size1383); + (*(this->success)).resize(_size1383); + uint32_t _i1387; + for (_i1387 = 0; _i1387 < _size1383; ++_i1387) { - xfer += iprot->readString((*(this->success))[_i1377]); + xfer += iprot->readString((*(this->success))[_i1387]); } xfer += iprot->readListEnd(); } @@ -8910,14 +8910,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1378; - ::apache::thrift::protocol::TType _etype1381; - xfer += iprot->readListBegin(_etype1381, _size1378); - this->success.resize(_size1378); - uint32_t _i1382; - for (_i1382 = 0; _i1382 < _size1378; ++_i1382) + uint32_t _size1388; + ::apache::thrift::protocol::TType _etype1391; + xfer += iprot->readListBegin(_etype1391, _size1388); + this->success.resize(_size1388); + uint32_t _i1392; + for (_i1392 = 0; _i1392 < _size1388; ++_i1392) { - xfer += iprot->readString(this->success[_i1382]); + xfer += iprot->readString(this->success[_i1392]); } xfer += iprot->readListEnd(); } @@ -8956,10 +8956,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write( xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size())); - std::vector ::const_iterator _iter1383; - for (_iter1383 = this->success.begin(); _iter1383 != this->success.end(); ++_iter1383) + std::vector ::const_iterator _iter1393; + for (_iter1393 = this->success.begin(); _iter1393 != this->success.end(); ++_iter1393) { - xfer += oprot->writeString((*_iter1383)); + xfer += oprot->writeString((*_iter1393)); } xfer += oprot->writeListEnd(); } @@ -9004,14 +9004,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1384; - ::apache::thrift::protocol::TType _etype1387; - xfer += iprot->readListBegin(_etype1387, _size1384); - (*(this->success)).resize(_size1384); - uint32_t _i1388; - for (_i1388 = 0; _i1388 < _size1384; ++_i1388) + uint32_t _size1394; + ::apache::thrift::protocol::TType _etype1397; + xfer += iprot->readListBegin(_etype1397, _size1394); + (*(this->success)).resize(_size1394); + uint32_t _i1398; + for (_i1398 = 0; _i1398 < _size1394; ++_i1398) { - xfer += iprot->readString((*(this->success))[_i1388]); + xfer += iprot->readString((*(this->success))[_i1398]); } xfer += iprot->readListEnd(); } @@ -9086,14 +9086,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size1389; - ::apache::thrift::protocol::TType _etype1392; - xfer += iprot->readListBegin(_etype1392, _size1389); - this->tbl_types.resize(_size1389); - uint32_t _i1393; - for (_i1393 = 0; _i1393 < _size1389; ++_i1393) + uint32_t _size1399; + ::apache::thrift::protocol::TType _etype1402; + xfer += iprot->readListBegin(_etype1402, _size1399); + this->tbl_types.resize(_size1399); + uint32_t _i1403; + for (_i1403 = 0; _i1403 < _size1399; ++_i1403) { - xfer += iprot->readString(this->tbl_types[_i1393]); + xfer += iprot->readString(this->tbl_types[_i1403]); } xfer += iprot->readListEnd(); } @@ -9130,10 +9130,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter1394; - for (_iter1394 = this->tbl_types.begin(); _iter1394 != this->tbl_types.end(); ++_iter1394) + std::vector ::const_iterator _iter1404; + for (_iter1404 = this->tbl_types.begin(); _iter1404 != this->tbl_types.end(); ++_iter1404) { - xfer += oprot->writeString((*_iter1394)); + xfer += oprot->writeString((*_iter1404)); } xfer += oprot->writeListEnd(); } @@ -9165,10 +9165,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter1395; - for (_iter1395 = (*(this->tbl_types)).begin(); _iter1395 != (*(this->tbl_types)).end(); ++_iter1395) + std::vector ::const_iterator _iter1405; + for (_iter1405 = (*(this->tbl_types)).begin(); _iter1405 != (*(this->tbl_types)).end(); ++_iter1405) { - xfer += oprot->writeString((*_iter1395)); + xfer += oprot->writeString((*_iter1405)); } xfer += oprot->writeListEnd(); } @@ -9209,14 +9209,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1396; - ::apache::thrift::protocol::TType _etype1399; - xfer += iprot->readListBegin(_etype1399, _size1396); - this->success.resize(_size1396); - uint32_t _i1400; - for (_i1400 = 0; _i1400 < _size1396; ++_i1400) + uint32_t _size1406; + ::apache::thrift::protocol::TType _etype1409; + xfer += iprot->readListBegin(_etype1409, _size1406); + this->success.resize(_size1406); + uint32_t _i1410; + for (_i1410 = 0; _i1410 < _size1406; ++_i1410) { - xfer += this->success[_i1400].read(iprot); + xfer += this->success[_i1410].read(iprot); } xfer += iprot->readListEnd(); } @@ -9255,10 +9255,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1401; - for (_iter1401 = this->success.begin(); _iter1401 != this->success.end(); ++_iter1401) + std::vector ::const_iterator _iter1411; + for (_iter1411 = this->success.begin(); _iter1411 != this->success.end(); ++_iter1411) { - xfer += (*_iter1401).write(oprot); + xfer += (*_iter1411).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9303,14 +9303,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1402; - ::apache::thrift::protocol::TType _etype1405; - xfer += iprot->readListBegin(_etype1405, _size1402); - (*(this->success)).resize(_size1402); - uint32_t _i1406; - for (_i1406 = 0; _i1406 < _size1402; ++_i1406) + uint32_t _size1412; + ::apache::thrift::protocol::TType _etype1415; + xfer += iprot->readListBegin(_etype1415, _size1412); + (*(this->success)).resize(_size1412); + uint32_t _i1416; + for (_i1416 = 0; _i1416 < _size1412; ++_i1416) { - xfer += (*(this->success))[_i1406].read(iprot); + xfer += (*(this->success))[_i1416].read(iprot); } xfer += iprot->readListEnd(); } @@ -9448,14 +9448,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1407; - ::apache::thrift::protocol::TType _etype1410; - xfer += iprot->readListBegin(_etype1410, _size1407); - this->success.resize(_size1407); - uint32_t _i1411; - for (_i1411 = 0; _i1411 < _size1407; ++_i1411) + uint32_t _size1417; + ::apache::thrift::protocol::TType _etype1420; + xfer += iprot->readListBegin(_etype1420, _size1417); + this->success.resize(_size1417); + uint32_t _i1421; + for (_i1421 = 0; _i1421 < _size1417; ++_i1421) { - xfer += iprot->readString(this->success[_i1411]); + xfer += iprot->readString(this->success[_i1421]); } xfer += iprot->readListEnd(); } @@ -9494,10 +9494,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1412; - for (_iter1412 = this->success.begin(); _iter1412 != this->success.end(); ++_iter1412) + std::vector ::const_iterator _iter1422; + for (_iter1422 = this->success.begin(); _iter1422 != this->success.end(); ++_iter1422) { - xfer += oprot->writeString((*_iter1412)); + xfer += oprot->writeString((*_iter1422)); } xfer += oprot->writeListEnd(); 
   }
@@ -9542,14 +9542,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size1413;
-            ::apache::thrift::protocol::TType _etype1416;
-            xfer += iprot->readListBegin(_etype1416, _size1413);
-            (*(this->success)).resize(_size1413);
-            uint32_t _i1417;
-            for (_i1417 = 0; _i1417 < _size1413; ++_i1417)
+            uint32_t _size1423;
+            ::apache::thrift::protocol::TType _etype1426;
+            xfer += iprot->readListBegin(_etype1426, _size1423);
+            (*(this->success)).resize(_size1423);
+            uint32_t _i1427;
+            for (_i1427 = 0; _i1427 < _size1423; ++_i1427)
            {
-              xfer += iprot->readString((*(this->success))[_i1417]);
+              xfer += iprot->readString((*(this->success))[_i1427]);
            }
            xfer += iprot->readListEnd();
          }
@@ -9859,14 +9859,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->tbl_names.clear();
-            uint32_t _size1418;
-            ::apache::thrift::protocol::TType _etype1421;
-            xfer += iprot->readListBegin(_etype1421, _size1418);
-            this->tbl_names.resize(_size1418);
-            uint32_t _i1422;
-            for (_i1422 = 0; _i1422 < _size1418; ++_i1422)
+            uint32_t _size1428;
+            ::apache::thrift::protocol::TType _etype1431;
+            xfer += iprot->readListBegin(_etype1431, _size1428);
+            this->tbl_names.resize(_size1428);
+            uint32_t _i1432;
+            for (_i1432 = 0; _i1432 < _size1428; ++_i1432)
            {
-              xfer += iprot->readString(this->tbl_names[_i1422]);
+              xfer += iprot->readString(this->tbl_names[_i1432]);
            }
            xfer += iprot->readListEnd();
          }
@@ -9899,10 +9899,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
-    std::vector<std::string> ::const_iterator _iter1423;
-    for (_iter1423 = this->tbl_names.begin(); _iter1423 != this->tbl_names.end(); ++_iter1423)
+    std::vector<std::string> ::const_iterator _iter1433;
+    for (_iter1433 = this->tbl_names.begin(); _iter1433 != this->tbl_names.end(); ++_iter1433)
     {
-      xfer += oprot->writeString((*_iter1423));
+      xfer += oprot->writeString((*_iter1433));
     }
     xfer += oprot->writeListEnd();
   }
@@ -9930,10 +9930,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1424;
-    for (_iter1424 = (*(this->tbl_names)).begin(); _iter1424 != (*(this->tbl_names)).end(); ++_iter1424)
+    std::vector<std::string> ::const_iterator _iter1434;
+    for (_iter1434 = (*(this->tbl_names)).begin(); _iter1434 != (*(this->tbl_names)).end(); ++_iter1434)
     {
-      xfer += oprot->writeString((*_iter1424));
+      xfer += oprot->writeString((*_iter1434));
     }
     xfer += oprot->writeListEnd();
   }
@@ -9974,14 +9974,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size1425;
-            ::apache::thrift::protocol::TType _etype1428;
-            xfer += iprot->readListBegin(_etype1428, _size1425);
-            this->success.resize(_size1425);
-            uint32_t _i1429;
-            for (_i1429 = 0; _i1429 < _size1425; ++_i1429)
+            uint32_t _size1435;
+            ::apache::thrift::protocol::TType _etype1438;
+            xfer += iprot->readListBegin(_etype1438, _size1435);
+            this->success.resize(_size1435);
+            uint32_t _i1439;
+            for (_i1439 = 0; _i1439 < _size1435; ++_i1439)
            {
-              xfer += this->success[_i1429].read(iprot);
+              xfer += this->success[_i1439].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -10012,10 +10012,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
   xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-    std::vector<Table> ::const_iterator _iter1430;
-    for (_iter1430 = this->success.begin(); _iter1430 != this->success.end(); ++_iter1430)
+    std::vector<Table> ::const_iterator _iter1440;
+    for (_iter1440 = this->success.begin(); _iter1440 != this->success.end(); ++_iter1440)
     {
-      xfer += (*_iter1430).write(oprot);
+      xfer += (*_iter1440).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10056,14 +10056,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size1431;
-            ::apache::thrift::protocol::TType _etype1434;
-            xfer += iprot->readListBegin(_etype1434, _size1431);
-            (*(this->success)).resize(_size1431);
-            uint32_t _i1435;
-            for (_i1435 = 0; _i1435 < _size1431; ++_i1435)
+            uint32_t _size1441;
+            ::apache::thrift::protocol::TType _etype1444;
+            xfer += iprot->readListBegin(_etype1444, _size1441);
+            (*(this->success)).resize(_size1441);
+            uint32_t _i1445;
+            for (_i1445 = 0; _i1445 < _size1441; ++_i1445)
            {
-              xfer += (*(this->success))[_i1435].read(iprot);
+              xfer += (*(this->success))[_i1445].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -10596,14 +10596,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(::
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->tbl_names.clear();
-            uint32_t _size1436;
-            ::apache::thrift::protocol::TType _etype1439;
-            xfer += iprot->readListBegin(_etype1439, _size1436);
-            this->tbl_names.resize(_size1436);
-            uint32_t _i1440;
-            for (_i1440 = 0; _i1440 < _size1436; ++_i1440)
+            uint32_t _size1446;
+            ::apache::thrift::protocol::TType _etype1449;
+            xfer += iprot->readListBegin(_etype1449, _size1446);
+            this->tbl_names.resize(_size1446);
+            uint32_t _i1450;
+            for (_i1450 = 0; _i1450 < _size1446; ++_i1450)
            {
-              xfer += iprot->readString(this->tbl_names[_i1440]);
+              xfer += iprot->readString(this->tbl_names[_i1450]);
            }
            xfer += iprot->readListEnd();
          }
@@ -10636,10 +10636,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(:
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
-    std::vector<std::string> ::const_iterator _iter1441;
-    for (_iter1441 = this->tbl_names.begin(); _iter1441 != this->tbl_names.end(); ++_iter1441)
+    std::vector<std::string> ::const_iterator _iter1451;
+    for (_iter1451 = this->tbl_names.begin(); _iter1451 != this->tbl_names.end(); ++_iter1451)
     {
-      xfer += oprot->writeString((*_iter1441));
+      xfer += oprot->writeString((*_iter1451));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10667,10 +10667,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write(
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1442;
-    for (_iter1442 = (*(this->tbl_names)).begin(); _iter1442 != (*(this->tbl_names)).end(); ++_iter1442)
+    std::vector<std::string> ::const_iterator _iter1452;
+    for (_iter1452 = (*(this->tbl_names)).begin(); _iter1452 != (*(this->tbl_names)).end(); ++_iter1452)
    {
-      xfer += oprot->writeString((*_iter1442));
+      xfer += oprot->writeString((*_iter1452));
    }
    xfer += oprot->writeListEnd();
  }
@@ -10711,17 +10711,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read(
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->success.clear();
-            uint32_t _size1443;
-            ::apache::thrift::protocol::TType _ktype1444;
-            ::apache::thrift::protocol::TType _vtype1445;
-            xfer += iprot->readMapBegin(_ktype1444, _vtype1445, _size1443);
-            uint32_t _i1447;
-            for (_i1447 = 0; _i1447 < _size1443; ++_i1447)
+            uint32_t _size1453;
+            ::apache::thrift::protocol::TType _ktype1454;
+            ::apache::thrift::protocol::TType _vtype1455;
+            xfer += iprot->readMapBegin(_ktype1454, _vtype1455, _size1453);
+            uint32_t _i1457;
+            for (_i1457 = 0; _i1457 < _size1453; ++_i1457)
            {
-              std::string _key1448;
-              xfer += iprot->readString(_key1448);
-              Materialization& _val1449 = this->success[_key1448];
-              xfer += _val1449.read(iprot);
+              std::string _key1458;
+              xfer += iprot->readString(_key1458);
+              Materialization& _val1459 = this->success[_key1458];
+              xfer += _val1459.read(iprot);
            }
            xfer += iprot->readMapEnd();
          }
@@ -10776,11 +10776,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write
   xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-    std::map<std::string, Materialization> ::const_iterator _iter1450;
-    for (_iter1450 = this->success.begin(); _iter1450 != this->success.end(); ++_iter1450)
+    std::map<std::string, Materialization> ::const_iterator _iter1460;
+    for (_iter1460 = this->success.begin(); _iter1460 != this->success.end(); ++_iter1460)
     {
-      xfer += oprot->writeString(_iter1450->first);
-      xfer += _iter1450->second.write(oprot);
+      xfer += oprot->writeString(_iter1460->first);
+      xfer += _iter1460->second.write(oprot);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -10833,17 +10833,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            (*(this->success)).clear();
-            uint32_t _size1451;
-            ::apache::thrift::protocol::TType _ktype1452;
-            ::apache::thrift::protocol::TType _vtype1453;
-            xfer += iprot->readMapBegin(_ktype1452, _vtype1453, _size1451);
-            uint32_t _i1455;
-            for (_i1455 = 0; _i1455 < _size1451; ++_i1455)
+            uint32_t _size1461;
+            ::apache::thrift::protocol::TType _ktype1462;
+            ::apache::thrift::protocol::TType _vtype1463;
+            xfer += iprot->readMapBegin(_ktype1462, _vtype1463, _size1461);
+            uint32_t _i1465;
+            for (_i1465 = 0; _i1465 < _size1461; ++_i1465)
            {
-              std::string _key1456;
-              xfer += iprot->readString(_key1456);
-              Materialization& _val1457 = (*(this->success))[_key1456];
-              xfer += _val1457.read(iprot);
+              std::string _key1466;
+              xfer += iprot->readString(_key1466);
+              Materialization& _val1467 = (*(this->success))[_key1466];
+              xfer += _val1467.read(iprot);
            }
            xfer += iprot->readMapEnd();
          }
@@ -11304,14 +11304,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size1458;
-            ::apache::thrift::protocol::TType _etype1461;
-            xfer += iprot->readListBegin(_etype1461, _size1458);
-            this->success.resize(_size1458);
-            uint32_t _i1462;
-            for (_i1462 = 0; _i1462 < _size1458; ++_i1462)
+            uint32_t _size1468;
+            ::apache::thrift::protocol::TType _etype1471;
+            xfer += iprot->readListBegin(_etype1471, _size1468);
+            this->success.resize(_size1468);
+            uint32_t _i1472;
+            for (_i1472 = 0; _i1472 < _size1468; ++_i1472)
            {
-              xfer += iprot->readString(this->success[_i1462]);
+              xfer += iprot->readString(this->success[_i1472]);
            }
            xfer += iprot->readListEnd();
          }
@@ -11366,10 +11366,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
   xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-    std::vector<std::string> ::const_iterator _iter1463;
-    for (_iter1463 = this->success.begin(); _iter1463 != this->success.end(); ++_iter1463)
+    std::vector<std::string> ::const_iterator _iter1473;
+    for (_iter1473 = this->success.begin(); _iter1473 != this->success.end(); ++_iter1473)
     {
-      xfer += oprot->writeString((*_iter1463));
+      xfer += oprot->writeString((*_iter1473));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11422,14 +11422,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size1464;
-            ::apache::thrift::protocol::TType _etype1467;
-            xfer += iprot->readListBegin(_etype1467, _size1464);
-            (*(this->success)).resize(_size1464);
-            uint32_t _i1468;
-            for (_i1468 = 0; _i1468 < _size1464; ++_i1468)
+            uint32_t _size1474;
+            ::apache::thrift::protocol::TType _etype1477;
+            xfer += iprot->readListBegin(_etype1477, _size1474);
+            (*(this->success)).resize(_size1474);
+            uint32_t _i1478;
+            for (_i1478 = 0; _i1478 < _size1474; ++_i1478)
            {
-              xfer += iprot->readString((*(this->success))[_i1468]);
+              xfer += iprot->readString((*(this->success))[_i1478]);
            }
            xfer += iprot->readListEnd();
          }
@@ -12763,14 +12763,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->new_parts.clear();
-            uint32_t _size1469;
-            ::apache::thrift::protocol::TType _etype1472;
-            xfer += iprot->readListBegin(_etype1472, _size1469);
-            this->new_parts.resize(_size1469);
-            uint32_t _i1473;
-            for (_i1473 = 0; _i1473 < _size1469; ++_i1473)
+            uint32_t _size1479;
+            ::apache::thrift::protocol::TType _etype1482;
+            xfer += iprot->readListBegin(_etype1482, _size1479);
+            this->new_parts.resize(_size1479);
+            uint32_t _i1483;
+            for (_i1483 = 0; _i1483 < _size1479; ++_i1483)
            {
-              xfer += this->new_parts[_i1473].read(iprot);
+              xfer += this->new_parts[_i1483].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -12799,10 +12799,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<Partition> ::const_iterator _iter1474;
-    for (_iter1474 = this->new_parts.begin(); _iter1474 != this->new_parts.end(); ++_iter1474)
+    std::vector<Partition> ::const_iterator _iter1484;
+    for (_iter1484 = this->new_parts.begin(); _iter1484 != this->new_parts.end(); ++_iter1484)
     {
-      xfer += (*_iter1474).write(oprot);
+      xfer += (*_iter1484).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -12826,10 +12826,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<Partition> ::const_iterator _iter1475;
-    for (_iter1475 = (*(this->new_parts)).begin(); _iter1475 != (*(this->new_parts)).end(); ++_iter1475)
+    std::vector<Partition> ::const_iterator _iter1485;
+    for (_iter1485 = (*(this->new_parts)).begin(); _iter1485 != (*(this->new_parts)).end(); ++_iter1485)
    {
-      xfer += (*_iter1475).write(oprot);
+      xfer += (*_iter1485).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
@@ -13038,14 +13038,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->new_parts.clear();
-            uint32_t _size1476;
-            ::apache::thrift::protocol::TType _etype1479;
-            xfer += iprot->readListBegin(_etype1479, _size1476);
-            this->new_parts.resize(_size1476);
-            uint32_t _i1480;
-            for (_i1480 = 0; _i1480 < _size1476; ++_i1480)
+            uint32_t _size1486;
+            ::apache::thrift::protocol::TType _etype1489;
+            xfer += iprot->readListBegin(_etype1489, _size1486);
+            this->new_parts.resize(_size1486);
+            uint32_t _i1490;
+            for (_i1490 = 0; _i1490 < _size1486; ++_i1490)
            {
-              xfer += this->new_parts[_i1480].read(iprot);
+              xfer += this->new_parts[_i1490].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -13074,10 +13074,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1481;
-    for (_iter1481 = this->new_parts.begin(); _iter1481 != this->new_parts.end(); ++_iter1481)
+    std::vector<PartitionSpec> ::const_iterator _iter1491;
+    for (_iter1491 = this->new_parts.begin(); _iter1491 != this->new_parts.end(); ++_iter1491)
     {
-      xfer += (*_iter1481).write(oprot);
+      xfer += (*_iter1491).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -13101,10 +13101,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1482;
-    for (_iter1482 = (*(this->new_parts)).begin(); _iter1482 != (*(this->new_parts)).end(); ++_iter1482)
+    std::vector<PartitionSpec> ::const_iterator _iter1492;
+    for (_iter1492 = (*(this->new_parts)).begin(); _iter1492 != (*(this->new_parts)).end(); ++_iter1492)
     {
-      xfer += (*_iter1482).write(oprot);
+      xfer += (*_iter1492).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -13329,14 +13329,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size1483;
-            ::apache::thrift::protocol::TType _etype1486;
-            xfer += iprot->readListBegin(_etype1486, _size1483);
-            this->part_vals.resize(_size1483);
-            uint32_t _i1487;
-            for (_i1487 = 0; _i1487 < _size1483; ++_i1487)
+            uint32_t _size1493;
+            ::apache::thrift::protocol::TType _etype1496;
+            xfer += iprot->readListBegin(_etype1496, _size1493);
+            this->part_vals.resize(_size1493);
+            uint32_t _i1497;
+            for (_i1497 = 0; _i1497 < _size1493; ++_i1497)
            {
-              xfer += iprot->readString(this->part_vals[_i1487]);
+              xfer += iprot->readString(this->part_vals[_i1497]);
            }
            xfer += iprot->readListEnd();
          }
@@ -13373,10 +13373,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1488;
-    for (_iter1488 = this->part_vals.begin(); _iter1488 != this->part_vals.end(); ++_iter1488)
+    std::vector<std::string> ::const_iterator _iter1498;
+    for (_iter1498 = this->part_vals.begin(); _iter1498 != this->part_vals.end(); ++_iter1498)
     {
-      xfer += oprot->writeString((*_iter1488));
+      xfer += oprot->writeString((*_iter1498));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13408,10 +13408,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1489;
-    for (_iter1489 = (*(this->part_vals)).begin(); _iter1489 != (*(this->part_vals)).end(); ++_iter1489)
+    std::vector<std::string> ::const_iterator _iter1499;
+    for (_iter1499 = (*(this->part_vals)).begin(); _iter1499 != (*(this->part_vals)).end(); ++_iter1499)
     {
-      xfer += oprot->writeString((*_iter1489));
+      xfer += oprot->writeString((*_iter1499));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13883,14 +13883,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size1490;
-            ::apache::thrift::protocol::TType _etype1493;
-            xfer += iprot->readListBegin(_etype1493, _size1490);
-            this->part_vals.resize(_size1490);
-            uint32_t _i1494;
-            for (_i1494 = 0; _i1494 < _size1490; ++_i1494)
+            uint32_t _size1500;
+            ::apache::thrift::protocol::TType _etype1503;
+            xfer += iprot->readListBegin(_etype1503, _size1500);
+            this->part_vals.resize(_size1500);
+            uint32_t _i1504;
+            for (_i1504 = 0; _i1504 < _size1500; ++_i1504)
            {
-              xfer += iprot->readString(this->part_vals[_i1494]);
+              xfer += iprot->readString(this->part_vals[_i1504]);
            }
            xfer += iprot->readListEnd();
          }
@@ -13935,10 +13935,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1495;
-    for (_iter1495 = this->part_vals.begin(); _iter1495 != this->part_vals.end(); ++_iter1495)
+    std::vector<std::string> ::const_iterator _iter1505;
+    for (_iter1505 = this->part_vals.begin(); _iter1505 != this->part_vals.end(); ++_iter1505)
     {
-      xfer += oprot->writeString((*_iter1495));
+      xfer += oprot->writeString((*_iter1505));
    }
    xfer += oprot->writeListEnd();
  }
@@ -13974,10 +13974,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1496;
-    for (_iter1496 = (*(this->part_vals)).begin(); _iter1496 != (*(this->part_vals)).end(); ++_iter1496)
+    std::vector<std::string> ::const_iterator _iter1506;
+    for (_iter1506 = (*(this->part_vals)).begin(); _iter1506 != (*(this->part_vals)).end(); ++_iter1506)
     {
-      xfer += oprot->writeString((*_iter1496));
+      xfer += oprot->writeString((*_iter1506));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14780,14 +14780,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size1497;
-            ::apache::thrift::protocol::TType _etype1500;
-            xfer += iprot->readListBegin(_etype1500, _size1497);
-            this->part_vals.resize(_size1497);
-            uint32_t _i1501;
-            for (_i1501 = 0; _i1501 < _size1497; ++_i1501)
+            uint32_t _size1507;
+            ::apache::thrift::protocol::TType _etype1510;
+            xfer += iprot->readListBegin(_etype1510, _size1507);
+            this->part_vals.resize(_size1507);
+            uint32_t _i1511;
+            for (_i1511 = 0; _i1511 < _size1507; ++_i1511)
            {
-              xfer += iprot->readString(this->part_vals[_i1501]);
+              xfer += iprot->readString(this->part_vals[_i1511]);
            }
            xfer += iprot->readListEnd();
          }
@@ -14832,10 +14832,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1502;
-    for (_iter1502 = this->part_vals.begin(); _iter1502 != this->part_vals.end(); ++_iter1502)
+    std::vector<std::string> ::const_iterator _iter1512;
+    for (_iter1512 = this->part_vals.begin(); _iter1512 != this->part_vals.end(); ++_iter1512)
     {
-      xfer += oprot->writeString((*_iter1502));
+      xfer += oprot->writeString((*_iter1512));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14871,10 +14871,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1503;
-    for (_iter1503 = (*(this->part_vals)).begin(); _iter1503 != (*(this->part_vals)).end(); ++_iter1503)
+    std::vector<std::string> ::const_iterator _iter1513;
+    for (_iter1513 = (*(this->part_vals)).begin(); _iter1513 != (*(this->part_vals)).end(); ++_iter1513)
     {
-      xfer += oprot->writeString((*_iter1503));
+      xfer += oprot->writeString((*_iter1513));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15083,14 +15083,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size1504;
-            ::apache::thrift::protocol::TType _etype1507;
-            xfer += iprot->readListBegin(_etype1507, _size1504);
-            this->part_vals.resize(_size1504);
-            uint32_t _i1508;
-            for (_i1508 = 0; _i1508 < _size1504; ++_i1508)
+            uint32_t _size1514;
+            ::apache::thrift::protocol::TType _etype1517;
+            xfer += iprot->readListBegin(_etype1517, _size1514);
+            this->part_vals.resize(_size1514);
+            uint32_t _i1518;
+            for (_i1518 = 0; _i1518 < _size1514; ++_i1518)
            {
-              xfer += iprot->readString(this->part_vals[_i1508]);
+              xfer += iprot->readString(this->part_vals[_i1518]);
            }
            xfer += iprot->readListEnd();
          }
@@ -15143,10 +15143,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1509;
-    for (_iter1509 = this->part_vals.begin(); _iter1509 != this->part_vals.end(); ++_iter1509)
+    std::vector<std::string> ::const_iterator _iter1519;
+    for (_iter1519 = this->part_vals.begin(); _iter1519 != this->part_vals.end(); ++_iter1519)
     {
-      xfer += oprot->writeString((*_iter1509));
+      xfer += oprot->writeString((*_iter1519));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15186,10 +15186,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1510;
-    for (_iter1510 = (*(this->part_vals)).begin(); _iter1510 != (*(this->part_vals)).end(); ++_iter1510)
+    std::vector<std::string> ::const_iterator _iter1520;
+    for (_iter1520 = (*(this->part_vals)).begin(); _iter1520 != (*(this->part_vals)).end(); ++_iter1520)
     {
-      xfer += oprot->writeString((*_iter1510));
+      xfer += oprot->writeString((*_iter1520));
     }
     xfer += oprot->writeListEnd();
   }
@@ -16195,14 +16195,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size1511;
-            ::apache::thrift::protocol::TType _etype1514;
-            xfer += iprot->readListBegin(_etype1514, _size1511);
-            this->part_vals.resize(_size1511);
-            uint32_t _i1515;
-            for (_i1515 = 0; _i1515 < _size1511; ++_i1515)
+            uint32_t _size1521;
+            ::apache::thrift::protocol::TType _etype1524;
+            xfer += iprot->readListBegin(_etype1524, _size1521);
+            this->part_vals.resize(_size1521);
+            uint32_t _i1525;
+            for (_i1525 = 0; _i1525 < _size1521; ++_i1525)
            {
-              xfer += iprot->readString(this->part_vals[_i1515]);
+              xfer += iprot->readString(this->part_vals[_i1525]);
            }
            xfer += iprot->readListEnd();
          }
@@ -16239,10 +16239,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1516;
-    for (_iter1516 = this->part_vals.begin(); _iter1516 != this->part_vals.end(); ++_iter1516)
+    std::vector<std::string> ::const_iterator _iter1526;
+    for (_iter1526 = this->part_vals.begin(); _iter1526 != this->part_vals.end(); ++_iter1526)
     {
-      xfer += oprot->writeString((*_iter1516));
+      xfer += oprot->writeString((*_iter1526));
     }
     xfer += oprot->writeListEnd();
   }
@@ -16274,10 +16274,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1517;
-    for (_iter1517 = (*(this->part_vals)).begin(); _iter1517 != (*(this->part_vals)).end(); ++_iter1517)
+    std::vector<std::string> ::const_iterator _iter1527;
+    for (_iter1527 = (*(this->part_vals)).begin(); _iter1527 != (*(this->part_vals)).end(); ++_iter1527)
     {
-      xfer += oprot->writeString((*_iter1517));
+      xfer += oprot->writeString((*_iter1527));
     }
     xfer += oprot->writeListEnd();
   }
@@ -16466,17 +16466,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->partitionSpecs.clear();
-            uint32_t _size1518;
-            ::apache::thrift::protocol::TType _ktype1519;
-            ::apache::thrift::protocol::TType _vtype1520;
-            xfer += iprot->readMapBegin(_ktype1519, _vtype1520, _size1518);
-            uint32_t _i1522;
-            for (_i1522 = 0; _i1522 < _size1518; ++_i1522)
+            uint32_t _size1528;
+            ::apache::thrift::protocol::TType _ktype1529;
+            ::apache::thrift::protocol::TType _vtype1530;
+            xfer += iprot->readMapBegin(_ktype1529, _vtype1530, _size1528);
+            uint32_t _i1532;
+            for (_i1532 = 0; _i1532 < _size1528; ++_i1532)
            {
-              std::string _key1523;
-              xfer += iprot->readString(_key1523);
-              std::string& _val1524 = this->partitionSpecs[_key1523];
-              xfer += iprot->readString(_val1524);
+              std::string _key1533;
+              xfer += iprot->readString(_key1533);
+              std::string& _val1534 = this->partitionSpecs[_key1533];
+              xfer += iprot->readString(_val1534);
            }
            xfer += iprot->readMapEnd();
          }
@@ -16537,11 +16537,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1525;
-    for (_iter1525 = this->partitionSpecs.begin(); _iter1525 != this->partitionSpecs.end(); ++_iter1525)
+    std::map<std::string, std::string> ::const_iterator _iter1535;
+    for (_iter1535 = this->partitionSpecs.begin(); _iter1535 != this->partitionSpecs.end(); ++_iter1535)
     {
-      xfer += oprot->writeString(_iter1525->first);
-      xfer += oprot->writeString(_iter1525->second);
+      xfer += oprot->writeString(_iter1535->first);
+      xfer += oprot->writeString(_iter1535->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -16581,11 +16581,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1526;
-    for (_iter1526 = (*(this->partitionSpecs)).begin(); _iter1526 != (*(this->partitionSpecs)).end(); ++_iter1526)
+    std::map<std::string, std::string> ::const_iterator _iter1536;
+    for (_iter1536 = (*(this->partitionSpecs)).begin(); _iter1536 != (*(this->partitionSpecs)).end(); ++_iter1536)
     {
-      xfer += oprot->writeString(_iter1526->first);
-      xfer += oprot->writeString(_iter1526->second);
+      xfer += oprot->writeString(_iter1536->first);
+      xfer += oprot->writeString(_iter1536->second);
    }
    xfer += oprot->writeMapEnd();
  }
@@ -16830,17 +16830,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->partitionSpecs.clear();
-            uint32_t _size1527;
-            ::apache::thrift::protocol::TType _ktype1528;
-            ::apache::thrift::protocol::TType _vtype1529;
-            xfer += iprot->readMapBegin(_ktype1528, _vtype1529, _size1527);
-            uint32_t _i1531;
-            for (_i1531 = 0; _i1531 < _size1527; ++_i1531)
+            uint32_t _size1537;
+            ::apache::thrift::protocol::TType _ktype1538;
+            ::apache::thrift::protocol::TType _vtype1539;
+            xfer += iprot->readMapBegin(_ktype1538, _vtype1539, _size1537);
+            uint32_t _i1541;
+            for (_i1541 = 0; _i1541 < _size1537; ++_i1541)
            {
-              std::string _key1532;
-              xfer += iprot->readString(_key1532);
-              std::string& _val1533 = this->partitionSpecs[_key1532];
-              xfer += iprot->readString(_val1533);
+              std::string _key1542;
+              xfer += iprot->readString(_key1542);
+              std::string& _val1543 = this->partitionSpecs[_key1542];
+              xfer += iprot->readString(_val1543);
            }
            xfer += iprot->readMapEnd();
          }
@@ -16901,11 +16901,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1534;
-    for (_iter1534 = this->partitionSpecs.begin(); _iter1534 != this->partitionSpecs.end(); ++_iter1534)
+    std::map<std::string, std::string> ::const_iterator _iter1544;
+    for (_iter1544 = this->partitionSpecs.begin(); _iter1544 != this->partitionSpecs.end(); ++_iter1544)
     {
-      xfer += oprot->writeString(_iter1534->first);
-      xfer += oprot->writeString(_iter1534->second);
+      xfer += oprot->writeString(_iter1544->first);
+      xfer += oprot->writeString(_iter1544->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -16945,11 +16945,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1535;
-    for (_iter1535 = (*(this->partitionSpecs)).begin(); _iter1535 != (*(this->partitionSpecs)).end(); ++_iter1535)
+    std::map<std::string, std::string> ::const_iterator _iter1545;
+    for (_iter1545 = (*(this->partitionSpecs)).begin(); _iter1545 != (*(this->partitionSpecs)).end(); ++_iter1545)
    {
-      xfer += oprot->writeString(_iter1535->first);
-      xfer += oprot->writeString(_iter1535->second);
+      xfer += oprot->writeString(_iter1545->first);
+      xfer += oprot->writeString(_iter1545->second);
    }
    xfer += oprot->writeMapEnd();
  }
@@ -17006,14 +17006,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size1536;
-            ::apache::thrift::protocol::TType _etype1539;
-            xfer += iprot->readListBegin(_etype1539, _size1536);
-            this->success.resize(_size1536);
-            uint32_t _i1540;
-            for (_i1540 = 0; _i1540 < _size1536; ++_i1540)
+            uint32_t _size1546;
+            ::apache::thrift::protocol::TType _etype1549;
+            xfer += iprot->readListBegin(_etype1549, _size1546);
+            this->success.resize(_size1546);
+            uint32_t _i1550;
+            for (_i1550 = 0; _i1550 < _size1546; ++_i1550)
            {
-              xfer += this->success[_i1540].read(iprot);
+              xfer += this->success[_i1550].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -17076,10 +17076,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-    std::vector<Partition> ::const_iterator _iter1541;
-    for (_iter1541 = this->success.begin(); _iter1541 != this->success.end(); ++_iter1541)
+    std::vector<Partition> ::const_iterator _iter1551;
+    for (_iter1551 = this->success.begin(); _iter1551 != this->success.end(); ++_iter1551)
     {
-      xfer += (*_iter1541).write(oprot);
+      xfer += (*_iter1551).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -17136,14 +17136,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size1542;
-            ::apache::thrift::protocol::TType _etype1545;
-            xfer += iprot->readListBegin(_etype1545, _size1542);
-            (*(this->success)).resize(_size1542);
-            uint32_t _i1546;
-            for (_i1546 = 0; _i1546 < _size1542; ++_i1546)
+            uint32_t _size1552;
+            ::apache::thrift::protocol::TType _etype1555;
+            xfer += iprot->readListBegin(_etype1555, _size1552);
+            (*(this->success)).resize(_size1552);
+            uint32_t _i1556;
+            for (_i1556 = 0; _i1556 < _size1552; ++_i1556)
            {
-              xfer += (*(this->success))[_i1546].read(iprot);
+              xfer += (*(this->success))[_i1556].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -17242,14 +17242,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size1547;
-            ::apache::thrift::protocol::TType _etype1550;
-            xfer += iprot->readListBegin(_etype1550, _size1547);
-            this->part_vals.resize(_size1547);
-            uint32_t _i1551;
-            for (_i1551 = 0; _i1551 < _size1547; ++_i1551)
+            uint32_t _size1557;
+            ::apache::thrift::protocol::TType _etype1560;
+            xfer += iprot->readListBegin(_etype1560, _size1557);
+            this->part_vals.resize(_size1557);
+            uint32_t _i1561;
+            for (_i1561 = 0; _i1561 < _size1557; ++_i1561)
            {
-              xfer += iprot->readString(this->part_vals[_i1551]);
+              xfer += iprot->readString(this->part_vals[_i1561]);
            }
            xfer += iprot->readListEnd();
          }
@@ -17270,14 +17270,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->group_names.clear();
-            uint32_t _size1552;
-            ::apache::thrift::protocol::TType _etype1555;
-            xfer += iprot->readListBegin(_etype1555, _size1552);
-            this->group_names.resize(_size1552);
-            uint32_t _i1556;
-            for (_i1556 = 0; _i1556 < _size1552; ++_i1556)
+            uint32_t _size1562;
+            ::apache::thrift::protocol::TType _etype1565;
+            xfer += iprot->readListBegin(_etype1565, _size1562);
+            this->group_names.resize(_size1562);
+            uint32_t _i1566;
+            for (_i1566 = 0; _i1566 < _size1562; ++_i1566)
            {
-              xfer += iprot->readString(this->group_names[_i1556]);
+              xfer += iprot->readString(this->group_names[_i1566]);
            }
            xfer += iprot->readListEnd();
          }
@@ -17314,10 +17314,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1557;
-    for (_iter1557 = this->part_vals.begin(); _iter1557 != this->part_vals.end(); ++_iter1557)
+    std::vector<std::string> ::const_iterator _iter1567;
+    for (_iter1567 = this->part_vals.begin(); _iter1567 != this->part_vals.end(); ++_iter1567)
     {
-      xfer += oprot->writeString((*_iter1557));
+      xfer += oprot->writeString((*_iter1567));
     }
     xfer += oprot->writeListEnd();
   }
@@ -17330,10 +17330,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter1558;
-    for (_iter1558 = this->group_names.begin(); _iter1558 != this->group_names.end(); ++_iter1558)
+    std::vector<std::string> ::const_iterator _iter1568;
+    for (_iter1568 = this->group_names.begin(); _iter1568 != this->group_names.end(); ++_iter1568)
     {
-      xfer += oprot->writeString((*_iter1558));
+      xfer += oprot->writeString((*_iter1568));
     }
     xfer += oprot->writeListEnd();
   }
@@ -17365,10 +17365,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1559;
-    for (_iter1559 = (*(this->part_vals)).begin(); _iter1559 != (*(this->part_vals)).end(); ++_iter1559)
+    std::vector<std::string> ::const_iterator _iter1569;
+    for (_iter1569 = (*(this->part_vals)).begin(); _iter1569 != (*(this->part_vals)).end(); ++_iter1569)
     {
-      xfer += oprot->writeString((*_iter1559));
+      xfer += oprot->writeString((*_iter1569));
     }
     xfer += oprot->writeListEnd();
   }
@@ -17381,10 +17381,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1560;
-    for (_iter1560 = (*(this->group_names)).begin(); _iter1560 != (*(this->group_names)).end(); ++_iter1560)
+    std::vector<std::string> ::const_iterator _iter1570;
+    for (_iter1570 = (*(this->group_names)).begin(); _iter1570 != (*(this->group_names)).end(); ++_iter1570)
    {
-      xfer += oprot->writeString((*_iter1560));
+      xfer += oprot->writeString((*_iter1570));
    }
    xfer += oprot->writeListEnd();
  }
@@ -17943,14 +17943,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size1561;
-            ::apache::thrift::protocol::TType _etype1564;
-            xfer += iprot->readListBegin(_etype1564, _size1561);
-            this->success.resize(_size1561);
-            uint32_t _i1565;
-            for (_i1565 = 0; _i1565 < _size1561; ++_i1565)
+            uint32_t _size1571;
+            ::apache::thrift::protocol::TType _etype1574;
+            xfer += iprot->readListBegin(_etype1574, _size1571);
+            this->success.resize(_size1571);
+            uint32_t _i1575;
+            for (_i1575 = 0; _i1575 < _size1571; ++_i1575)
            {
-              xfer += this->success[_i1565].read(iprot);
+              xfer += this->success[_i1575].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -17997,10 +17997,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-    std::vector<Partition> ::const_iterator _iter1566;
-    for (_iter1566 = this->success.begin(); _iter1566 != this->success.end(); ++_iter1566)
+    std::vector<Partition> ::const_iterator _iter1576;
+    for (_iter1576 = this->success.begin(); _iter1576 != this->success.end(); ++_iter1576)
     {
-      xfer += (*_iter1566).write(oprot);
+      xfer += (*_iter1576).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -18049,14 +18049,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size1567;
-            ::apache::thrift::protocol::TType _etype1570;
-            xfer += iprot->readListBegin(_etype1570, _size1567);
-            (*(this->success)).resize(_size1567);
-            uint32_t _i1571;
-            for (_i1571 = 0; _i1571 < _size1567; ++_i1571)
+            uint32_t _size1577;
+            ::apache::thrift::protocol::TType _etype1580;
+            xfer += iprot->readListBegin(_etype1580, _size1577);
+            (*(this->success)).resize(_size1577);
+            uint32_t _i1581;
+            for (_i1581 = 0; _i1581 < _size1577; ++_i1581)
            {
-              xfer += (*(this->success))[_i1571].read(iprot);
+              xfer += (*(this->success))[_i1581].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -18155,14 +18155,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->group_names.clear();
-            uint32_t _size1572;
-            ::apache::thrift::protocol::TType _etype1575;
-            xfer += iprot->readListBegin(_etype1575, _size1572);
-            this->group_names.resize(_size1572);
-            uint32_t _i1576;
-            for (_i1576 = 0; _i1576 < _size1572; ++_i1576)
+            uint32_t _size1582;
+            ::apache::thrift::protocol::TType _etype1585;
+            xfer += iprot->readListBegin(_etype1585, _size1582);
+            this->group_names.resize(_size1582);
+            uint32_t _i1586;
+            for (_i1586 = 0; _i1586 < _size1582; ++_i1586)
            {
-              xfer += iprot->readString(this->group_names[_i1576]);
+              xfer += iprot->readString(this->group_names[_i1586]);
            }
            xfer += iprot->readListEnd();
          }
@@ -18207,10 +18207,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter1577;
-    for (_iter1577 = this->group_names.begin(); _iter1577 != this->group_names.end(); ++_iter1577)
+    std::vector<std::string> ::const_iterator _iter1587;
+    for (_iter1587 = this->group_names.begin(); _iter1587 != this->group_names.end(); ++_iter1587)
     {
-      xfer += oprot->writeString((*_iter1577));
+      xfer += oprot->writeString((*_iter1587));
    }
    xfer += oprot->writeListEnd();
  }
@@ -18250,10 +18250,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1578;
-    for (_iter1578 = (*(this->group_names)).begin(); _iter1578 != (*(this->group_names)).end(); ++_iter1578)
+    std::vector<std::string> ::const_iterator _iter1588;
+    for (_iter1588 = (*(this->group_names)).begin(); _iter1588 != (*(this->group_names)).end(); ++_iter1588)
     {
-      xfer += oprot->writeString((*_iter1578));
+      xfer += oprot->writeString((*_iter1588));
     }
     xfer += oprot->writeListEnd();
   }
@@ -18294,14 +18294,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size1579;
-            ::apache::thrift::protocol::TType _etype1582;
-            xfer += iprot->readListBegin(_etype1582, _size1579);
-            this->success.resize(_size1579);
-            uint32_t _i1583;
-            for (_i1583 = 0; _i1583 < _size1579; ++_i1583)
+            uint32_t _size1589;
+            ::apache::thrift::protocol::TType _etype1592;
+            xfer += iprot->readListBegin(_etype1592, _size1589);
+            this->success.resize(_size1589);
+            uint32_t _i1593;
+            for (_i1593 = 0; _i1593 < _size1589; ++_i1593)
            {
-              xfer += this->success[_i1583].read(iprot);
+              xfer += this->success[_i1593].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -18348,10 +18348,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th
   xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-    std::vector<Partition> ::const_iterator _iter1584;
-    for (_iter1584 = this->success.begin(); _iter1584 != this->success.end(); ++_iter1584)
+    std::vector<Partition> ::const_iterator _iter1594;
+    for (_iter1594 = this->success.begin(); _iter1594 != this->success.end(); ++_iter1594)
    {
-      xfer += (*_iter1584).write(oprot);
+      xfer += (*_iter1594).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
@@ -18400,14 +18400,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size1585;
-            ::apache::thrift::protocol::TType _etype1588;
-            xfer += iprot->readListBegin(_etype1588, _size1585);
-            (*(this->success)).resize(_size1585);
-            uint32_t _i1589;
-            for (_i1589 = 0; _i1589 < _size1585; ++_i1589)
+            uint32_t _size1595;
+            ::apache::thrift::protocol::TType _etype1598;
+            xfer += iprot->readListBegin(_etype1598, _size1595);
+            (*(this->success)).resize(_size1595);
+            uint32_t _i1599;
+            for (_i1599 = 0; _i1599 < _size1595; ++_i1599)
            {
-              xfer += (*(this->success))[_i1589].read(iprot);
+              xfer += (*(this->success))[_i1599].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -18585,14 +18585,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size1590;
-            ::apache::thrift::protocol::TType _etype1593;
-            xfer += iprot->readListBegin(_etype1593, _size1590);
-            this->success.resize(_size1590);
-            uint32_t _i1594;
-            for (_i1594 = 0; _i1594 < _size1590; ++_i1594)
+            uint32_t _size1600;
+            ::apache::thrift::protocol::TType _etype1603;
+            xfer += iprot->readListBegin(_etype1603, _size1600);
+            this->success.resize(_size1600);
+            uint32_t _i1604;
+            for (_i1604 = 0; _i1604 < _size1600; ++_i1604)
            {
-              xfer += this->success[_i1594].read(iprot);
+              xfer += this->success[_i1604].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -18639,10 +18639,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift
   xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1595;
-    for (_iter1595 = this->success.begin(); _iter1595 != this->success.end(); ++_iter1595)
+    std::vector<PartitionSpec> ::const_iterator _iter1605;
+    for (_iter1605 = this->success.begin(); _iter1605 != this->success.end(); ++_iter1605)
     {
-      xfer += (*_iter1595).write(oprot);
+      xfer += (*_iter1605).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -18691,14 +18691,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size1596;
-            ::apache::thrift::protocol::TType _etype1599;
-            xfer += iprot->readListBegin(_etype1599, _size1596);
-            (*(this->success)).resize(_size1596);
-            uint32_t _i1600;
-            for (_i1600 = 0; _i1600 < _size1596; ++_i1600)
+            uint32_t _size1606;
+            ::apache::thrift::protocol::TType _etype1609;
+            xfer += iprot->readListBegin(_etype1609, _size1606);
+            (*(this->success)).resize(_size1606);
+            uint32_t _i1610;
+            for (_i1610 = 0; _i1610 < _size1606; ++_i1610)
            {
-              xfer += (*(this->success))[_i1600].read(iprot);
+              xfer += (*(this->success))[_i1610].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -18876,14 +18876,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift::
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size1601;
-            ::apache::thrift::protocol::TType _etype1604;
-            xfer += iprot->readListBegin(_etype1604, _size1601);
-            this->success.resize(_size1601);
-            uint32_t _i1605;
-            for (_i1605 = 0; _i1605 < _size1601; ++_i1605)
+            uint32_t _size1611;
+            ::apache::thrift::protocol::TType _etype1614;
+            xfer += iprot->readListBegin(_etype1614, _size1611);
+            this->success.resize(_size1611);
+            uint32_t _i1615;
+            for (_i1615 = 0; _i1615 < _size1611; ++_i1615)
            {
-              xfer += iprot->readString(this->success[_i1605]);
+              xfer += iprot->readString(this->success[_i1615]);
            }
            xfer += iprot->readListEnd();
          }
@@ -18930,10 +18930,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-    std::vector<std::string> ::const_iterator _iter1606;
-    for (_iter1606 = this->success.begin(); _iter1606 != this->success.end(); ++_iter1606)
+
std::vector ::const_iterator _iter1616; + for (_iter1616 = this->success.begin(); _iter1616 != this->success.end(); ++_iter1616) { - xfer += oprot->writeString((*_iter1606)); + xfer += oprot->writeString((*_iter1616)); } xfer += oprot->writeListEnd(); } @@ -18982,14 +18982,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1607; - ::apache::thrift::protocol::TType _etype1610; - xfer += iprot->readListBegin(_etype1610, _size1607); - (*(this->success)).resize(_size1607); - uint32_t _i1611; - for (_i1611 = 0; _i1611 < _size1607; ++_i1611) + uint32_t _size1617; + ::apache::thrift::protocol::TType _etype1620; + xfer += iprot->readListBegin(_etype1620, _size1617); + (*(this->success)).resize(_size1617); + uint32_t _i1621; + for (_i1621 = 0; _i1621 < _size1617; ++_i1621) { - xfer += iprot->readString((*(this->success))[_i1611]); + xfer += iprot->readString((*(this->success))[_i1621]); } xfer += iprot->readListEnd(); } @@ -19299,14 +19299,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1612; - ::apache::thrift::protocol::TType _etype1615; - xfer += iprot->readListBegin(_etype1615, _size1612); - this->part_vals.resize(_size1612); - uint32_t _i1616; - for (_i1616 = 0; _i1616 < _size1612; ++_i1616) + uint32_t _size1622; + ::apache::thrift::protocol::TType _etype1625; + xfer += iprot->readListBegin(_etype1625, _size1622); + this->part_vals.resize(_size1622); + uint32_t _i1626; + for (_i1626 = 0; _i1626 < _size1622; ++_i1626) { - xfer += iprot->readString(this->part_vals[_i1616]); + xfer += iprot->readString(this->part_vals[_i1626]); } xfer += iprot->readListEnd(); } @@ -19351,10 +19351,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1617; - for (_iter1617 = this->part_vals.begin(); _iter1617 != this->part_vals.end(); ++_iter1617) + std::vector ::const_iterator _iter1627; + for (_iter1627 = this->part_vals.begin(); _iter1627 != this->part_vals.end(); ++_iter1627) { - xfer += oprot->writeString((*_iter1617)); + xfer += oprot->writeString((*_iter1627)); } xfer += oprot->writeListEnd(); } @@ -19390,10 +19390,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1618; - for (_iter1618 = (*(this->part_vals)).begin(); _iter1618 != (*(this->part_vals)).end(); ++_iter1618) + std::vector ::const_iterator _iter1628; + for (_iter1628 = (*(this->part_vals)).begin(); _iter1628 != (*(this->part_vals)).end(); ++_iter1628) { - xfer += oprot->writeString((*_iter1618)); + xfer += oprot->writeString((*_iter1628)); } xfer += oprot->writeListEnd(); } @@ -19438,14 +19438,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1619; - ::apache::thrift::protocol::TType _etype1622; - xfer += 
iprot->readListBegin(_etype1622, _size1619); - this->success.resize(_size1619); - uint32_t _i1623; - for (_i1623 = 0; _i1623 < _size1619; ++_i1623) + uint32_t _size1629; + ::apache::thrift::protocol::TType _etype1632; + xfer += iprot->readListBegin(_etype1632, _size1629); + this->success.resize(_size1629); + uint32_t _i1633; + for (_i1633 = 0; _i1633 < _size1629; ++_i1633) { - xfer += this->success[_i1623].read(iprot); + xfer += this->success[_i1633].read(iprot); } xfer += iprot->readListEnd(); } @@ -19492,10 +19492,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1624; - for (_iter1624 = this->success.begin(); _iter1624 != this->success.end(); ++_iter1624) + std::vector ::const_iterator _iter1634; + for (_iter1634 = this->success.begin(); _iter1634 != this->success.end(); ++_iter1634) { - xfer += (*_iter1624).write(oprot); + xfer += (*_iter1634).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19544,14 +19544,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1625; - ::apache::thrift::protocol::TType _etype1628; - xfer += iprot->readListBegin(_etype1628, _size1625); - (*(this->success)).resize(_size1625); - uint32_t _i1629; - for (_i1629 = 0; _i1629 < _size1625; ++_i1629) + uint32_t _size1635; + ::apache::thrift::protocol::TType _etype1638; + xfer += iprot->readListBegin(_etype1638, _size1635); + (*(this->success)).resize(_size1635); + uint32_t _i1639; + for (_i1639 = 0; _i1639 < _size1635; ++_i1639) { - xfer += (*(this->success))[_i1629].read(iprot); + xfer += (*(this->success))[_i1639].read(iprot); } xfer += iprot->readListEnd(); } @@ -19634,14 +19634,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1630; - ::apache::thrift::protocol::TType _etype1633; - xfer += iprot->readListBegin(_etype1633, _size1630); - this->part_vals.resize(_size1630); - uint32_t _i1634; - for (_i1634 = 0; _i1634 < _size1630; ++_i1634) + uint32_t _size1640; + ::apache::thrift::protocol::TType _etype1643; + xfer += iprot->readListBegin(_etype1643, _size1640); + this->part_vals.resize(_size1640); + uint32_t _i1644; + for (_i1644 = 0; _i1644 < _size1640; ++_i1644) { - xfer += iprot->readString(this->part_vals[_i1634]); + xfer += iprot->readString(this->part_vals[_i1644]); } xfer += iprot->readListEnd(); } @@ -19670,14 +19670,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1635; - ::apache::thrift::protocol::TType _etype1638; - xfer += iprot->readListBegin(_etype1638, _size1635); - this->group_names.resize(_size1635); - uint32_t _i1639; - for (_i1639 = 0; _i1639 < _size1635; ++_i1639) + uint32_t _size1645; + ::apache::thrift::protocol::TType _etype1648; + xfer += iprot->readListBegin(_etype1648, _size1645); + this->group_names.resize(_size1645); + uint32_t _i1649; + for (_i1649 = 0; _i1649 < _size1645; ++_i1649) { - xfer += iprot->readString(this->group_names[_i1639]); + xfer += iprot->readString(this->group_names[_i1649]); } xfer += iprot->readListEnd(); } @@ 
-19714,10 +19714,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1640; - for (_iter1640 = this->part_vals.begin(); _iter1640 != this->part_vals.end(); ++_iter1640) + std::vector ::const_iterator _iter1650; + for (_iter1650 = this->part_vals.begin(); _iter1650 != this->part_vals.end(); ++_iter1650) { - xfer += oprot->writeString((*_iter1640)); + xfer += oprot->writeString((*_iter1650)); } xfer += oprot->writeListEnd(); } @@ -19734,10 +19734,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1641; - for (_iter1641 = this->group_names.begin(); _iter1641 != this->group_names.end(); ++_iter1641) + std::vector ::const_iterator _iter1651; + for (_iter1651 = this->group_names.begin(); _iter1651 != this->group_names.end(); ++_iter1651) { - xfer += oprot->writeString((*_iter1641)); + xfer += oprot->writeString((*_iter1651)); } xfer += oprot->writeListEnd(); } @@ -19769,10 +19769,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1642; - for (_iter1642 = (*(this->part_vals)).begin(); _iter1642 != (*(this->part_vals)).end(); ++_iter1642) + std::vector ::const_iterator _iter1652; + for (_iter1652 = (*(this->part_vals)).begin(); _iter1652 != (*(this->part_vals)).end(); ++_iter1652) { - xfer += oprot->writeString((*_iter1642)); + xfer += oprot->writeString((*_iter1652)); } xfer += oprot->writeListEnd(); } @@ -19789,10 +19789,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1643; - for (_iter1643 = (*(this->group_names)).begin(); _iter1643 != (*(this->group_names)).end(); ++_iter1643) + std::vector ::const_iterator _iter1653; + for (_iter1653 = (*(this->group_names)).begin(); _iter1653 != (*(this->group_names)).end(); ++_iter1653) { - xfer += oprot->writeString((*_iter1643)); + xfer += oprot->writeString((*_iter1653)); } xfer += oprot->writeListEnd(); } @@ -19833,14 +19833,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1644; - ::apache::thrift::protocol::TType _etype1647; - xfer += iprot->readListBegin(_etype1647, _size1644); - this->success.resize(_size1644); - uint32_t _i1648; - for (_i1648 = 0; _i1648 < _size1644; ++_i1648) + uint32_t _size1654; + ::apache::thrift::protocol::TType _etype1657; + xfer += iprot->readListBegin(_etype1657, _size1654); + this->success.resize(_size1654); + uint32_t _i1658; + for (_i1658 = 0; _i1658 < _size1654; ++_i1658) { - xfer += this->success[_i1648].read(iprot); + xfer += 
this->success[_i1658].read(iprot); } xfer += iprot->readListEnd(); } @@ -19887,10 +19887,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1649; - for (_iter1649 = this->success.begin(); _iter1649 != this->success.end(); ++_iter1649) + std::vector ::const_iterator _iter1659; + for (_iter1659 = this->success.begin(); _iter1659 != this->success.end(); ++_iter1659) { - xfer += (*_iter1649).write(oprot); + xfer += (*_iter1659).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19939,14 +19939,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1650; - ::apache::thrift::protocol::TType _etype1653; - xfer += iprot->readListBegin(_etype1653, _size1650); - (*(this->success)).resize(_size1650); - uint32_t _i1654; - for (_i1654 = 0; _i1654 < _size1650; ++_i1654) + uint32_t _size1660; + ::apache::thrift::protocol::TType _etype1663; + xfer += iprot->readListBegin(_etype1663, _size1660); + (*(this->success)).resize(_size1660); + uint32_t _i1664; + for (_i1664 = 0; _i1664 < _size1660; ++_i1664) { - xfer += (*(this->success))[_i1654].read(iprot); + xfer += (*(this->success))[_i1664].read(iprot); } xfer += iprot->readListEnd(); } @@ -20029,14 +20029,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1655; - ::apache::thrift::protocol::TType _etype1658; - xfer += iprot->readListBegin(_etype1658, _size1655); - this->part_vals.resize(_size1655); - uint32_t _i1659; - for (_i1659 = 0; _i1659 < _size1655; ++_i1659) + uint32_t _size1665; + ::apache::thrift::protocol::TType _etype1668; + xfer += iprot->readListBegin(_etype1668, _size1665); + this->part_vals.resize(_size1665); + uint32_t _i1669; + for (_i1669 = 0; _i1669 < _size1665; ++_i1669) { - xfer += iprot->readString(this->part_vals[_i1659]); + xfer += iprot->readString(this->part_vals[_i1669]); } xfer += iprot->readListEnd(); } @@ -20081,10 +20081,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1660; - for (_iter1660 = this->part_vals.begin(); _iter1660 != this->part_vals.end(); ++_iter1660) + std::vector ::const_iterator _iter1670; + for (_iter1670 = this->part_vals.begin(); _iter1670 != this->part_vals.end(); ++_iter1670) { - xfer += oprot->writeString((*_iter1660)); + xfer += oprot->writeString((*_iter1670)); } xfer += oprot->writeListEnd(); } @@ -20120,10 +20120,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1661; - for (_iter1661 = (*(this->part_vals)).begin(); _iter1661 != (*(this->part_vals)).end(); ++_iter1661) + std::vector ::const_iterator _iter1671; + for (_iter1671 = (*(this->part_vals)).begin(); 
_iter1671 != (*(this->part_vals)).end(); ++_iter1671) { - xfer += oprot->writeString((*_iter1661)); + xfer += oprot->writeString((*_iter1671)); } xfer += oprot->writeListEnd(); } @@ -20168,14 +20168,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1662; - ::apache::thrift::protocol::TType _etype1665; - xfer += iprot->readListBegin(_etype1665, _size1662); - this->success.resize(_size1662); - uint32_t _i1666; - for (_i1666 = 0; _i1666 < _size1662; ++_i1666) + uint32_t _size1672; + ::apache::thrift::protocol::TType _etype1675; + xfer += iprot->readListBegin(_etype1675, _size1672); + this->success.resize(_size1672); + uint32_t _i1676; + for (_i1676 = 0; _i1676 < _size1672; ++_i1676) { - xfer += iprot->readString(this->success[_i1666]); + xfer += iprot->readString(this->success[_i1676]); } xfer += iprot->readListEnd(); } @@ -20222,10 +20222,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1667; - for (_iter1667 = this->success.begin(); _iter1667 != this->success.end(); ++_iter1667) + std::vector ::const_iterator _iter1677; + for (_iter1677 = this->success.begin(); _iter1677 != this->success.end(); ++_iter1677) { - xfer += oprot->writeString((*_iter1667)); + xfer += oprot->writeString((*_iter1677)); } xfer += oprot->writeListEnd(); } @@ -20274,14 +20274,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1668; - ::apache::thrift::protocol::TType _etype1671; - xfer += iprot->readListBegin(_etype1671, _size1668); - (*(this->success)).resize(_size1668); - uint32_t _i1672; - for (_i1672 = 0; _i1672 < _size1668; ++_i1672) + uint32_t _size1678; + ::apache::thrift::protocol::TType _etype1681; + xfer += iprot->readListBegin(_etype1681, _size1678); + (*(this->success)).resize(_size1678); + uint32_t _i1682; + for (_i1682 = 0; _i1682 < _size1678; ++_i1682) { - xfer += iprot->readString((*(this->success))[_i1672]); + xfer += iprot->readString((*(this->success))[_i1682]); } xfer += iprot->readListEnd(); } @@ -20475,14 +20475,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1673; - ::apache::thrift::protocol::TType _etype1676; - xfer += iprot->readListBegin(_etype1676, _size1673); - this->success.resize(_size1673); - uint32_t _i1677; - for (_i1677 = 0; _i1677 < _size1673; ++_i1677) + uint32_t _size1683; + ::apache::thrift::protocol::TType _etype1686; + xfer += iprot->readListBegin(_etype1686, _size1683); + this->success.resize(_size1683); + uint32_t _i1687; + for (_i1687 = 0; _i1687 < _size1683; ++_i1687) { - xfer += this->success[_i1677].read(iprot); + xfer += this->success[_i1687].read(iprot); } xfer += iprot->readListEnd(); } @@ -20529,10 +20529,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1678; - for 
(_iter1678 = this->success.begin(); _iter1678 != this->success.end(); ++_iter1678) + std::vector ::const_iterator _iter1688; + for (_iter1688 = this->success.begin(); _iter1688 != this->success.end(); ++_iter1688) { - xfer += (*_iter1678).write(oprot); + xfer += (*_iter1688).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20581,14 +20581,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1679; - ::apache::thrift::protocol::TType _etype1682; - xfer += iprot->readListBegin(_etype1682, _size1679); - (*(this->success)).resize(_size1679); - uint32_t _i1683; - for (_i1683 = 0; _i1683 < _size1679; ++_i1683) + uint32_t _size1689; + ::apache::thrift::protocol::TType _etype1692; + xfer += iprot->readListBegin(_etype1692, _size1689); + (*(this->success)).resize(_size1689); + uint32_t _i1693; + for (_i1693 = 0; _i1693 < _size1689; ++_i1693) { - xfer += (*(this->success))[_i1683].read(iprot); + xfer += (*(this->success))[_i1693].read(iprot); } xfer += iprot->readListEnd(); } @@ -20782,14 +20782,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1684; - ::apache::thrift::protocol::TType _etype1687; - xfer += iprot->readListBegin(_etype1687, _size1684); - this->success.resize(_size1684); - uint32_t _i1688; - for (_i1688 = 0; _i1688 < _size1684; ++_i1688) + uint32_t _size1694; + ::apache::thrift::protocol::TType _etype1697; + xfer += iprot->readListBegin(_etype1697, _size1694); + this->success.resize(_size1694); + uint32_t _i1698; + for (_i1698 = 0; _i1698 < _size1694; ++_i1698) { - xfer += this->success[_i1688].read(iprot); + xfer += this->success[_i1698].read(iprot); } xfer += iprot->readListEnd(); } @@ -20836,10 +20836,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1689; - for (_iter1689 = this->success.begin(); _iter1689 != this->success.end(); ++_iter1689) + std::vector ::const_iterator _iter1699; + for (_iter1699 = this->success.begin(); _iter1699 != this->success.end(); ++_iter1699) { - xfer += (*_iter1689).write(oprot); + xfer += (*_iter1699).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20888,14 +20888,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1690; - ::apache::thrift::protocol::TType _etype1693; - xfer += iprot->readListBegin(_etype1693, _size1690); - (*(this->success)).resize(_size1690); - uint32_t _i1694; - for (_i1694 = 0; _i1694 < _size1690; ++_i1694) + uint32_t _size1700; + ::apache::thrift::protocol::TType _etype1703; + xfer += iprot->readListBegin(_etype1703, _size1700); + (*(this->success)).resize(_size1700); + uint32_t _i1704; + for (_i1704 = 0; _i1704 < _size1700; ++_i1704) { - xfer += (*(this->success))[_i1694].read(iprot); + xfer += (*(this->success))[_i1704].read(iprot); } xfer += iprot->readListEnd(); } @@ -21464,14 +21464,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1695; - 
::apache::thrift::protocol::TType _etype1698; - xfer += iprot->readListBegin(_etype1698, _size1695); - this->names.resize(_size1695); - uint32_t _i1699; - for (_i1699 = 0; _i1699 < _size1695; ++_i1699) + uint32_t _size1705; + ::apache::thrift::protocol::TType _etype1708; + xfer += iprot->readListBegin(_etype1708, _size1705); + this->names.resize(_size1705); + uint32_t _i1709; + for (_i1709 = 0; _i1709 < _size1705; ++_i1709) { - xfer += iprot->readString(this->names[_i1699]); + xfer += iprot->readString(this->names[_i1709]); } xfer += iprot->readListEnd(); } @@ -21508,10 +21508,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1700; - for (_iter1700 = this->names.begin(); _iter1700 != this->names.end(); ++_iter1700) + std::vector ::const_iterator _iter1710; + for (_iter1710 = this->names.begin(); _iter1710 != this->names.end(); ++_iter1710) { - xfer += oprot->writeString((*_iter1700)); + xfer += oprot->writeString((*_iter1710)); } xfer += oprot->writeListEnd(); } @@ -21543,10 +21543,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1701; - for (_iter1701 = (*(this->names)).begin(); _iter1701 != (*(this->names)).end(); ++_iter1701) + std::vector ::const_iterator _iter1711; + for (_iter1711 = (*(this->names)).begin(); _iter1711 != (*(this->names)).end(); ++_iter1711) { - xfer += oprot->writeString((*_iter1701)); + xfer += oprot->writeString((*_iter1711)); } xfer += oprot->writeListEnd(); } @@ -21587,14 +21587,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1702; - ::apache::thrift::protocol::TType _etype1705; - xfer += iprot->readListBegin(_etype1705, _size1702); - this->success.resize(_size1702); - uint32_t _i1706; - for (_i1706 = 0; _i1706 < _size1702; ++_i1706) + uint32_t _size1712; + ::apache::thrift::protocol::TType _etype1715; + xfer += iprot->readListBegin(_etype1715, _size1712); + this->success.resize(_size1712); + uint32_t _i1716; + for (_i1716 = 0; _i1716 < _size1712; ++_i1716) { - xfer += this->success[_i1706].read(iprot); + xfer += this->success[_i1716].read(iprot); } xfer += iprot->readListEnd(); } @@ -21641,10 +21641,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1707; - for (_iter1707 = this->success.begin(); _iter1707 != this->success.end(); ++_iter1707) + std::vector ::const_iterator _iter1717; + for (_iter1717 = this->success.begin(); _iter1717 != this->success.end(); ++_iter1717) { - xfer += (*_iter1707).write(oprot); + xfer += (*_iter1717).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21693,14 +21693,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - 
uint32_t _size1708; - ::apache::thrift::protocol::TType _etype1711; - xfer += iprot->readListBegin(_etype1711, _size1708); - (*(this->success)).resize(_size1708); - uint32_t _i1712; - for (_i1712 = 0; _i1712 < _size1708; ++_i1712) + uint32_t _size1718; + ::apache::thrift::protocol::TType _etype1721; + xfer += iprot->readListBegin(_etype1721, _size1718); + (*(this->success)).resize(_size1718); + uint32_t _i1722; + for (_i1722 = 0; _i1722 < _size1718; ++_i1722) { - xfer += (*(this->success))[_i1712].read(iprot); + xfer += (*(this->success))[_i1722].read(iprot); } xfer += iprot->readListEnd(); } @@ -22022,14 +22022,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1713; - ::apache::thrift::protocol::TType _etype1716; - xfer += iprot->readListBegin(_etype1716, _size1713); - this->new_parts.resize(_size1713); - uint32_t _i1717; - for (_i1717 = 0; _i1717 < _size1713; ++_i1717) + uint32_t _size1723; + ::apache::thrift::protocol::TType _etype1726; + xfer += iprot->readListBegin(_etype1726, _size1723); + this->new_parts.resize(_size1723); + uint32_t _i1727; + for (_i1727 = 0; _i1727 < _size1723; ++_i1727) { - xfer += this->new_parts[_i1717].read(iprot); + xfer += this->new_parts[_i1727].read(iprot); } xfer += iprot->readListEnd(); } @@ -22066,10 +22066,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1718; - for (_iter1718 = this->new_parts.begin(); _iter1718 != this->new_parts.end(); ++_iter1718) + std::vector ::const_iterator _iter1728; + for (_iter1728 = this->new_parts.begin(); _iter1728 != this->new_parts.end(); ++_iter1728) { - xfer += (*_iter1718).write(oprot); + xfer += (*_iter1728).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22101,10 +22101,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1719; - for (_iter1719 = (*(this->new_parts)).begin(); _iter1719 != (*(this->new_parts)).end(); ++_iter1719) + std::vector ::const_iterator _iter1729; + for (_iter1729 = (*(this->new_parts)).begin(); _iter1729 != (*(this->new_parts)).end(); ++_iter1729) { - xfer += (*_iter1719).write(oprot); + xfer += (*_iter1729).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22289,14 +22289,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1720; - ::apache::thrift::protocol::TType _etype1723; - xfer += iprot->readListBegin(_etype1723, _size1720); - this->new_parts.resize(_size1720); - uint32_t _i1724; - for (_i1724 = 0; _i1724 < _size1720; ++_i1724) + uint32_t _size1730; + ::apache::thrift::protocol::TType _etype1733; + xfer += iprot->readListBegin(_etype1733, _size1730); + this->new_parts.resize(_size1730); + uint32_t _i1734; + for (_i1734 = 0; _i1734 < _size1730; ++_i1734) { - xfer += this->new_parts[_i1724].read(iprot); + xfer += this->new_parts[_i1734].read(iprot); } xfer += iprot->readListEnd(); } @@ -22341,10 
+22341,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<Partition> ::const_iterator _iter1725;
-    for (_iter1725 = this->new_parts.begin(); _iter1725 != this->new_parts.end(); ++_iter1725)
+    std::vector<Partition> ::const_iterator _iter1735;
+    for (_iter1735 = this->new_parts.begin(); _iter1735 != this->new_parts.end(); ++_iter1735)
     {
-      xfer += (*_iter1725).write(oprot);
+      xfer += (*_iter1735).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -22380,10 +22380,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<Partition> ::const_iterator _iter1726;
-    for (_iter1726 = (*(this->new_parts)).begin(); _iter1726 != (*(this->new_parts)).end(); ++_iter1726)
+    std::vector<Partition> ::const_iterator _iter1736;
+    for (_iter1736 = (*(this->new_parts)).begin(); _iter1736 != (*(this->new_parts)).end(); ++_iter1736)
     {
-      xfer += (*_iter1726).write(oprot);
+      xfer += (*_iter1736).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -22527,6 +22527,233 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_presult::
 }
 
 
+ThriftHiveMetastore_alter_partitions_with_environment_context_req_args::~ThriftHiveMetastore_alter_partitions_with_environment_context_req_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_req_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->req.read(iprot);
+          this->__isset.req = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_req_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_with_environment_context_req_args");
+
+  xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->req.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_partitions_with_environment_context_req_pargs::~ThriftHiveMetastore_alter_partitions_with_environment_context_req_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_req_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_with_environment_context_req_pargs");
+
+  xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->req)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_partitions_with_environment_context_req_result::~ThriftHiveMetastore_alter_partitions_with_environment_context_req_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_req_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_req_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_with_environment_context_req_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult::~ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
 ThriftHiveMetastore_alter_partition_with_environment_context_args::~ThriftHiveMetastore_alter_partition_with_environment_context_args() throw() {
 }
@@ -22827,14 +23054,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size1727;
-            ::apache::thrift::protocol::TType _etype1730;
-            xfer += iprot->readListBegin(_etype1730, _size1727);
-            this->part_vals.resize(_size1727);
-            uint32_t _i1731;
-            for (_i1731 = 0; _i1731 < _size1727; ++_i1731)
+            uint32_t _size1737;
+            ::apache::thrift::protocol::TType _etype1740;
+            xfer += iprot->readListBegin(_etype1740, _size1737);
+            this->part_vals.resize(_size1737);
+            uint32_t _i1741;
+            for (_i1741 = 0; _i1741 < _size1737; ++_i1741)
            {
-              xfer += iprot->readString(this->part_vals[_i1731]);
+              xfer += iprot->readString(this->part_vals[_i1741]);
            }
            xfer += iprot->readListEnd();
          }
@@ -22879,10 +23106,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1732;
-    for (_iter1732 = this->part_vals.begin(); _iter1732 != this->part_vals.end(); ++_iter1732)
+    std::vector<std::string> ::const_iterator _iter1742;
+    for (_iter1742 = this->part_vals.begin(); _iter1742 != this->part_vals.end(); ++_iter1742)
     {
-      xfer += oprot->writeString((*_iter1732));
+      xfer += oprot->writeString((*_iter1742));
     }
     xfer += oprot->writeListEnd();
   }
@@ -22918,10 +23145,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1733;
-    for (_iter1733 = (*(this->part_vals)).begin(); _iter1733 != (*(this->part_vals)).end(); ++_iter1733)
+    std::vector<std::string> ::const_iterator _iter1743;
+    for (_iter1743 = (*(this->part_vals)).begin(); _iter1743 != (*(this->part_vals)).end(); ++_iter1743)
     {
-      xfer += oprot->writeString((*_iter1733));
+      xfer += oprot->writeString((*_iter1743));
     }
     xfer += oprot->writeListEnd();
   }
@@ -23094,14 +23321,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size1734;
-            ::apache::thrift::protocol::TType _etype1737;
-            xfer += iprot->readListBegin(_etype1737, _size1734);
-            this->part_vals.resize(_size1734);
-            uint32_t _i1738;
-            for (_i1738 = 0; _i1738 < _size1734; ++_i1738)
+            uint32_t _size1744;
+            ::apache::thrift::protocol::TType _etype1747;
+            xfer += iprot->readListBegin(_etype1747, _size1744);
+            this->part_vals.resize(_size1744);
+            uint32_t _i1748;
+            for (_i1748 =
0; _i1748 < _size1744; ++_i1748) { - xfer += iprot->readString(this->part_vals[_i1738]); + xfer += iprot->readString(this->part_vals[_i1748]); } xfer += iprot->readListEnd(); } @@ -23138,10 +23365,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1739; - for (_iter1739 = this->part_vals.begin(); _iter1739 != this->part_vals.end(); ++_iter1739) + std::vector ::const_iterator _iter1749; + for (_iter1749 = this->part_vals.begin(); _iter1749 != this->part_vals.end(); ++_iter1749) { - xfer += oprot->writeString((*_iter1739)); + xfer += oprot->writeString((*_iter1749)); } xfer += oprot->writeListEnd(); } @@ -23169,10 +23396,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1740; - for (_iter1740 = (*(this->part_vals)).begin(); _iter1740 != (*(this->part_vals)).end(); ++_iter1740) + std::vector ::const_iterator _iter1750; + for (_iter1750 = (*(this->part_vals)).begin(); _iter1750 != (*(this->part_vals)).end(); ++_iter1750) { - xfer += oprot->writeString((*_iter1740)); + xfer += oprot->writeString((*_iter1750)); } xfer += oprot->writeListEnd(); } @@ -23647,14 +23874,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1741; - ::apache::thrift::protocol::TType _etype1744; - xfer += iprot->readListBegin(_etype1744, _size1741); - this->success.resize(_size1741); - uint32_t _i1745; - for (_i1745 = 0; _i1745 < _size1741; ++_i1745) + uint32_t _size1751; + ::apache::thrift::protocol::TType _etype1754; + xfer += iprot->readListBegin(_etype1754, _size1751); + this->success.resize(_size1751); + uint32_t _i1755; + for (_i1755 = 0; _i1755 < _size1751; ++_i1755) { - xfer += iprot->readString(this->success[_i1745]); + xfer += iprot->readString(this->success[_i1755]); } xfer += iprot->readListEnd(); } @@ -23693,10 +23920,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1746; - for (_iter1746 = this->success.begin(); _iter1746 != this->success.end(); ++_iter1746) + std::vector ::const_iterator _iter1756; + for (_iter1756 = this->success.begin(); _iter1756 != this->success.end(); ++_iter1756) { - xfer += oprot->writeString((*_iter1746)); + xfer += oprot->writeString((*_iter1756)); } xfer += oprot->writeListEnd(); } @@ -23741,14 +23968,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1747; - ::apache::thrift::protocol::TType _etype1750; - xfer += iprot->readListBegin(_etype1750, _size1747); - (*(this->success)).resize(_size1747); - uint32_t _i1751; - for (_i1751 = 0; _i1751 < _size1747; ++_i1751) + uint32_t _size1757; + ::apache::thrift::protocol::TType _etype1760; + xfer += 
iprot->readListBegin(_etype1760, _size1757); + (*(this->success)).resize(_size1757); + uint32_t _i1761; + for (_i1761 = 0; _i1761 < _size1757; ++_i1761) { - xfer += iprot->readString((*(this->success))[_i1751]); + xfer += iprot->readString((*(this->success))[_i1761]); } xfer += iprot->readListEnd(); } @@ -23886,17 +24113,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1752; - ::apache::thrift::protocol::TType _ktype1753; - ::apache::thrift::protocol::TType _vtype1754; - xfer += iprot->readMapBegin(_ktype1753, _vtype1754, _size1752); - uint32_t _i1756; - for (_i1756 = 0; _i1756 < _size1752; ++_i1756) + uint32_t _size1762; + ::apache::thrift::protocol::TType _ktype1763; + ::apache::thrift::protocol::TType _vtype1764; + xfer += iprot->readMapBegin(_ktype1763, _vtype1764, _size1762); + uint32_t _i1766; + for (_i1766 = 0; _i1766 < _size1762; ++_i1766) { - std::string _key1757; - xfer += iprot->readString(_key1757); - std::string& _val1758 = this->success[_key1757]; - xfer += iprot->readString(_val1758); + std::string _key1767; + xfer += iprot->readString(_key1767); + std::string& _val1768 = this->success[_key1767]; + xfer += iprot->readString(_val1768); } xfer += iprot->readMapEnd(); } @@ -23935,11 +24162,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1759; - for (_iter1759 = this->success.begin(); _iter1759 != this->success.end(); ++_iter1759) + std::map ::const_iterator _iter1769; + for (_iter1769 = this->success.begin(); _iter1769 != this->success.end(); ++_iter1769) { - xfer += oprot->writeString(_iter1759->first); - xfer += oprot->writeString(_iter1759->second); + xfer += oprot->writeString(_iter1769->first); + xfer += oprot->writeString(_iter1769->second); } xfer += oprot->writeMapEnd(); } @@ -23984,17 +24211,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1760; - ::apache::thrift::protocol::TType _ktype1761; - ::apache::thrift::protocol::TType _vtype1762; - xfer += iprot->readMapBegin(_ktype1761, _vtype1762, _size1760); - uint32_t _i1764; - for (_i1764 = 0; _i1764 < _size1760; ++_i1764) + uint32_t _size1770; + ::apache::thrift::protocol::TType _ktype1771; + ::apache::thrift::protocol::TType _vtype1772; + xfer += iprot->readMapBegin(_ktype1771, _vtype1772, _size1770); + uint32_t _i1774; + for (_i1774 = 0; _i1774 < _size1770; ++_i1774) { - std::string _key1765; - xfer += iprot->readString(_key1765); - std::string& _val1766 = (*(this->success))[_key1765]; - xfer += iprot->readString(_val1766); + std::string _key1775; + xfer += iprot->readString(_key1775); + std::string& _val1776 = (*(this->success))[_key1775]; + xfer += iprot->readString(_val1776); } xfer += iprot->readMapEnd(); } @@ -24069,17 +24296,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1767; - ::apache::thrift::protocol::TType _ktype1768; - ::apache::thrift::protocol::TType _vtype1769; - xfer += iprot->readMapBegin(_ktype1768, _vtype1769, 
_size1767); - uint32_t _i1771; - for (_i1771 = 0; _i1771 < _size1767; ++_i1771) + uint32_t _size1777; + ::apache::thrift::protocol::TType _ktype1778; + ::apache::thrift::protocol::TType _vtype1779; + xfer += iprot->readMapBegin(_ktype1778, _vtype1779, _size1777); + uint32_t _i1781; + for (_i1781 = 0; _i1781 < _size1777; ++_i1781) { - std::string _key1772; - xfer += iprot->readString(_key1772); - std::string& _val1773 = this->part_vals[_key1772]; - xfer += iprot->readString(_val1773); + std::string _key1782; + xfer += iprot->readString(_key1782); + std::string& _val1783 = this->part_vals[_key1782]; + xfer += iprot->readString(_val1783); } xfer += iprot->readMapEnd(); } @@ -24090,9 +24317,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1774; - xfer += iprot->readI32(ecast1774); - this->eventType = (PartitionEventType::type)ecast1774; + int32_t ecast1784; + xfer += iprot->readI32(ecast1784); + this->eventType = (PartitionEventType::type)ecast1784; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -24126,11 +24353,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1775; - for (_iter1775 = this->part_vals.begin(); _iter1775 != this->part_vals.end(); ++_iter1775) + std::map ::const_iterator _iter1785; + for (_iter1785 = this->part_vals.begin(); _iter1785 != this->part_vals.end(); ++_iter1785) { - xfer += oprot->writeString(_iter1775->first); - xfer += oprot->writeString(_iter1775->second); + xfer += oprot->writeString(_iter1785->first); + xfer += oprot->writeString(_iter1785->second); } xfer += oprot->writeMapEnd(); } @@ -24166,11 +24393,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1776; - for (_iter1776 = (*(this->part_vals)).begin(); _iter1776 != (*(this->part_vals)).end(); ++_iter1776) + std::map ::const_iterator _iter1786; + for (_iter1786 = (*(this->part_vals)).begin(); _iter1786 != (*(this->part_vals)).end(); ++_iter1786) { - xfer += oprot->writeString(_iter1776->first); - xfer += oprot->writeString(_iter1776->second); + xfer += oprot->writeString(_iter1786->first); + xfer += oprot->writeString(_iter1786->second); } xfer += oprot->writeMapEnd(); } @@ -24439,17 +24666,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1777; - ::apache::thrift::protocol::TType _ktype1778; - ::apache::thrift::protocol::TType _vtype1779; - xfer += iprot->readMapBegin(_ktype1778, _vtype1779, _size1777); - uint32_t _i1781; - for (_i1781 = 0; _i1781 < _size1777; ++_i1781) + uint32_t _size1787; + ::apache::thrift::protocol::TType _ktype1788; + ::apache::thrift::protocol::TType _vtype1789; + xfer += iprot->readMapBegin(_ktype1788, _vtype1789, _size1787); + uint32_t _i1791; + for (_i1791 = 0; _i1791 < _size1787; ++_i1791) { - std::string _key1782; - xfer += 
iprot->readString(_key1782); - std::string& _val1783 = this->part_vals[_key1782]; - xfer += iprot->readString(_val1783); + std::string _key1792; + xfer += iprot->readString(_key1792); + std::string& _val1793 = this->part_vals[_key1792]; + xfer += iprot->readString(_val1793); } xfer += iprot->readMapEnd(); } @@ -24460,9 +24687,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1784; - xfer += iprot->readI32(ecast1784); - this->eventType = (PartitionEventType::type)ecast1784; + int32_t ecast1794; + xfer += iprot->readI32(ecast1794); + this->eventType = (PartitionEventType::type)ecast1794; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -24496,11 +24723,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1785; - for (_iter1785 = this->part_vals.begin(); _iter1785 != this->part_vals.end(); ++_iter1785) + std::map ::const_iterator _iter1795; + for (_iter1795 = this->part_vals.begin(); _iter1795 != this->part_vals.end(); ++_iter1795) { - xfer += oprot->writeString(_iter1785->first); - xfer += oprot->writeString(_iter1785->second); + xfer += oprot->writeString(_iter1795->first); + xfer += oprot->writeString(_iter1795->second); } xfer += oprot->writeMapEnd(); } @@ -24536,11 +24763,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1786; - for (_iter1786 = (*(this->part_vals)).begin(); _iter1786 != (*(this->part_vals)).end(); ++_iter1786) + std::map ::const_iterator _iter1796; + for (_iter1796 = (*(this->part_vals)).begin(); _iter1796 != (*(this->part_vals)).end(); ++_iter1796) { - xfer += oprot->writeString(_iter1786->first); - xfer += oprot->writeString(_iter1786->second); + xfer += oprot->writeString(_iter1796->first); + xfer += oprot->writeString(_iter1796->second); } xfer += oprot->writeMapEnd(); } @@ -29689,14 +29916,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1787; - ::apache::thrift::protocol::TType _etype1790; - xfer += iprot->readListBegin(_etype1790, _size1787); - this->success.resize(_size1787); - uint32_t _i1791; - for (_i1791 = 0; _i1791 < _size1787; ++_i1791) + uint32_t _size1797; + ::apache::thrift::protocol::TType _etype1800; + xfer += iprot->readListBegin(_etype1800, _size1797); + this->success.resize(_size1797); + uint32_t _i1801; + for (_i1801 = 0; _i1801 < _size1797; ++_i1801) { - xfer += iprot->readString(this->success[_i1791]); + xfer += iprot->readString(this->success[_i1801]); } xfer += iprot->readListEnd(); } @@ -29735,10 +29962,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector 
::const_iterator _iter1792; - for (_iter1792 = this->success.begin(); _iter1792 != this->success.end(); ++_iter1792) + std::vector<std::string> ::const_iterator _iter1802; + for (_iter1802 = this->success.begin(); _iter1802 != this->success.end(); ++_iter1802) { - xfer += oprot->writeString((*_iter1792)); + xfer += oprot->writeString((*_iter1802)); } xfer += oprot->writeListEnd(); } @@ -29783,14 +30010,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1793; - ::apache::thrift::protocol::TType _etype1796; - xfer += iprot->readListBegin(_etype1796, _size1793); - (*(this->success)).resize(_size1793); - uint32_t _i1797; - for (_i1797 = 0; _i1797 < _size1793; ++_i1797) + uint32_t _size1803; + ::apache::thrift::protocol::TType _etype1806; + xfer += iprot->readListBegin(_etype1806, _size1803); + (*(this->success)).resize(_size1803); + uint32_t _i1807; + for (_i1807 = 0; _i1807 < _size1803; ++_i1807) { - xfer += iprot->readString((*(this->success))[_i1797]); + xfer += iprot->readString((*(this->success))[_i1807]); } xfer += iprot->readListEnd(); } @@ -30750,14 +30977,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1798; - ::apache::thrift::protocol::TType _etype1801; - xfer += iprot->readListBegin(_etype1801, _size1798); - this->success.resize(_size1798); - uint32_t _i1802; - for (_i1802 = 0; _i1802 < _size1798; ++_i1802) + uint32_t _size1808; + ::apache::thrift::protocol::TType _etype1811; + xfer += iprot->readListBegin(_etype1811, _size1808); + this->success.resize(_size1808); + uint32_t _i1812; + for (_i1812 = 0; _i1812 < _size1808; ++_i1812) { - xfer += iprot->readString(this->success[_i1802]); + xfer += iprot->readString(this->success[_i1812]); } xfer += iprot->readListEnd(); } @@ -30796,10 +31023,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1803; - for (_iter1803 = this->success.begin(); _iter1803 != this->success.end(); ++_iter1803) + std::vector<std::string> ::const_iterator _iter1813; + for (_iter1813 = this->success.begin(); _iter1813 != this->success.end(); ++_iter1813) { - xfer += oprot->writeString((*_iter1803)); + xfer += oprot->writeString((*_iter1813)); } xfer += oprot->writeListEnd(); } @@ -30844,14 +31071,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1804; - ::apache::thrift::protocol::TType _etype1807; - xfer += iprot->readListBegin(_etype1807, _size1804); - (*(this->success)).resize(_size1804); - uint32_t _i1808; - for (_i1808 = 0; _i1808 < _size1804; ++_i1808) + uint32_t _size1814; + ::apache::thrift::protocol::TType _etype1817; + xfer += iprot->readListBegin(_etype1817, _size1814); + (*(this->success)).resize(_size1814); + uint32_t _i1818; + for (_i1818 = 0; _i1818 < _size1814; ++_i1818) { - xfer += iprot->readString((*(this->success))[_i1808]); + xfer += iprot->readString((*(this->success))[_i1818]); } xfer += iprot->readListEnd(); } @@ -30924,9 +31151,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break;
case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1809; - xfer += iprot->readI32(ecast1809); - this->principal_type = (PrincipalType::type)ecast1809; + int32_t ecast1819; + xfer += iprot->readI32(ecast1819); + this->principal_type = (PrincipalType::type)ecast1819; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30942,9 +31169,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1810; - xfer += iprot->readI32(ecast1810); - this->grantorType = (PrincipalType::type)ecast1810; + int32_t ecast1820; + xfer += iprot->readI32(ecast1820); + this->grantorType = (PrincipalType::type)ecast1820; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -31215,9 +31442,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1811; - xfer += iprot->readI32(ecast1811); - this->principal_type = (PrincipalType::type)ecast1811; + int32_t ecast1821; + xfer += iprot->readI32(ecast1821); + this->principal_type = (PrincipalType::type)ecast1821; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31448,9 +31675,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1812; - xfer += iprot->readI32(ecast1812); - this->principal_type = (PrincipalType::type)ecast1812; + int32_t ecast1822; + xfer += iprot->readI32(ecast1822); + this->principal_type = (PrincipalType::type)ecast1822; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31539,14 +31766,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1813; - ::apache::thrift::protocol::TType _etype1816; - xfer += iprot->readListBegin(_etype1816, _size1813); - this->success.resize(_size1813); - uint32_t _i1817; - for (_i1817 = 0; _i1817 < _size1813; ++_i1817) + uint32_t _size1823; + ::apache::thrift::protocol::TType _etype1826; + xfer += iprot->readListBegin(_etype1826, _size1823); + this->success.resize(_size1823); + uint32_t _i1827; + for (_i1827 = 0; _i1827 < _size1823; ++_i1827) { - xfer += this->success[_i1817].read(iprot); + xfer += this->success[_i1827].read(iprot); } xfer += iprot->readListEnd(); } @@ -31585,10 +31812,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<Role> ::const_iterator _iter1818; - for (_iter1818 = this->success.begin(); _iter1818 != this->success.end(); ++_iter1818) + std::vector<Role> ::const_iterator _iter1828; + for (_iter1828 = this->success.begin(); _iter1828 != this->success.end(); ++_iter1828) { - xfer += (*_iter1818).write(oprot); + xfer += (*_iter1828).write(oprot); } xfer += oprot->writeListEnd(); } @@ -31633,14 +31860,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1819; - ::apache::thrift::protocol::TType _etype1822; - xfer += iprot->readListBegin(_etype1822, _size1819); -
(*(this->success)).resize(_size1819); - uint32_t _i1823; - for (_i1823 = 0; _i1823 < _size1819; ++_i1823) + uint32_t _size1829; + ::apache::thrift::protocol::TType _etype1832; + xfer += iprot->readListBegin(_etype1832, _size1829); + (*(this->success)).resize(_size1829); + uint32_t _i1833; + for (_i1833 = 0; _i1833 < _size1829; ++_i1833) { - xfer += (*(this->success))[_i1823].read(iprot); + xfer += (*(this->success))[_i1833].read(iprot); } xfer += iprot->readListEnd(); } @@ -32336,14 +32563,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1824; - ::apache::thrift::protocol::TType _etype1827; - xfer += iprot->readListBegin(_etype1827, _size1824); - this->group_names.resize(_size1824); - uint32_t _i1828; - for (_i1828 = 0; _i1828 < _size1824; ++_i1828) + uint32_t _size1834; + ::apache::thrift::protocol::TType _etype1837; + xfer += iprot->readListBegin(_etype1837, _size1834); + this->group_names.resize(_size1834); + uint32_t _i1838; + for (_i1838 = 0; _i1838 < _size1834; ++_i1838) { - xfer += iprot->readString(this->group_names[_i1828]); + xfer += iprot->readString(this->group_names[_i1838]); } xfer += iprot->readListEnd(); } @@ -32380,10 +32607,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size())); - std::vector<std::string> ::const_iterator _iter1829; - for (_iter1829 = this->group_names.begin(); _iter1829 != this->group_names.end(); ++_iter1829) + std::vector<std::string> ::const_iterator _iter1839; + for (_iter1839 = this->group_names.begin(); _iter1839 != this->group_names.end(); ++_iter1839) { - xfer += oprot->writeString((*_iter1829)); + xfer += oprot->writeString((*_iter1839)); } xfer += oprot->writeListEnd(); } @@ -32415,10 +32642,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size())); - std::vector<std::string> ::const_iterator _iter1830; - for (_iter1830 = (*(this->group_names)).begin(); _iter1830 != (*(this->group_names)).end(); ++_iter1830) + std::vector<std::string> ::const_iterator _iter1840; + for (_iter1840 = (*(this->group_names)).begin(); _iter1840 != (*(this->group_names)).end(); ++_iter1840) { - xfer += oprot->writeString((*_iter1830)); + xfer += oprot->writeString((*_iter1840)); } xfer += oprot->writeListEnd(); } @@ -32593,9 +32820,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1831; - xfer += iprot->readI32(ecast1831); - this->principal_type = (PrincipalType::type)ecast1831; + int32_t ecast1841; + xfer += iprot->readI32(ecast1841); + this->principal_type = (PrincipalType::type)ecast1841; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -32700,14 +32927,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1832; - ::apache::thrift::protocol::TType _etype1835; - xfer += iprot->readListBegin(_etype1835, _size1832); - this->success.resize(_size1832); - uint32_t _i1836; - for
(_i1836 = 0; _i1836 < _size1832; ++_i1836) + uint32_t _size1842; + ::apache::thrift::protocol::TType _etype1845; + xfer += iprot->readListBegin(_etype1845, _size1842); + this->success.resize(_size1842); + uint32_t _i1846; + for (_i1846 = 0; _i1846 < _size1842; ++_i1846) { - xfer += this->success[_i1836].read(iprot); + xfer += this->success[_i1846].read(iprot); } xfer += iprot->readListEnd(); } @@ -32746,10 +32973,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<HiveObjectPrivilege> ::const_iterator _iter1837; - for (_iter1837 = this->success.begin(); _iter1837 != this->success.end(); ++_iter1837) + std::vector<HiveObjectPrivilege> ::const_iterator _iter1847; + for (_iter1847 = this->success.begin(); _iter1847 != this->success.end(); ++_iter1847) { - xfer += (*_iter1837).write(oprot); + xfer += (*_iter1847).write(oprot); } xfer += oprot->writeListEnd(); } @@ -32794,14 +33021,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1838; - ::apache::thrift::protocol::TType _etype1841; - xfer += iprot->readListBegin(_etype1841, _size1838); - (*(this->success)).resize(_size1838); - uint32_t _i1842; - for (_i1842 = 0; _i1842 < _size1838; ++_i1842) + uint32_t _size1848; + ::apache::thrift::protocol::TType _etype1851; + xfer += iprot->readListBegin(_etype1851, _size1848); + (*(this->success)).resize(_size1848); + uint32_t _i1852; + for (_i1852 = 0; _i1852 < _size1848; ++_i1852) { - xfer += (*(this->success))[_i1842].read(iprot); + xfer += (*(this->success))[_i1852].read(iprot); } xfer += iprot->readListEnd(); } @@ -33728,14 +33955,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1843; - ::apache::thrift::protocol::TType _etype1846; - xfer += iprot->readListBegin(_etype1846, _size1843); - this->group_names.resize(_size1843); - uint32_t _i1847; - for (_i1847 = 0; _i1847 < _size1843; ++_i1847) + uint32_t _size1853; + ::apache::thrift::protocol::TType _etype1856; + xfer += iprot->readListBegin(_etype1856, _size1853); + this->group_names.resize(_size1853); + uint32_t _i1857; + for (_i1857 = 0; _i1857 < _size1853; ++_i1857) { - xfer += iprot->readString(this->group_names[_i1847]); + xfer += iprot->readString(this->group_names[_i1857]); } xfer += iprot->readListEnd(); } @@ -33768,10 +33995,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size())); - std::vector<std::string> ::const_iterator _iter1848; - for (_iter1848 = this->group_names.begin(); _iter1848 != this->group_names.end(); ++_iter1848) + std::vector<std::string> ::const_iterator _iter1858; + for (_iter1858 = this->group_names.begin(); _iter1858 != this->group_names.end(); ++_iter1858) { - xfer += oprot->writeString((*_iter1848)); + xfer += oprot->writeString((*_iter1858)); } xfer += oprot->writeListEnd(); } @@ -33799,10 +34026,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); {
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size())); - std::vector<std::string> ::const_iterator _iter1849; - for (_iter1849 = (*(this->group_names)).begin(); _iter1849 != (*(this->group_names)).end(); ++_iter1849) + std::vector<std::string> ::const_iterator _iter1859; + for (_iter1859 = (*(this->group_names)).begin(); _iter1859 != (*(this->group_names)).end(); ++_iter1859) { - xfer += oprot->writeString((*_iter1849)); + xfer += oprot->writeString((*_iter1859)); } xfer += oprot->writeListEnd(); } @@ -33843,14 +34070,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1850; - ::apache::thrift::protocol::TType _etype1853; - xfer += iprot->readListBegin(_etype1853, _size1850); - this->success.resize(_size1850); - uint32_t _i1854; - for (_i1854 = 0; _i1854 < _size1850; ++_i1854) + uint32_t _size1860; + ::apache::thrift::protocol::TType _etype1863; + xfer += iprot->readListBegin(_etype1863, _size1860); + this->success.resize(_size1860); + uint32_t _i1864; + for (_i1864 = 0; _i1864 < _size1860; ++_i1864) { - xfer += iprot->readString(this->success[_i1854]); + xfer += iprot->readString(this->success[_i1864]); } xfer += iprot->readListEnd(); } @@ -33889,10 +34116,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1855; - for (_iter1855 = this->success.begin(); _iter1855 != this->success.end(); ++_iter1855) + std::vector<std::string> ::const_iterator _iter1865; + for (_iter1865 = this->success.begin(); _iter1865 != this->success.end(); ++_iter1865) { - xfer += oprot->writeString((*_iter1855)); + xfer += oprot->writeString((*_iter1865)); } xfer += oprot->writeListEnd(); } @@ -33937,14 +34164,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1856; - ::apache::thrift::protocol::TType _etype1859; - xfer += iprot->readListBegin(_etype1859, _size1856); - (*(this->success)).resize(_size1856); - uint32_t _i1860; - for (_i1860 = 0; _i1860 < _size1856; ++_i1860) + uint32_t _size1866; + ::apache::thrift::protocol::TType _etype1869; + xfer += iprot->readListBegin(_etype1869, _size1866); + (*(this->success)).resize(_size1866); + uint32_t _i1870; + for (_i1870 = 0; _i1870 < _size1866; ++_i1870) { - xfer += iprot->readString((*(this->success))[_i1860]); + xfer += iprot->readString((*(this->success))[_i1870]); } xfer += iprot->readListEnd(); } @@ -35255,14 +35482,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1861; - ::apache::thrift::protocol::TType _etype1864; - xfer += iprot->readListBegin(_etype1864, _size1861); - this->success.resize(_size1861); - uint32_t _i1865; - for (_i1865 = 0; _i1865 < _size1861; ++_i1865) + uint32_t _size1871; + ::apache::thrift::protocol::TType _etype1874; + xfer += iprot->readListBegin(_etype1874, _size1871); + this->success.resize(_size1871); + uint32_t _i1875; + for (_i1875 = 0; _i1875 < _size1871; ++_i1875) { - xfer += iprot->readString(this->success[_i1865]); + xfer += iprot->readString(this->success[_i1875]); } xfer
+= iprot->readListEnd(); } @@ -35293,10 +35520,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1866; - for (_iter1866 = this->success.begin(); _iter1866 != this->success.end(); ++_iter1866) + std::vector<std::string> ::const_iterator _iter1876; + for (_iter1876 = this->success.begin(); _iter1876 != this->success.end(); ++_iter1876) { - xfer += oprot->writeString((*_iter1866)); + xfer += oprot->writeString((*_iter1876)); } xfer += oprot->writeListEnd(); } @@ -35337,14 +35564,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1867; - ::apache::thrift::protocol::TType _etype1870; - xfer += iprot->readListBegin(_etype1870, _size1867); - (*(this->success)).resize(_size1867); - uint32_t _i1871; - for (_i1871 = 0; _i1871 < _size1867; ++_i1871) + uint32_t _size1877; + ::apache::thrift::protocol::TType _etype1880; + xfer += iprot->readListBegin(_etype1880, _size1877); + (*(this->success)).resize(_size1877); + uint32_t _i1881; + for (_i1881 = 0; _i1881 < _size1877; ++_i1881) { - xfer += iprot->readString((*(this->success))[_i1871]); + xfer += iprot->readString((*(this->success))[_i1881]); } xfer += iprot->readListEnd(); } @@ -36070,14 +36297,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1872; - ::apache::thrift::protocol::TType _etype1875; - xfer += iprot->readListBegin(_etype1875, _size1872); - this->success.resize(_size1872); - uint32_t _i1876; - for (_i1876 = 0; _i1876 < _size1872; ++_i1876) + uint32_t _size1882; + ::apache::thrift::protocol::TType _etype1885; + xfer += iprot->readListBegin(_etype1885, _size1882); + this->success.resize(_size1882); + uint32_t _i1886; + for (_i1886 = 0; _i1886 < _size1882; ++_i1886) { - xfer += iprot->readString(this->success[_i1876]); + xfer += iprot->readString(this->success[_i1886]); } xfer += iprot->readListEnd(); } @@ -36108,10 +36335,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1877; - for (_iter1877 = this->success.begin(); _iter1877 != this->success.end(); ++_iter1877) + std::vector<std::string> ::const_iterator _iter1887; + for (_iter1887 = this->success.begin(); _iter1887 != this->success.end(); ++_iter1887) { - xfer += oprot->writeString((*_iter1877)); + xfer += oprot->writeString((*_iter1887)); } xfer += oprot->writeListEnd(); } @@ -36152,14 +36379,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1878; - ::apache::thrift::protocol::TType _etype1881; - xfer += iprot->readListBegin(_etype1881, _size1878); - (*(this->success)).resize(_size1878); - uint32_t _i1882; - for (_i1882 = 0; _i1882 < _size1878; ++_i1882) + uint32_t _size1888; + ::apache::thrift::protocol::TType _etype1891; + xfer += iprot->readListBegin(_etype1891, _size1888); +
(*(this->success)).resize(_size1888); + uint32_t _i1892; + for (_i1892 = 0; _i1892 < _size1888; ++_i1892) { - xfer += iprot->readString((*(this->success))[_i1882]); + xfer += iprot->readString((*(this->success))[_i1892]); } xfer += iprot->readListEnd(); } @@ -48143,14 +48370,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1883; - ::apache::thrift::protocol::TType _etype1886; - xfer += iprot->readListBegin(_etype1886, _size1883); - this->success.resize(_size1883); - uint32_t _i1887; - for (_i1887 = 0; _i1887 < _size1883; ++_i1887) + uint32_t _size1893; + ::apache::thrift::protocol::TType _etype1896; + xfer += iprot->readListBegin(_etype1896, _size1893); + this->success.resize(_size1893); + uint32_t _i1897; + for (_i1897 = 0; _i1897 < _size1893; ++_i1897) { - xfer += this->success[_i1887].read(iprot); + xfer += this->success[_i1897].read(iprot); } xfer += iprot->readListEnd(); } @@ -48197,10 +48424,10 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<SchemaVersion> ::const_iterator _iter1888; - for (_iter1888 = this->success.begin(); _iter1888 != this->success.end(); ++_iter1888) + std::vector<SchemaVersion> ::const_iterator _iter1898; + for (_iter1898 = this->success.begin(); _iter1898 != this->success.end(); ++_iter1898) { - xfer += (*_iter1888).write(oprot); + xfer += (*_iter1898).write(oprot); } xfer += oprot->writeListEnd(); } @@ -48249,14 +48476,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1889; - ::apache::thrift::protocol::TType _etype1892; - xfer += iprot->readListBegin(_etype1892, _size1889); - (*(this->success)).resize(_size1889); - uint32_t _i1893; - for (_i1893 = 0; _i1893 < _size1889; ++_i1893) + uint32_t _size1899; + ::apache::thrift::protocol::TType _etype1902; + xfer += iprot->readListBegin(_etype1902, _size1899); + (*(this->success)).resize(_size1899); + uint32_t _i1903; + for (_i1903 = 0; _i1903 < _size1899; ++_i1903) { - xfer += (*(this->success))[_i1893].read(iprot); + xfer += (*(this->success))[_i1903].read(iprot); } xfer += iprot->readListEnd(); } @@ -50309,14 +50536,14 @@ uint32_t ThriftHiveMetastore_get_runtime_stats_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1894; - ::apache::thrift::protocol::TType _etype1897; - xfer += iprot->readListBegin(_etype1897, _size1894); - this->success.resize(_size1894); - uint32_t _i1898; - for (_i1898 = 0; _i1898 < _size1894; ++_i1898) + uint32_t _size1904; + ::apache::thrift::protocol::TType _etype1907; + xfer += iprot->readListBegin(_etype1907, _size1904); + this->success.resize(_size1904); + uint32_t _i1908; + for (_i1908 = 0; _i1908 < _size1904; ++_i1908) { - xfer += this->success[_i1898].read(iprot); + xfer += this->success[_i1908].read(iprot); } xfer += iprot->readListEnd(); } @@ -50355,10 +50582,10 @@ uint32_t ThriftHiveMetastore_get_runtime_stats_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<RuntimeStat>
::const_iterator _iter1899; - for (_iter1899 = this->success.begin(); _iter1899 != this->success.end(); ++_iter1899) + std::vector<RuntimeStat> ::const_iterator _iter1909; + for (_iter1909 = this->success.begin(); _iter1909 != this->success.end(); ++_iter1909) { - xfer += (*_iter1899).write(oprot); + xfer += (*_iter1909).write(oprot); } xfer += oprot->writeListEnd(); } @@ -50403,14 +50630,14 @@ uint32_t ThriftHiveMetastore_get_runtime_stats_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1900; - ::apache::thrift::protocol::TType _etype1903; - xfer += iprot->readListBegin(_etype1903, _size1900); - (*(this->success)).resize(_size1900); - uint32_t _i1904; - for (_i1904 = 0; _i1904 < _size1900; ++_i1904) + uint32_t _size1910; + ::apache::thrift::protocol::TType _etype1913; + xfer += iprot->readListBegin(_etype1913, _size1910); + (*(this->success)).resize(_size1910); + uint32_t _i1914; + for (_i1914 = 0; _i1914 < _size1910; ++_i1914) { - xfer += (*(this->success))[_i1904].read(iprot); + xfer += (*(this->success))[_i1914].read(iprot); } xfer += iprot->readListEnd(); } @@ -55859,6 +56086,70 @@ void ThriftHiveMetastoreClient::recv_alter_partitions_with_environment_context() return; } +void ThriftHiveMetastoreClient::alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) +{ + send_alter_partitions_with_environment_context_req(req); + recv_alter_partitions_with_environment_context_req(_return); +} + +void ThriftHiveMetastoreClient::send_alter_partitions_with_environment_context_req(const AlterPartitionsRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("alter_partitions_with_environment_context_req", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_alter_partitions_with_environment_context_req_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("alter_partitions_with_environment_context_req") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_partitions_with_environment_context_req failed: unknown result"); +} + void ThriftHiveMetastoreClient::alter_partition_with_environment_context(const std::string&
db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); @@ -68658,6 +68949,66 @@ void ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_con } } +void ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_context_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partitions_with_environment_context_req", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context_req"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context_req"); + } + + ThriftHiveMetastore_alter_partitions_with_environment_context_req_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context_req", bytes); + } + + ThriftHiveMetastore_alter_partitions_with_environment_context_req_result result; + try { + iface_->alter_partitions_with_environment_context_req(result.success, args.req); + result.__isset.success = true; + } catch (InvalidOperationException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context_req"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("alter_partitions_with_environment_context_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context_req"); + } + + oprot->writeMessageBegin("alter_partitions_with_environment_context_req", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context_req", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_alter_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -83714,6 +84065,98 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment } // end while(true) } +void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) +{ + int32_t seqid = send_alter_partitions_with_environment_context_req(req); + recv_alter_partitions_with_environment_context_req(_return, seqid); +} + +int32_t 
ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context_req(const AlterPartitionsRequest& req) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("alter_partitions_with_environment_context_req", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_alter_partitions_with_environment_context_req_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("alter_partitions_with_environment_context_req") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_partitions_with_environment_context_req failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + void ThriftHiveMetastoreConcurrentClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { int32_t seqid = send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 352f5c7fc4..b8872102fa 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -106,6 +106,7 @@ class 
ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0; virtual void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts) = 0; virtual void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) = 0; + virtual void alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) = 0; virtual void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) = 0; virtual void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part) = 0; virtual bool partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception) = 0; @@ -520,6 +521,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void alter_partitions_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<Partition> & /* new_parts */, const EnvironmentContext& /* environment_context */) { return; } + void alter_partitions_with_environment_context_req(AlterPartitionsResponse& /* _return */, const AlterPartitionsRequest& /* req */) { + return; + } void alter_partition_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */, const EnvironmentContext& /* environment_context */) { return; } @@ -11773,6 +11777,126 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_presult { }; +typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_req_args__isset { + _ThriftHiveMetastore_alter_partitions_with_environment_context_req_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_alter_partitions_with_environment_context_req_args__isset; + +class ThriftHiveMetastore_alter_partitions_with_environment_context_req_args { + public: + + ThriftHiveMetastore_alter_partitions_with_environment_context_req_args(const ThriftHiveMetastore_alter_partitions_with_environment_context_req_args&); + ThriftHiveMetastore_alter_partitions_with_environment_context_req_args& operator=(const ThriftHiveMetastore_alter_partitions_with_environment_context_req_args&); + ThriftHiveMetastore_alter_partitions_with_environment_context_req_args() { + } + + virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_req_args() throw(); + AlterPartitionsRequest req; + + _ThriftHiveMetastore_alter_partitions_with_environment_context_req_args__isset __isset; + + void __set_req(const AlterPartitionsRequest& val); + + bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_req_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_alter_partitions_with_environment_context_req_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_alter_partitions_with_environment_context_req_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class
ThriftHiveMetastore_alter_partitions_with_environment_context_req_pargs { + public: + + + virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_req_pargs() throw(); + const AlterPartitionsRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_req_result__isset { + _ThriftHiveMetastore_alter_partitions_with_environment_context_req_result__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_alter_partitions_with_environment_context_req_result__isset; + +class ThriftHiveMetastore_alter_partitions_with_environment_context_req_result { + public: + + ThriftHiveMetastore_alter_partitions_with_environment_context_req_result(const ThriftHiveMetastore_alter_partitions_with_environment_context_req_result&); + ThriftHiveMetastore_alter_partitions_with_environment_context_req_result& operator=(const ThriftHiveMetastore_alter_partitions_with_environment_context_req_result&); + ThriftHiveMetastore_alter_partitions_with_environment_context_req_result() { + } + + virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_req_result() throw(); + AlterPartitionsResponse success; + InvalidOperationException o1; + MetaException o2; + + _ThriftHiveMetastore_alter_partitions_with_environment_context_req_result__isset __isset; + + void __set_success(const AlterPartitionsResponse& val); + + void __set_o1(const InvalidOperationException& val); + + void __set_o2(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_req_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_alter_partitions_with_environment_context_req_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_alter_partitions_with_environment_context_req_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult__isset { + _ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult__isset; + +class ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult { + public: + + + virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult() throw(); + AlterPartitionsResponse* success; + InvalidOperationException o1; + MetaException o2; + + _ThriftHiveMetastore_alter_partitions_with_environment_context_req_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_alter_partition_with_environment_context_args__isset { _ThriftHiveMetastore_alter_partition_with_environment_context_args__isset() : db_name(false), tbl_name(false), new_part(false), environment_context(false) {} bool db_name :1; @@ -26583,6 +26707,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const 
std::vector<Partition> & new_parts, const EnvironmentContext& environment_context); void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context); void recv_alter_partitions_with_environment_context(); + void alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req); + void send_alter_partitions_with_environment_context_req(const AlterPartitionsRequest& req); + void recv_alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return); void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); void recv_alter_partition_with_environment_context(); @@ -27049,6 +27176,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_alter_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_alter_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_alter_partitions_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_alter_partitions_with_environment_context_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_alter_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_rename_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_partition_name_has_valid_characters(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -27261,6 +27389,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["alter_partition"] = &ThriftHiveMetastoreProcessor::process_alter_partition; processMap_["alter_partitions"] = &ThriftHiveMetastoreProcessor::process_alter_partitions; processMap_["alter_partitions_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_context; + processMap_["alter_partitions_with_environment_context_req"] = &ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_context_req; processMap_["alter_partition_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_alter_partition_with_environment_context; processMap_["rename_partition"] = &ThriftHiveMetastoreProcessor::process_rename_partition; processMap_["partition_name_has_valid_characters"] = &ThriftHiveMetastoreProcessor::process_partition_name_has_valid_characters; @@ -28222,6 +28351,16 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); } + void
alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->alter_partitions_with_environment_context_req(_return, req); + } + ifaces_[i]->alter_partitions_with_environment_context_req(_return, req); + return; + } + void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { size_t sz = ifaces_.size(); size_t i = 0; @@ -29685,6 +29824,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context); int32_t send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context); void recv_alter_partitions_with_environment_context(const int32_t seqid); + void alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req); + int32_t send_alter_partitions_with_environment_context_req(const AlterPartitionsRequest& req); + void recv_alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return, const int32_t seqid); void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); int32_t send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); void recv_alter_partition_with_environment_context(const int32_t seqid); diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 789c150922..fba2f579d3 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -442,6 +442,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("alter_partitions_with_environment_context\n"); } + void alter_partitions_with_environment_context_req(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) { + // Your implementation goes here + printf("alter_partitions_with_environment_context_req\n"); + } + void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { // Your implementation goes here printf("alter_partition_with_environment_context\n"); diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 7f06b3bcee..3b27199ef3 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -6435,6 +6435,16 @@ void Table::__set_ownerType(const PrincipalType::type val) { __isset.ownerType = true; } +void Table::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + +void Table::__set_isStatsCompliant(const bool val) { +
this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -6629,6 +6639,22 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 19: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 20: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isStatsCompliant); + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -6741,6 +6767,16 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeI32((int32_t)this->ownerType); xfer += oprot->writeFieldEnd(); } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 19); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_BOOL, 20); + xfer += oprot->writeBool(this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -6766,6 +6802,8 @@ void swap(Table &a, Table &b) { swap(a.creationMetadata, b.creationMetadata); swap(a.catName, b.catName); swap(a.ownerType, b.ownerType); + swap(a.writeId, b.writeId); + swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); } @@ -6788,6 +6826,8 @@ Table::Table(const Table& other251) { creationMetadata = other251.creationMetadata; catName = other251.catName; ownerType = other251.ownerType; + writeId = other251.writeId; + isStatsCompliant = other251.isStatsCompliant; __isset = other251.__isset; } Table& Table::operator=(const Table& other252) { @@ -6809,6 +6849,8 @@ Table& Table::operator=(const Table& other252) { creationMetadata = other252.creationMetadata; catName = other252.catName; ownerType = other252.ownerType; + writeId = other252.writeId; + isStatsCompliant = other252.isStatsCompliant; __isset = other252.__isset; return *this; } @@ -6833,6 +6875,8 @@ void Table::printTo(std::ostream& out) const { out << ", " << "creationMetadata="; (__isset.creationMetadata ? (out << to_string(creationMetadata)) : (out << "<null>")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>")); out << ", " << "ownerType="; (__isset.ownerType ? (out << to_string(ownerType)) : (out << "<null>")); + out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "<null>")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ?
(out << to_string(isStatsCompliant)) : (out << "<null>")); out << ")"; } @@ -6879,6 +6923,16 @@ void Partition::__set_catName(const std::string& val) { __isset.catName = true; } +void Partition::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + +void Partition::__set_isStatsCompliant(const bool val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -6999,6 +7053,22 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 10: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 11: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isStatsCompliant); + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -7071,6 +7141,16 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 10); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_BOOL, 11); + xfer += oprot->writeBool(this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -7087,6 +7167,8 @@ void swap(Partition &a, Partition &b) { swap(a.parameters, b.parameters); swap(a.privileges, b.privileges); swap(a.catName, b.catName); + swap(a.writeId, b.writeId); + swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); } @@ -7100,6 +7182,8 @@ Partition::Partition(const Partition& other267) { parameters = other267.parameters; privileges = other267.privileges; catName = other267.catName; + writeId = other267.writeId; + isStatsCompliant = other267.isStatsCompliant; __isset = other267.__isset; } Partition& Partition::operator=(const Partition& other268) { @@ -7112,6 +7196,8 @@ Partition& Partition::operator=(const Partition& other268) { parameters = other268.parameters; privileges = other268.privileges; catName = other268.catName; + writeId = other268.writeId; + isStatsCompliant = other268.isStatsCompliant; __isset = other268.__isset; return *this; } @@ -7127,6 +7213,8 @@ void Partition::printTo(std::ostream& out) const { out << ", " << "parameters=" << to_string(parameters); out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "<null>")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>")); + out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "<null>")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ?
(out << to_string(isStatsCompliant)) : (out << "<null>")); out << ")"; } @@ -7626,6 +7714,16 @@ void PartitionSpec::__set_catName(const std::string& val) { __isset.catName = true; } +void PartitionSpec::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + +void PartitionSpec::__set_isStatsCompliant(const bool val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -7695,6 +7793,22 @@ uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 8: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isStatsCompliant); + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -7739,6 +7853,16 @@ uint32_t PartitionSpec::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 7); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_BOOL, 8); + xfer += oprot->writeBool(this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -7752,6 +7876,8 @@ void swap(PartitionSpec &a, PartitionSpec &b) { swap(a.sharedSDPartitionSpec, b.sharedSDPartitionSpec); swap(a.partitionList, b.partitionList); swap(a.catName, b.catName); + swap(a.writeId, b.writeId); + swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); } @@ -7762,6 +7888,8 @@ PartitionSpec::PartitionSpec(const PartitionSpec& other301) { sharedSDPartitionSpec = other301.sharedSDPartitionSpec; partitionList = other301.partitionList; catName = other301.catName; + writeId = other301.writeId; + isStatsCompliant = other301.isStatsCompliant; __isset = other301.__isset; } PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other302) { @@ -7771,6 +7899,8 @@ PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other302) { sharedSDPartitionSpec = other302.sharedSDPartitionSpec; partitionList = other302.partitionList; catName = other302.catName; + writeId = other302.writeId; + isStatsCompliant = other302.isStatsCompliant; __isset = other302.__isset; return *this; } @@ -7783,6 +7913,8 @@ void PartitionSpec::printTo(std::ostream& out) const { out << ", " << "sharedSDPartitionSpec="; (__isset.sharedSDPartitionSpec ? (out << to_string(sharedSDPartitionSpec)) : (out << "<null>")); out << ", " << "partitionList="; (__isset.partitionList ? (out << to_string(partitionList)) : (out << "<null>")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>")); + out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "<null>")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ?
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -9739,6 +9871,21 @@ void ColumnStatistics::__set_statsObj(const std::vector & v this->statsObj = val; } +void ColumnStatistics::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void ColumnStatistics::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + +void ColumnStatistics::__set_isStatsCompliant(const bool val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -9790,6 +9937,30 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isStatsCompliant); + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -9827,6 +9998,21 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c } xfer += oprot->writeFieldEnd(); + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 3); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_BOOL, 5); + xfer += oprot->writeBool(this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -9836,15 +10022,27 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) { using ::std::swap; swap(a.statsDesc, b.statsDesc); swap(a.statsObj, b.statsObj); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } ColumnStatistics::ColumnStatistics(const ColumnStatistics& other333) { statsDesc = other333.statsDesc; statsObj = other333.statsObj; + txnId = other333.txnId; + validWriteIdList = other333.validWriteIdList; + isStatsCompliant = other333.isStatsCompliant; + __isset = other333.__isset; } ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other334) { statsDesc = other334.statsDesc; statsObj = other334.statsObj; + txnId = other334.txnId; + validWriteIdList = other334.validWriteIdList; + isStatsCompliant = other334.isStatsCompliant; + __isset = other334.__isset; return *this; } void ColumnStatistics::printTo(std::ostream& out) const { @@ -9852,6 +10050,9 @@ void ColumnStatistics::printTo(std::ostream& out) const { out << "ColumnStatistics("; out << "statsDesc=" << to_string(statsDesc); out << ", " << "statsObj=" << to_string(statsObj); + out << ", " << "txnId="; 
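/* Annotation (not part of the generated file): ColumnStatistics previously had only
   required members, so its swap(), copy constructor and operator= never touched an
   __isset word; the hunks above add one now that txnId (field 3), validWriteIdList
   (field 4) and isStatsCompliant (field 5) are optional. Omitting it would silently
   drop the "is set" bits on copy, e.g.:

     ColumnStatistics src, dst;
     src.__set_txnId(7);
     dst = src;                 // operator= must copy __isset too; otherwise
     assert(dst.__isset.txnId); // this fails and dst.write() never emits field 3
*/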
(__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -9868,6 +10069,11 @@ void AggrStats::__set_partsFound(const int64_t val) { this->partsFound = val; } +void AggrStats::__set_isStatsCompliant(const bool val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -9919,6 +10125,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isStatsCompliant); + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -9956,6 +10170,11 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeI64(this->partsFound); xfer += oprot->writeFieldEnd(); + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool(this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -9965,15 +10184,21 @@ void swap(AggrStats &a, AggrStats &b) { using ::std::swap; swap(a.colStats, b.colStats); swap(a.partsFound, b.partsFound); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } AggrStats::AggrStats(const AggrStats& other341) { colStats = other341.colStats; partsFound = other341.partsFound; + isStatsCompliant = other341.isStatsCompliant; + __isset = other341.__isset; } AggrStats& AggrStats::operator=(const AggrStats& other342) { colStats = other342.colStats; partsFound = other342.partsFound; + isStatsCompliant = other342.isStatsCompliant; + __isset = other342.__isset; return *this; } void AggrStats::printTo(std::ostream& out) const { @@ -9981,6 +10206,7 @@ void AggrStats::printTo(std::ostream& out) const { out << "AggrStats("; out << "colStats=" << to_string(colStats); out << ", " << "partsFound=" << to_string(partsFound); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? 
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -9998,6 +10224,21 @@ void SetPartitionsStatsRequest::__set_needMerge(const bool val) { __isset.needMerge = true; } +void SetPartitionsStatsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void SetPartitionsStatsRequest::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + +void SetPartitionsStatsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -10048,6 +10289,30 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -10084,6 +10349,21 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeBool(this->needMerge); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 3); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 4); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -10093,17 +10373,26 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) { using ::std::swap; swap(a.colStats, b.colStats); swap(a.needMerge, b.needMerge); + swap(a.txnId, b.txnId); + swap(a.writeId, b.writeId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other349) { colStats = other349.colStats; needMerge = other349.needMerge; + txnId = other349.txnId; + writeId = other349.writeId; + validWriteIdList = other349.validWriteIdList; __isset = other349.__isset; } SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other350) { colStats = other350.colStats; needMerge = other350.needMerge; + txnId = other350.txnId; + writeId = other350.writeId; + validWriteIdList = other350.validWriteIdList; __isset = other350.__isset; return *this; } @@ -10112,6 +10401,9 @@ void SetPartitionsStatsRequest::printTo(std::ostream& out) const { out << "SetPartitionsStatsRequest("; out << "colStats=" << to_string(colStats); out << ", " << "needMerge="; (__isset.needMerge ? 
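/* Annotation (not part of the generated file): SetPartitionsStatsRequest now
   carries the stats writer's transactional context as txnId (field 3), writeId
   (field 4) and validWriteIdList (field 5). A client-side sketch, assuming `stats`
   (a std::vector<ColumnStatistics>) was assembled elsewhere; the validWriteIdList
   value is only an illustration of Hive's ValidWriteIdList string form:

     SetPartitionsStatsRequest req;
     req.__set_colStats(stats);
     req.__set_needMerge(false);
     req.__set_txnId(100);    // transaction performing the stats write
     req.__set_writeId(5);    // table write id allocated inside that txn
     req.__set_validWriteIdList("db.tbl:5:9223372036854775807::");
*/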
(out << to_string(needMerge)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -12973,6 +13265,11 @@ void TableStatsResult::__set_tableStats(const std::vector & this->tableStats = val; } +void TableStatsResult::__set_isStatsCompliant(const bool val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13015,6 +13312,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isStatsCompliant); + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13046,6 +13351,11 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c } xfer += oprot->writeFieldEnd(); + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool(this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13054,19 +13364,26 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c void swap(TableStatsResult &a, TableStatsResult &b) { using ::std::swap; swap(a.tableStats, b.tableStats); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } TableStatsResult::TableStatsResult(const TableStatsResult& other503) { tableStats = other503.tableStats; + isStatsCompliant = other503.isStatsCompliant; + __isset = other503.__isset; } TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other504) { tableStats = other504.tableStats; + isStatsCompliant = other504.isStatsCompliant; + __isset = other504.__isset; return *this; } void TableStatsResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "TableStatsResult("; out << "tableStats=" << to_string(tableStats); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? 
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -13079,6 +13396,11 @@ void PartitionsStatsResult::__set_partStats(const std::mappartStats = val; } +void PartitionsStatsResult::__set_isStatsCompliant(const bool val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13136,6 +13458,14 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isStatsCompliant); + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13176,6 +13506,11 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr } xfer += oprot->writeFieldEnd(); + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool(this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13184,19 +13519,26 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr void swap(PartitionsStatsResult &a, PartitionsStatsResult &b) { using ::std::swap; swap(a.partStats, b.partStats); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other519) { partStats = other519.partStats; + isStatsCompliant = other519.isStatsCompliant; + __isset = other519.__isset; } PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other520) { partStats = other520.partStats; + isStatsCompliant = other520.isStatsCompliant; + __isset = other520.__isset; return *this; } void PartitionsStatsResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "PartitionsStatsResult("; out << "partStats=" << to_string(partStats); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? 
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -13222,6 +13564,16 @@ void TableStatsRequest::__set_catName(const std::string& val) { __isset.catName = true; } +void TableStatsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void TableStatsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13290,6 +13642,22 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13338,6 +13706,16 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 5); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13349,6 +13727,8 @@ void swap(TableStatsRequest &a, TableStatsRequest &b) { swap(a.tblName, b.tblName); swap(a.colNames, b.colNames); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } @@ -13357,6 +13737,8 @@ TableStatsRequest::TableStatsRequest(const TableStatsRequest& other527) { tblName = other527.tblName; colNames = other527.colNames; catName = other527.catName; + txnId = other527.txnId; + validWriteIdList = other527.validWriteIdList; __isset = other527.__isset; } TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other528) { @@ -13364,6 +13746,8 @@ TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other52 tblName = other528.tblName; colNames = other528.colNames; catName = other528.catName; + txnId = other528.txnId; + validWriteIdList = other528.validWriteIdList; __isset = other528.__isset; return *this; } @@ -13374,6 +13758,8 @@ void TableStatsRequest::printTo(std::ostream& out) const { out << ", " << "tblName=" << to_string(tblName); out << ", " << "colNames=" << to_string(colNames); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? 
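/* Annotation (not part of the generated file): the request/result structs pair up.
   TableStatsRequest ships the reader's snapshot as txnId (field 5) and
   validWriteIdList (field 6), and TableStatsResult answers with isStatsCompliant so
   the caller can tell whether the stored stats are usable under that snapshot.
   Sketch, with `cols` and `snapshot` assumed to exist; the RPC itself goes through
   the generated ThriftHiveMetastore client, which is not part of this diff:

     TableStatsRequest req;
     req.__set_dbName("db");
     req.__set_tblName("tbl");
     req.__set_colNames(cols);             // std::vector<std::string>
     req.__set_txnId(100);
     req.__set_validWriteIdList(snapshot); // reader's ValidWriteIdList string
     // after the call: if the result's __isset.isStatsCompliant is false, or the
     // flag itself is false, treat the returned stats as stale for this snapshot.
*/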
(out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -13403,6 +13789,16 @@ void PartitionsStatsRequest::__set_catName(const std::string& val) { __isset.catName = true; } +void PartitionsStatsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void PartitionsStatsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13492,6 +13888,22 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13554,6 +13966,16 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 6); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13566,6 +13988,8 @@ void swap(PartitionsStatsRequest &a, PartitionsStatsRequest &b) { swap(a.colNames, b.colNames); swap(a.partNames, b.partNames); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } @@ -13575,6 +13999,8 @@ PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& oth colNames = other541.colNames; partNames = other541.partNames; catName = other541.catName; + txnId = other541.txnId; + validWriteIdList = other541.validWriteIdList; __isset = other541.__isset; } PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other542) { @@ -13583,6 +14009,8 @@ PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsR colNames = other542.colNames; partNames = other542.partNames; catName = other542.catName; + txnId = other542.txnId; + validWriteIdList = other542.validWriteIdList; __isset = other542.__isset; return *this; } @@ -13594,6 +14022,8 @@ void PartitionsStatsRequest::printTo(std::ostream& out) const { out << ", " << "colNames=" << to_string(colNames); out << ", " << "partNames=" << to_string(partNames); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? 
(out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -13607,6 +14037,11 @@ void AddPartitionsResult::__set_partitions(const std::vector & val) { __isset.partitions = true; } +void AddPartitionsResult::__set_isStatsCompliant(const bool val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13648,6 +14083,14 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isStatsCompliant); + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13678,6 +14121,11 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot } xfer += oprot->writeFieldEnd(); } + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool(this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13686,15 +14134,18 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot void swap(AddPartitionsResult &a, AddPartitionsResult &b) { using ::std::swap; swap(a.partitions, b.partitions); + swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); } AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other549) { partitions = other549.partitions; + isStatsCompliant = other549.isStatsCompliant; __isset = other549.__isset; } AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other550) { partitions = other550.partitions; + isStatsCompliant = other550.isStatsCompliant; __isset = other550.__isset; return *this; } @@ -13702,6 +14153,7 @@ void AddPartitionsResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "AddPartitionsResult("; out << "partitions="; (__isset.partitions ? (out << to_string(partitions)) : (out << "")); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? 
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -13736,6 +14188,16 @@ void AddPartitionsRequest::__set_catName(const std::string& val) { __isset.catName = true; } +void AddPartitionsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void AddPartitionsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13821,6 +14283,22 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 8: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13880,6 +14358,16 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 7); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13893,6 +14381,8 @@ void swap(AddPartitionsRequest &a, AddPartitionsRequest &b) { swap(a.ifNotExists, b.ifNotExists); swap(a.needResult, b.needResult); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } @@ -13903,6 +14393,8 @@ AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other557) ifNotExists = other557.ifNotExists; needResult = other557.needResult; catName = other557.catName; + txnId = other557.txnId; + validWriteIdList = other557.validWriteIdList; __isset = other557.__isset; } AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other558) { @@ -13912,6 +14404,8 @@ AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest ifNotExists = other558.ifNotExists; needResult = other558.needResult; catName = other558.catName; + txnId = other558.txnId; + validWriteIdList = other558.validWriteIdList; __isset = other558.__isset; return *this; } @@ -13924,6 +14418,8 @@ void AddPartitionsRequest::printTo(std::ostream& out) const { out << ", " << "ifNotExists=" << to_string(ifNotExists); out << ", " << "needResult="; (__isset.needResult ? (out << to_string(needResult)) : (out << "")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? 
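/* Annotation (not part of the generated file): AddPartitionsRequest follows the
   same recipe at ids 7 (txnId) and 8 (validWriteIdList), and AddPartitionsResult
   reports isStatsCompliant (id 2) on the way back. Sketch, assuming `parts`
   (std::vector<Partition>) and `snapshot` exist, and that the partition list field
   keeps its generated name `parts`:

     AddPartitionsRequest req;
     req.__set_dbName("db");
     req.__set_tblName("tbl");
     req.__set_parts(parts);
     req.__set_ifNotExists(true);
     req.__set_txnId(100);
     req.__set_validWriteIdList(snapshot);
*/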
(out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -24579,6 +25075,16 @@ void GetTableRequest::__set_catName(const std::string& val) { __isset.catName = true; } +void GetTableRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void GetTableRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -24634,8 +25140,24 @@ uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - default: - xfer += iprot->skip(ftype); + case 5: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); break; } xfer += iprot->readFieldEnd(); @@ -24673,6 +25195,16 @@ uint32_t GetTableRequest::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeString(this->catName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 5); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -24684,6 +25216,8 @@ void swap(GetTableRequest &a, GetTableRequest &b) { swap(a.tblName, b.tblName); swap(a.capabilities, b.capabilities); swap(a.catName, b.catName); + swap(a.txnId, b.txnId); + swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } @@ -24692,6 +25226,8 @@ GetTableRequest::GetTableRequest(const GetTableRequest& other986) { tblName = other986.tblName; capabilities = other986.capabilities; catName = other986.catName; + txnId = other986.txnId; + validWriteIdList = other986.validWriteIdList; __isset = other986.__isset; } GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other987) { @@ -24699,6 +25235,8 @@ GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other987) { tblName = other987.tblName; capabilities = other987.capabilities; catName = other987.catName; + txnId = other987.txnId; + validWriteIdList = other987.validWriteIdList; __isset = other987.__isset; return *this; } @@ -24709,6 +25247,8 @@ void GetTableRequest::printTo(std::ostream& out) const { out << ", " << "tblName=" << to_string(tblName); out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? 
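/* Annotation (not part of the generated file): note how the GetTableRequest::read()
   hunk above removes and re-emits the `default:` arm purely so the new `case 5:`
   (txnId) and `case 6:` (validWriteIdList) labels slot in before it; the arm itself
   is unchanged, so peers that never learned these ids keep skipping them. Sketch of
   a snapshot-aware table fetch:

     GetTableRequest req;
     req.__set_dbName("db");
     req.__set_tblName("tbl");
     req.__set_txnId(100);                 // optional, field 5
     req.__set_validWriteIdList(snapshot); // optional, field 6
     // an older server falls into the default arm for ids 5/6 and simply performs
     // the pre-upgrade lookup.
*/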
(out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -24721,6 +25261,11 @@ void GetTableResult::__set_table(const Table& val) { this->table = val; } +void GetTableResult::__set_isStatsCompliant(const bool val) { + this->isStatsCompliant = val; +__isset.isStatsCompliant = true; +} + uint32_t GetTableResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -24751,6 +25296,14 @@ uint32_t GetTableResult::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isStatsCompliant); + this->__isset.isStatsCompliant = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -24774,6 +25327,11 @@ uint32_t GetTableResult::write(::apache::thrift::protocol::TProtocol* oprot) con xfer += this->table.write(oprot); xfer += oprot->writeFieldEnd(); + if (this->__isset.isStatsCompliant) { + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool(this->isStatsCompliant); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -24782,19 +25340,26 @@ uint32_t GetTableResult::write(::apache::thrift::protocol::TProtocol* oprot) con void swap(GetTableResult &a, GetTableResult &b) { using ::std::swap; swap(a.table, b.table); + swap(a.isStatsCompliant, b.isStatsCompliant); + swap(a.__isset, b.__isset); } GetTableResult::GetTableResult(const GetTableResult& other988) { table = other988.table; + isStatsCompliant = other988.isStatsCompliant; + __isset = other988.__isset; } GetTableResult& GetTableResult::operator=(const GetTableResult& other989) { table = other989.table; + isStatsCompliant = other989.isStatsCompliant; + __isset = other989.__isset; return *this; } void GetTableResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "GetTableResult("; out << "table=" << to_string(table); + out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? 
(out << to_string(isStatsCompliant)) : (out << "")); out << ")"; } @@ -31760,6 +32325,312 @@ void GetRuntimeStatsRequest::printTo(std::ostream& out) const { } +AlterPartitionsRequest::~AlterPartitionsRequest() throw() { +} + + +void AlterPartitionsRequest::__set_dbName(const std::string& val) { + this->dbName = val; +} + +void AlterPartitionsRequest::__set_tableName(const std::string& val) { + this->tableName = val; +} + +void AlterPartitionsRequest::__set_partitions(const std::vector<Partition> & val) { + this->partitions = val; +} + +void AlterPartitionsRequest::__set_environmentContext(const EnvironmentContext& val) { + this->environmentContext = val; +} + +void AlterPartitionsRequest::__set_txnId(const int64_t val) { + this->txnId = val; +__isset.txnId = true; +} + +void AlterPartitionsRequest::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + +void AlterPartitionsRequest::__set_validWriteIdList(const std::string& val) { + this->validWriteIdList = val; +__isset.validWriteIdList = true; +} + +uint32_t AlterPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_dbName = false; + bool isset_tableName = false; + bool isset_partitions = false; + bool isset_environmentContext = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + isset_tableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->partitions.clear(); + uint32_t _size1196; + ::apache::thrift::protocol::TType _etype1199; + xfer += iprot->readListBegin(_etype1199, _size1196); + this->partitions.resize(_size1196); + uint32_t _i1200; + for (_i1200 = 0; _i1200 < _size1196; ++_i1200) + { + xfer += this->partitions[_i1200].read(iprot); + } + xfer += iprot->readListEnd(); + } + isset_partitions = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->environmentContext.read(iprot); + isset_environmentContext = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + this->__isset.txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validWriteIdList); + this->__isset.validWriteIdList = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_dbName) + throw
TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_partitions) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_environmentContext) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t AlterPartitionsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AlterPartitionsRequest"); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size())); + std::vector<Partition> ::const_iterator _iter1201; + for (_iter1201 = this->partitions.begin(); _iter1201 != this->partitions.end(); ++_iter1201) + { + xfer += (*_iter1201).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environmentContext", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->environmentContext.write(oprot); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.txnId) { + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 5); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 6); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.validWriteIdList) { + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->validWriteIdList); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AlterPartitionsRequest &a, AlterPartitionsRequest &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tableName, b.tableName); + swap(a.partitions, b.partitions); + swap(a.environmentContext, b.environmentContext); + swap(a.txnId, b.txnId); + swap(a.writeId, b.writeId); + swap(a.validWriteIdList, b.validWriteIdList); + swap(a.__isset, b.__isset); +} + +AlterPartitionsRequest::AlterPartitionsRequest(const AlterPartitionsRequest& other1202) { + dbName = other1202.dbName; + tableName = other1202.tableName; + partitions = other1202.partitions; + environmentContext = other1202.environmentContext; + txnId = other1202.txnId; + writeId = other1202.writeId; + validWriteIdList = other1202.validWriteIdList; + __isset = other1202.__isset; +} +AlterPartitionsRequest& AlterPartitionsRequest::operator=(const AlterPartitionsRequest& other1203) { + dbName = other1203.dbName; + tableName = other1203.tableName; + partitions = other1203.partitions; + environmentContext = other1203.environmentContext; + txnId = other1203.txnId; + writeId = other1203.writeId; + validWriteIdList = other1203.validWriteIdList; + __isset = other1203.__isset; + return *this; +} +void AlterPartitionsRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out <<
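/* Annotation (not part of the generated file): AlterPartitionsRequest is new in
   this patch. dbName, tableName, partitions and environmentContext are required
   (read() above throws TProtocolException::INVALID_DATA when any of them is
   missing from the wire), while txnId, writeId and validWriteIdList stay optional.
   Construction sketch, with `parts` and `snapshot` assumed:

     AlterPartitionsRequest req;
     req.__set_dbName("db");
     req.__set_tableName("tbl");
     req.__set_partitions(parts);                  // std::vector<Partition>
     req.__set_environmentContext(EnvironmentContext());
     req.__set_txnId(100);
     req.__set_writeId(5);
     req.__set_validWriteIdList(snapshot);

   AlterPartitionsResponse (below) deliberately carries no payload; an empty result
   struct lets the RPC grow response fields later without breaking the wire format.
*/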
"AlterPartitionsRequest("; + out << "dbName=" << to_string(dbName); + out << ", " << "tableName=" << to_string(tableName); + out << ", " << "partitions=" << to_string(partitions); + out << ", " << "environmentContext=" << to_string(environmentContext); + out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "")); + out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); + out << ")"; +} + + +AlterPartitionsResponse::~AlterPartitionsResponse() throw() { +} + + +uint32_t AlterPartitionsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t AlterPartitionsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AlterPartitionsResponse"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AlterPartitionsResponse &a, AlterPartitionsResponse &b) { + using ::std::swap; + (void) a; + (void) b; +} + +AlterPartitionsResponse::AlterPartitionsResponse(const AlterPartitionsResponse& other1204) { + (void) other1204; +} +AlterPartitionsResponse& AlterPartitionsResponse::operator=(const AlterPartitionsResponse& other1205) { + (void) other1205; + return *this; +} +void AlterPartitionsResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AlterPartitionsResponse("; + out << ")"; +} + + MetaException::~MetaException() throw() { } @@ -31829,13 +32700,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other1196) : TException() { - message = other1196.message; - __isset = other1196.__isset; +MetaException::MetaException(const MetaException& other1206) : TException() { + message = other1206.message; + __isset = other1206.__isset; } -MetaException& MetaException::operator=(const MetaException& other1197) { - message = other1197.message; - __isset = other1197.__isset; +MetaException& MetaException::operator=(const MetaException& other1207) { + message = other1207.message; + __isset = other1207.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -31926,13 +32797,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other1198) : TException() { - message = other1198.message; - __isset = other1198.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other1208) : TException() { + message = other1208.message; + __isset = other1208.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1199) { - message = other1199.message; - __isset = other1199.__isset; 
+UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1209) { + message = other1209.message; + __isset = other1209.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -32023,13 +32894,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other1200) : TException() { - message = other1200.message; - __isset = other1200.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other1210) : TException() { + message = other1210.message; + __isset = other1210.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1201) { - message = other1201.message; - __isset = other1201.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1211) { + message = other1211.message; + __isset = other1211.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -32120,13 +32991,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1202) : TException() { - message = other1202.message; - __isset = other1202.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1212) : TException() { + message = other1212.message; + __isset = other1212.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1203) { - message = other1203.message; - __isset = other1203.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1213) { + message = other1213.message; + __isset = other1213.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -32217,13 +33088,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1204) : TException() { - message = other1204.message; - __isset = other1204.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1214) : TException() { + message = other1214.message; + __isset = other1214.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1205) { - message = other1205.message; - __isset = other1205.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1215) { + message = other1215.message; + __isset = other1215.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -32314,13 +33185,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1206) : TException() { - message = other1206.message; - __isset = other1206.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1216) : TException() { + message = other1216.message; + __isset = other1216.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1207) { - message = other1207.message; - __isset = other1207.__isset; +UnknownPartitionException& 
UnknownPartitionException::operator=(const UnknownPartitionException& other1217) { + message = other1217.message; + __isset = other1217.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -32411,13 +33282,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1208) : TException() { - message = other1208.message; - __isset = other1208.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1218) : TException() { + message = other1218.message; + __isset = other1218.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1209) { - message = other1209.message; - __isset = other1209.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1219) { + message = other1219.message; + __isset = other1219.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -32508,13 +33379,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1210) : TException() { - message = other1210.message; - __isset = other1210.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1220) : TException() { + message = other1220.message; + __isset = other1220.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1211) { - message = other1211.message; - __isset = other1211.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1221) { + message = other1221.message; + __isset = other1221.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -32605,13 +33476,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1212) : TException() { - message = other1212.message; - __isset = other1212.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1222) : TException() { + message = other1222.message; + __isset = other1222.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1213) { - message = other1213.message; - __isset = other1213.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1223) { + message = other1223.message; + __isset = other1223.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -32702,13 +33573,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1214) : TException() { - message = other1214.message; - __isset = other1214.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1224) : TException() { + message = other1224.message; + __isset = other1224.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1215) { - message = other1215.message; - __isset = other1215.__isset; 
+ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1225) { + message = other1225.message; + __isset = other1225.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -32799,13 +33670,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other1216) : TException() { - message = other1216.message; - __isset = other1216.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other1226) : TException() { + message = other1226.message; + __isset = other1226.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1217) { - message = other1217.message; - __isset = other1217.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1227) { + message = other1227.message; + __isset = other1227.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -32896,13 +33767,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1218) : TException() { - message = other1218.message; - __isset = other1218.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1228) : TException() { + message = other1228.message; + __isset = other1228.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1219) { - message = other1219.message; - __isset = other1219.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1229) { + message = other1229.message; + __isset = other1229.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -32993,13 +33864,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1220) : TException() { - message = other1220.message; - __isset = other1220.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1230) : TException() { + message = other1230.message; + __isset = other1230.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1221) { - message = other1221.message; - __isset = other1221.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1231) { + message = other1231.message; + __isset = other1231.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -33090,13 +33961,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other1222) : TException() { - message = other1222.message; - __isset = other1222.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other1232) : TException() { + message = other1232.message; + __isset = other1232.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1223) { - message = other1223.message; - __isset = other1223.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1233) { + message = other1233.message; + __isset = other1233.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -33187,13 +34058,13 @@ void 
swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1224) : TException() { - message = other1224.message; - __isset = other1224.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1234) : TException() { + message = other1234.message; + __isset = other1234.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1225) { - message = other1225.message; - __isset = other1225.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1235) { + message = other1235.message; + __isset = other1235.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index f5913fcd2d..62b2b50c21 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -673,6 +673,10 @@ class RuntimeStat; class GetRuntimeStatsRequest; +class AlterPartitionsRequest; + +class AlterPartitionsResponse; + class MetaException; class UnknownTableException; @@ -3107,7 +3111,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) } typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true), writeId(true), isStatsCompliant(false) {} bool tableName :1; bool dbName :1; bool owner :1; @@ -3126,6 +3130,8 @@ typedef struct _Table__isset { bool creationMetadata :1; bool catName :1; bool ownerType :1; + bool writeId :1; + bool isStatsCompliant :1; } _Table__isset; class Table { @@ -3133,7 +3139,7 @@ class Table { Table(const Table&); Table& operator=(const Table&); - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1) { + Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1), writeId(-1LL), isStatsCompliant(0) { ownerType = (PrincipalType::type)1; } @@ -3157,6 +3163,8 @@ class Table { CreationMetadata creationMetadata; std::string catName; PrincipalType::type ownerType; + int64_t writeId; + bool isStatsCompliant; _Table__isset __isset; @@ -3196,6 +3204,10 @@ class Table { void __set_ownerType(const PrincipalType::type val); + void __set_writeId(const int64_t val); + + void __set_isStatsCompliant(const bool val); + bool operator == (const Table & rhs) const { if (!(tableName == rhs.tableName)) @@ -3246,6 +3258,14 @@ class 
Table { return false; else if (__isset.ownerType && !(ownerType == rhs.ownerType)) return false; + if (__isset.writeId != rhs.__isset.writeId) + return false; + else if (__isset.writeId && !(writeId == rhs.writeId)) + return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const Table &rhs) const { @@ -3269,7 +3289,7 @@ inline std::ostream& operator<<(std::ostream& out, const Table& obj) } typedef struct _Partition__isset { - _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false) {} + _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false), writeId(true), isStatsCompliant(false) {} bool values :1; bool dbName :1; bool tableName :1; @@ -3279,6 +3299,8 @@ typedef struct _Partition__isset { bool parameters :1; bool privileges :1; bool catName :1; + bool writeId :1; + bool isStatsCompliant :1; } _Partition__isset; class Partition { @@ -3286,7 +3308,7 @@ class Partition { Partition(const Partition&); Partition& operator=(const Partition&); - Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName() { + Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName(), writeId(-1LL), isStatsCompliant(0) { } virtual ~Partition() throw(); @@ -3299,6 +3321,8 @@ class Partition { std::map<std::string, std::string> parameters; PrincipalPrivilegeSet privileges; std::string catName; + int64_t writeId; + bool isStatsCompliant; _Partition__isset __isset; @@ -3320,6 +3344,10 @@ class Partition { void __set_catName(const std::string& val); + void __set_writeId(const int64_t val); + + void __set_isStatsCompliant(const bool val); + bool operator == (const Partition & rhs) const { if (!(values == rhs.values)) @@ -3344,6 +3372,14 @@ class Partition { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.writeId != rhs.__isset.writeId) + return false; + else if (__isset.writeId && !(writeId == rhs.writeId)) + return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const Partition &rhs) const { @@ -3543,13 +3579,15 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionListComposingS } typedef struct _PartitionSpec__isset { - _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false) {} + _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false), writeId(true), isStatsCompliant(false) {} bool dbName :1; bool tableName :1; bool rootPath :1; bool sharedSDPartitionSpec :1; bool partitionList :1; bool catName :1; + bool writeId :1; + bool isStatsCompliant :1; } _PartitionSpec__isset; class PartitionSpec { @@ -3557,7 +3595,7 @@ class PartitionSpec { PartitionSpec(const PartitionSpec&); PartitionSpec& operator=(const PartitionSpec&); - PartitionSpec() : dbName(), tableName(), rootPath(), catName() { + PartitionSpec() : dbName(), tableName(), rootPath(), catName(), writeId(-1LL), isStatsCompliant(0) { } virtual
~PartitionSpec() throw(); @@ -3567,6 +3605,8 @@ class PartitionSpec { PartitionSpecWithSharedSD sharedSDPartitionSpec; PartitionListComposingSpec partitionList; std::string catName; + int64_t writeId; + bool isStatsCompliant; _PartitionSpec__isset __isset; @@ -3582,6 +3622,10 @@ class PartitionSpec { void __set_catName(const std::string& val); + void __set_writeId(const int64_t val); + + void __set_isStatsCompliant(const bool val); + bool operator == (const PartitionSpec & rhs) const { if (!(dbName == rhs.dbName)) @@ -3602,6 +3646,14 @@ class PartitionSpec { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.writeId != rhs.__isset.writeId) + return false; + else if (__isset.writeId && !(writeId == rhs.writeId)) + return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const PartitionSpec &rhs) const { @@ -4410,29 +4462,58 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatisticsDesc& o return out; } +typedef struct _ColumnStatistics__isset { + _ColumnStatistics__isset() : txnId(true), validWriteIdList(false), isStatsCompliant(false) {} + bool txnId :1; + bool validWriteIdList :1; + bool isStatsCompliant :1; +} _ColumnStatistics__isset; class ColumnStatistics { public: ColumnStatistics(const ColumnStatistics&); ColumnStatistics& operator=(const ColumnStatistics&); - ColumnStatistics() { + ColumnStatistics() : txnId(-1LL), validWriteIdList(), isStatsCompliant(0) { } virtual ~ColumnStatistics() throw(); ColumnStatisticsDesc statsDesc; std::vector statsObj; + int64_t txnId; + std::string validWriteIdList; + bool isStatsCompliant; + + _ColumnStatistics__isset __isset; void __set_statsDesc(const ColumnStatisticsDesc& val); void __set_statsObj(const std::vector & val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + + void __set_isStatsCompliant(const bool val); + bool operator == (const ColumnStatistics & rhs) const { if (!(statsDesc == rhs.statsDesc)) return false; if (!(statsObj == rhs.statsObj)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const ColumnStatistics &rhs) const { @@ -4455,29 +4536,42 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatistics& obj) return out; } +typedef struct _AggrStats__isset { + _AggrStats__isset() : isStatsCompliant(false) {} + bool isStatsCompliant :1; +} _AggrStats__isset; class AggrStats { public: AggrStats(const AggrStats&); AggrStats& operator=(const AggrStats&); - AggrStats() : partsFound(0) { + AggrStats() : partsFound(0), isStatsCompliant(0) { } virtual ~AggrStats() throw(); std::vector colStats; int64_t partsFound; + bool isStatsCompliant; + + _AggrStats__isset __isset; void __set_colStats(const std::vector & val); void __set_partsFound(const int64_t val); + void __set_isStatsCompliant(const bool val); + bool operator == (const AggrStats & rhs) const { if (!(colStats == 
rhs.colStats)) return false; if (!(partsFound == rhs.partsFound)) return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const AggrStats &rhs) const { @@ -4501,8 +4595,11 @@ inline std::ostream& operator<<(std::ostream& out, const AggrStats& obj) } typedef struct _SetPartitionsStatsRequest__isset { - _SetPartitionsStatsRequest__isset() : needMerge(false) {} + _SetPartitionsStatsRequest__isset() : needMerge(false), txnId(true), writeId(true), validWriteIdList(false) {} bool needMerge :1; + bool txnId :1; + bool writeId :1; + bool validWriteIdList :1; } _SetPartitionsStatsRequest__isset; class SetPartitionsStatsRequest { @@ -4510,12 +4607,15 @@ class SetPartitionsStatsRequest { SetPartitionsStatsRequest(const SetPartitionsStatsRequest&); SetPartitionsStatsRequest& operator=(const SetPartitionsStatsRequest&); - SetPartitionsStatsRequest() : needMerge(0) { + SetPartitionsStatsRequest() : needMerge(0), txnId(-1LL), writeId(-1LL), validWriteIdList() { } virtual ~SetPartitionsStatsRequest() throw(); std::vector colStats; bool needMerge; + int64_t txnId; + int64_t writeId; + std::string validWriteIdList; _SetPartitionsStatsRequest__isset __isset; @@ -4523,6 +4623,12 @@ class SetPartitionsStatsRequest { void __set_needMerge(const bool val); + void __set_txnId(const int64_t val); + + void __set_writeId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator == (const SetPartitionsStatsRequest & rhs) const { if (!(colStats == rhs.colStats)) @@ -4531,6 +4637,18 @@ class SetPartitionsStatsRequest { return false; else if (__isset.needMerge && !(needMerge == rhs.needMerge)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.writeId != rhs.__isset.writeId) + return false; + else if (__isset.writeId && !(writeId == rhs.writeId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const SetPartitionsStatsRequest &rhs) const { @@ -5648,24 +5766,37 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsByExprRequest return out; } +typedef struct _TableStatsResult__isset { + _TableStatsResult__isset() : isStatsCompliant(false) {} + bool isStatsCompliant :1; +} _TableStatsResult__isset; class TableStatsResult { public: TableStatsResult(const TableStatsResult&); TableStatsResult& operator=(const TableStatsResult&); - TableStatsResult() { + TableStatsResult() : isStatsCompliant(0) { } virtual ~TableStatsResult() throw(); std::vector tableStats; + bool isStatsCompliant; + + _TableStatsResult__isset __isset; void __set_tableStats(const std::vector & val); + void __set_isStatsCompliant(const bool val); + bool operator == (const TableStatsResult & rhs) const { if (!(tableStats == rhs.tableStats)) return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const TableStatsResult &rhs) const { @@ -5688,24 +5819,37 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsResult& obj) return out; } +typedef struct _PartitionsStatsResult__isset { + 
_PartitionsStatsResult__isset() : isStatsCompliant(false) {} + bool isStatsCompliant :1; +} _PartitionsStatsResult__isset; class PartitionsStatsResult { public: PartitionsStatsResult(const PartitionsStatsResult&); PartitionsStatsResult& operator=(const PartitionsStatsResult&); - PartitionsStatsResult() { + PartitionsStatsResult() : isStatsCompliant(0) { } virtual ~PartitionsStatsResult() throw(); std::map<std::string, std::vector<ColumnStatisticsObj> > partStats; + bool isStatsCompliant; + + _PartitionsStatsResult__isset __isset; void __set_partStats(const std::map<std::string, std::vector<ColumnStatisticsObj> > & val); + void __set_isStatsCompliant(const bool val); + bool operator == (const PartitionsStatsResult & rhs) const { if (!(partStats == rhs.partStats)) return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const PartitionsStatsResult &rhs) const { @@ -5729,8 +5873,10 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsStatsResult& } typedef struct _TableStatsRequest__isset { - _TableStatsRequest__isset() : catName(false) {} + _TableStatsRequest__isset() : catName(false), txnId(true), validWriteIdList(false) {} bool catName :1; + bool txnId :1; + bool validWriteIdList :1; } _TableStatsRequest__isset; class TableStatsRequest { @@ -5738,7 +5884,7 @@ class TableStatsRequest { TableStatsRequest(const TableStatsRequest&); TableStatsRequest& operator=(const TableStatsRequest&); - TableStatsRequest() : dbName(), tblName(), catName() { + TableStatsRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() { } virtual ~TableStatsRequest() throw(); @@ -5746,6 +5892,8 @@ std::string tblName; std::vector<std::string> colNames; std::string catName; + int64_t txnId; + std::string validWriteIdList; _TableStatsRequest__isset __isset; @@ -5757,6 +5905,10 @@ void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator == (const TableStatsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5769,6 +5921,14 @@ return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const TableStatsRequest &rhs) const { @@ -5792,8 +5952,10 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsRequest& obj) } typedef struct _PartitionsStatsRequest__isset { - _PartitionsStatsRequest__isset() : catName(false) {} + _PartitionsStatsRequest__isset() : catName(false), txnId(true), validWriteIdList(false) {} bool catName :1; + bool txnId :1; + bool validWriteIdList :1; } _PartitionsStatsRequest__isset; class PartitionsStatsRequest { @@ -5801,7 +5963,7 @@ class PartitionsStatsRequest { PartitionsStatsRequest(const PartitionsStatsRequest&); PartitionsStatsRequest& operator=(const PartitionsStatsRequest&); - PartitionsStatsRequest() : dbName(), tblName(), catName() { + PartitionsStatsRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() { } virtual ~PartitionsStatsRequest() throw(); @@ -5810,6 +5972,8 @@ std::string dbName; std::string tblName; std::vector<std::string> colNames;
std::vector<std::string> partNames; std::string catName; + int64_t txnId; + std::string validWriteIdList; _PartitionsStatsRequest__isset __isset; @@ -5823,6 +5987,10 @@ void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator == (const PartitionsStatsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5837,6 +6005,14 @@ return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const PartitionsStatsRequest &rhs) const { @@ -5860,8 +6036,9 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsStatsRequest& } typedef struct _AddPartitionsResult__isset { - _AddPartitionsResult__isset() : partitions(false) {} + _AddPartitionsResult__isset() : partitions(false), isStatsCompliant(false) {} bool partitions :1; + bool isStatsCompliant :1; } _AddPartitionsResult__isset; class AddPartitionsResult { @@ -5869,22 +6046,29 @@ class AddPartitionsResult { AddPartitionsResult(const AddPartitionsResult&); AddPartitionsResult& operator=(const AddPartitionsResult&); - AddPartitionsResult() { + AddPartitionsResult() : isStatsCompliant(0) { } virtual ~AddPartitionsResult() throw(); std::vector<Partition> partitions; + bool isStatsCompliant; _AddPartitionsResult__isset __isset; void __set_partitions(const std::vector<Partition> & val); + void __set_isStatsCompliant(const bool val); + bool operator == (const AddPartitionsResult & rhs) const { if (__isset.partitions != rhs.__isset.partitions) return false; else if (__isset.partitions && !(partitions == rhs.partitions)) return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const AddPartitionsResult &rhs) const { @@ -5908,9 +6092,11 @@ inline std::ostream& operator<<(std::ostream& out, const AddPartitionsResult& ob } typedef struct _AddPartitionsRequest__isset { - _AddPartitionsRequest__isset() : needResult(true), catName(false) {} + _AddPartitionsRequest__isset() : needResult(true), catName(false), txnId(true), validWriteIdList(false) {} bool needResult :1; bool catName :1; + bool txnId :1; + bool validWriteIdList :1; } _AddPartitionsRequest__isset; class AddPartitionsRequest { @@ -5918,7 +6104,7 @@ class AddPartitionsRequest { AddPartitionsRequest(const AddPartitionsRequest&); AddPartitionsRequest& operator=(const AddPartitionsRequest&); - AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName() { + AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName(), txnId(-1LL), validWriteIdList() { } virtual ~AddPartitionsRequest() throw(); @@ -5928,6 +6114,8 @@ bool ifNotExists; bool needResult; std::string catName; + int64_t txnId; + std::string validWriteIdList; _AddPartitionsRequest__isset __isset; @@ -5943,6 +6131,10 @@ void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator ==
(const AddPartitionsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5961,6 +6153,14 @@ class AddPartitionsRequest { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const AddPartitionsRequest &rhs) const { @@ -10087,9 +10287,11 @@ inline std::ostream& operator<<(std::ostream& out, const ClientCapabilities& obj } typedef struct _GetTableRequest__isset { - _GetTableRequest__isset() : capabilities(false), catName(false) {} + _GetTableRequest__isset() : capabilities(false), catName(false), txnId(true), validWriteIdList(false) {} bool capabilities :1; bool catName :1; + bool txnId :1; + bool validWriteIdList :1; } _GetTableRequest__isset; class GetTableRequest { @@ -10097,7 +10299,7 @@ class GetTableRequest { GetTableRequest(const GetTableRequest&); GetTableRequest& operator=(const GetTableRequest&); - GetTableRequest() : dbName(), tblName(), catName() { + GetTableRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() { } virtual ~GetTableRequest() throw(); @@ -10105,6 +10307,8 @@ class GetTableRequest { std::string tblName; ClientCapabilities capabilities; std::string catName; + int64_t txnId; + std::string validWriteIdList; _GetTableRequest__isset __isset; @@ -10116,6 +10320,10 @@ class GetTableRequest { void __set_catName(const std::string& val); + void __set_txnId(const int64_t val); + + void __set_validWriteIdList(const std::string& val); + bool operator == (const GetTableRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -10130,6 +10338,14 @@ class GetTableRequest { return false; else if (__isset.catName && !(catName == rhs.catName)) return false; + if (__isset.txnId != rhs.__isset.txnId) + return false; + else if (__isset.txnId && !(txnId == rhs.txnId)) + return false; + if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) + return false; + else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) + return false; return true; } bool operator != (const GetTableRequest &rhs) const { @@ -10152,24 +10368,37 @@ inline std::ostream& operator<<(std::ostream& out, const GetTableRequest& obj) return out; } +typedef struct _GetTableResult__isset { + _GetTableResult__isset() : isStatsCompliant(false) {} + bool isStatsCompliant :1; +} _GetTableResult__isset; class GetTableResult { public: GetTableResult(const GetTableResult&); GetTableResult& operator=(const GetTableResult&); - GetTableResult() { + GetTableResult() : isStatsCompliant(0) { } virtual ~GetTableResult() throw(); Table table; + bool isStatsCompliant; + + _GetTableResult__isset __isset; void __set_table(const Table& val); + void __set_isStatsCompliant(const bool val); + bool operator == (const GetTableResult & rhs) const { if (!(table == rhs.table)) return false; + if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant) + return false; + else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant)) + return false; return true; } bool operator != (const GetTableResult &rhs) const { @@ -13400,6 +13629,125 @@ inline std::ostream& operator<<(std::ostream& out, const GetRuntimeStatsRequest& return out; } +typedef struct _AlterPartitionsRequest__isset { + _AlterPartitionsRequest__isset() : 
txnId(true), writeId(true), validWriteIdList(false) {}
+  bool txnId :1;
+  bool writeId :1;
+  bool validWriteIdList :1;
+} _AlterPartitionsRequest__isset;
+
+class AlterPartitionsRequest {
+ public:
+
+  AlterPartitionsRequest(const AlterPartitionsRequest&);
+  AlterPartitionsRequest& operator=(const AlterPartitionsRequest&);
+  AlterPartitionsRequest() : dbName(), tableName(), txnId(-1LL), writeId(-1LL), validWriteIdList() {
+  }
+
+  virtual ~AlterPartitionsRequest() throw();
+  std::string dbName;
+  std::string tableName;
+  std::vector<Partition> partitions;
+  EnvironmentContext environmentContext;
+  int64_t txnId;
+  int64_t writeId;
+  std::string validWriteIdList;
+
+  _AlterPartitionsRequest__isset __isset;
+
+  void __set_dbName(const std::string& val);
+
+  void __set_tableName(const std::string& val);
+
+  void __set_partitions(const std::vector<Partition> & val);
+
+  void __set_environmentContext(const EnvironmentContext& val);
+
+  void __set_txnId(const int64_t val);
+
+  void __set_writeId(const int64_t val);
+
+  void __set_validWriteIdList(const std::string& val);
+
+  bool operator == (const AlterPartitionsRequest & rhs) const
+  {
+    if (!(dbName == rhs.dbName))
+      return false;
+    if (!(tableName == rhs.tableName))
+      return false;
+    if (!(partitions == rhs.partitions))
+      return false;
+    if (!(environmentContext == rhs.environmentContext))
+      return false;
+    if (__isset.txnId != rhs.__isset.txnId)
+      return false;
+    else if (__isset.txnId && !(txnId == rhs.txnId))
+      return false;
+    if (__isset.writeId != rhs.__isset.writeId)
+      return false;
+    else if (__isset.writeId && !(writeId == rhs.writeId))
+      return false;
+    if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+      return false;
+    else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+      return false;
+    return true;
+  }
+  bool operator != (const AlterPartitionsRequest &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const AlterPartitionsRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(AlterPartitionsRequest &a, AlterPartitionsRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const AlterPartitionsRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+class AlterPartitionsResponse {
+ public:
+
+  AlterPartitionsResponse(const AlterPartitionsResponse&);
+  AlterPartitionsResponse& operator=(const AlterPartitionsResponse&);
+  AlterPartitionsResponse() {
+  }
+
+  virtual ~AlterPartitionsResponse() throw();
+
+  bool operator == (const AlterPartitionsResponse & /* rhs */) const
+  {
+    return true;
+  }
+  bool operator != (const AlterPartitionsResponse &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const AlterPartitionsResponse & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(AlterPartitionsResponse &a, AlterPartitionsResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const AlterPartitionsResponse& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
 typedef struct _MetaException__isset {
   _MetaException__isset() : message(false) {}
   bool message :1;
diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index dd3a127013..56e50433a0 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -44,6 +44,8 @@ private static final org.apache.thrift.protocol.TField IF_NOT_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifNotExists", org.apache.thrift.protocol.TType.BOOL, (short)4); private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)5); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +59,8 @@ private boolean ifNotExists; // required private boolean needResult; // optional private String catName; // optional + private long txnId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +69,9 @@ PARTS((short)3, "parts"), IF_NOT_EXISTS((short)4, "ifNotExists"), NEED_RESULT((short)5, "needResult"), - CAT_NAME((short)6, "catName"); + CAT_NAME((short)6, "catName"), + TXN_ID((short)7, "txnId"), + VALID_WRITE_ID_LIST((short)8, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -92,6 +98,10 @@ public static _Fields findByThriftId(int fieldId) { return NEED_RESULT; case 6: // CAT_NAME return CAT_NAME; + case 7: // TXN_ID + return TXN_ID; + case 8: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -134,8 +144,9 @@ public String getFieldName() { // isset id assignments private static final int __IFNOTEXISTS_ISSET_ID = 0; private static final int __NEEDRESULT_ISSET_ID = 1; + private static final int __TXNID_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -152,6 +163,10 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsRequest.class, metaDataMap); } @@ -159,6 +174,8 @@ public String getFieldName() { public AddPartitionsRequest() { this.needResult = true; + this.txnId = -1L; + } public AddPartitionsRequest( @@ -198,6 +215,10 @@ public AddPartitionsRequest(AddPartitionsRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public AddPartitionsRequest deepCopy() { @@ -214,6 +235,9 @@ public void clear() { this.needResult = true; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; } public String getDbName() { @@ -367,6 +391,51 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -417,6 +486,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -440,6 +525,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -463,6 +554,10 @@ public boolean isSet(_Fields field) { return isSetNeedResult(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -534,6 +629,24 @@ public boolean equals(AddPartitionsRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && 
that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -571,6 +684,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -642,6 +765,26 @@ public int compareTo(AddPartitionsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -705,6 +848,22 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -825,6 +984,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 8: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -875,6 +1050,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } 
oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -909,13 +1096,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques if (struct.isSetCatName()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetTxnId()) { + optionals.set(2); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetNeedResult()) { oprot.writeBool(struct.needResult); } if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -939,7 +1138,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.setPartsIsSet(true); struct.ifNotExists = iprot.readBool(); struct.setIfNotExistsIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.needResult = iprot.readBool(); struct.setNeedResultIsSet(true); @@ -948,6 +1147,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(2)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java index fe41b8c711..3c0bf82365 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java @@ -39,6 +39,7 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsResult"); private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,10 +48,12 @@ } private List partitions; // optional + private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - PARTITIONS((short)1, "partitions"); + PARTITIONS((short)1, "partitions"), + IS_STATS_COMPLIANT((short)2, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -67,6 +70,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // PARTITIONS return PARTITIONS; + case 2: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -107,13 +112,17 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.PARTITIONS}; + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.PARTITIONS,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsResult.class, metaDataMap); } @@ -125,6 +134,7 @@ public AddPartitionsResult() { * Performs a deep copy on other. 
*/ public AddPartitionsResult(AddPartitionsResult other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetPartitions()) { List __this__partitions = new ArrayList(other.partitions.size()); for (Partition other_element : other.partitions) { @@ -132,6 +142,7 @@ public AddPartitionsResult(AddPartitionsResult other) { } this.partitions = __this__partitions; } + this.isStatsCompliant = other.isStatsCompliant; } public AddPartitionsResult deepCopy() { @@ -141,6 +152,8 @@ public AddPartitionsResult deepCopy() { @Override public void clear() { this.partitions = null; + setIsStatsCompliantIsSet(false); + this.isStatsCompliant = false; } public int getPartitionsSize() { @@ -181,6 +194,28 @@ public void setPartitionsIsSet(boolean value) { } } + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case PARTITIONS: @@ -191,6 +226,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((Boolean)value); + } + break; + } } @@ -199,6 +242,9 @@ public Object getFieldValue(_Fields field) { case PARTITIONS: return getPartitions(); + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -212,6 +258,8 @@ public boolean isSet(_Fields field) { switch (field) { case PARTITIONS: return isSetPartitions(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -238,6 +286,15 @@ public boolean equals(AddPartitionsResult that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + return true; } @@ -250,6 +307,11 @@ public int hashCode() { if (present_partitions) list.add(partitions); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + return list.hashCode(); } @@ -271,6 +333,16 @@ public int compareTo(AddPartitionsResult other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -300,6 +372,12 @@ public String 
toString() { } first = false; } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } sb.append(")"); return sb.toString(); } @@ -319,6 +397,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -362,6 +442,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -389,6 +477,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResul oprot.writeFieldEnd(); } } + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -410,7 +503,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult if (struct.isSetPartitions()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetIsStatsCompliant()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); @@ -420,12 +516,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult } } } + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); @@ -440,6 +539,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult } struct.setPartitionsIsSet(true); } + if (incoming.get(1)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java index fff212dfd4..87b8fea38c 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java @@ -40,6 +40,7 @@ private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", 
org.apache.thrift.protocol.TType.LIST, (short)1); private static final org.apache.thrift.protocol.TField PARTS_FOUND_FIELD_DESC = new org.apache.thrift.protocol.TField("partsFound", org.apache.thrift.protocol.TType.I64, (short)2); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,11 +50,13 @@ private List colStats; // required private long partsFound; // required + private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { COL_STATS((short)1, "colStats"), - PARTS_FOUND((short)2, "partsFound"); + PARTS_FOUND((short)2, "partsFound"), + IS_STATS_COMPLIANT((short)3, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -72,6 +75,8 @@ public static _Fields findByThriftId(int fieldId) { return COL_STATS; case 2: // PARTS_FOUND return PARTS_FOUND; + case 3: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -113,7 +118,9 @@ public String getFieldName() { // isset id assignments private static final int __PARTSFOUND_ISSET_ID = 0; + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 1; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -122,6 +129,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); tmpMap.put(_Fields.PARTS_FOUND, new org.apache.thrift.meta_data.FieldMetaData("partsFound", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AggrStats.class, metaDataMap); } @@ -152,6 +161,7 @@ public AggrStats(AggrStats other) { this.colStats = __this__colStats; } this.partsFound = other.partsFound; + this.isStatsCompliant = other.isStatsCompliant; } public AggrStats deepCopy() { @@ -163,6 +173,8 @@ public void clear() { this.colStats = null; setPartsFoundIsSet(false); this.partsFound = 0; + setIsStatsCompliantIsSet(false); + this.isStatsCompliant = false; } public int getColStatsSize() { @@ -225,6 +237,28 @@ public void setPartsFoundIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARTSFOUND_ISSET_ID, value); } + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + 
/** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case COL_STATS: @@ -243,6 +277,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((Boolean)value); + } + break; + } } @@ -254,6 +296,9 @@ public Object getFieldValue(_Fields field) { case PARTS_FOUND: return getPartsFound(); + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -269,6 +314,8 @@ public boolean isSet(_Fields field) { return isSetColStats(); case PARTS_FOUND: return isSetPartsFound(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -304,6 +351,15 @@ public boolean equals(AggrStats that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + return true; } @@ -321,6 +377,11 @@ public int hashCode() { if (present_partsFound) list.add(partsFound); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + return list.hashCode(); } @@ -352,6 +413,16 @@ public int compareTo(AggrStats other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -383,6 +454,12 @@ public String toString() { sb.append("partsFound:"); sb.append(this.partsFound); first = false; + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } sb.append(")"); return sb.toString(); } @@ -463,6 +540,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AggrStats struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -491,6 +576,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AggrStats struct) oprot.writeFieldBegin(PARTS_FOUND_FIELD_DESC); oprot.writeI64(struct.partsFound); oprot.writeFieldEnd(); + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + 
oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -516,6 +606,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) t } } oprot.writeI64(struct.partsFound); + BitSet optionals = new BitSet(); + if (struct.isSetIsStatsCompliant()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } } @Override @@ -535,6 +633,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) th struct.setColStatsIsSet(true); struct.partsFound = iprot.readI64(); struct.setPartsFoundIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java new file mode 100644 index 0000000000..cc6ecdf790 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java @@ -0,0 +1,1067 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterPartitionsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterPartitionsRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new 
org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)4); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)6); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AlterPartitionsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AlterPartitionsRequestTupleSchemeFactory()); + } + + private String dbName; // required + private String tableName; // required + private List partitions; // required + private EnvironmentContext environmentContext; // required + private long txnId; // optional + private long writeId; // optional + private String validWriteIdList; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TABLE_NAME((short)2, "tableName"), + PARTITIONS((short)3, "partitions"), + ENVIRONMENT_CONTEXT((short)4, "environmentContext"), + TXN_ID((short)5, "txnId"), + WRITE_ID((short)6, "writeId"), + VALID_WRITE_ID_LIST((short)7, "validWriteIdList"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TABLE_NAME + return TABLE_NAME; + case 3: // PARTITIONS + return PARTITIONS; + case 4: // ENVIRONMENT_CONTEXT + return ENVIRONMENT_CONTEXT; + case 5: // TXN_ID + return TXN_ID; + case 6: // WRITE_ID + return WRITE_ID; + case 7: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __TXNID_ISSET_ID = 0; + private static final int __WRITEID_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); + tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environmentContext", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterPartitionsRequest.class, metaDataMap); + } + + public AlterPartitionsRequest() { + this.txnId = -1L; + + this.writeId = -1L; + + } + + public AlterPartitionsRequest( + String dbName, + String tableName, + List partitions, + EnvironmentContext environmentContext) + { + this(); + this.dbName = dbName; + this.tableName = tableName; + this.partitions = partitions; + this.environmentContext = environmentContext; + } + + /** + * Performs a deep copy on other. 
+ */ + public AlterPartitionsRequest(AlterPartitionsRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + if (other.isSetPartitions()) { + List __this__partitions = new ArrayList(other.partitions.size()); + for (Partition other_element : other.partitions) { + __this__partitions.add(new Partition(other_element)); + } + this.partitions = __this__partitions; + } + if (other.isSetEnvironmentContext()) { + this.environmentContext = new EnvironmentContext(other.environmentContext); + } + this.txnId = other.txnId; + this.writeId = other.writeId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } + } + + public AlterPartitionsRequest deepCopy() { + return new AlterPartitionsRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tableName = null; + this.partitions = null; + this.environmentContext = null; + this.txnId = -1L; + + this.writeId = -1L; + + this.validWriteIdList = null; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTableName() { + return this.tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public int getPartitionsSize() { + return (this.partitions == null) ? 0 : this.partitions.size(); + } + + public java.util.Iterator getPartitionsIterator() { + return (this.partitions == null) ? 
null : this.partitions.iterator(); + } + + public void addToPartitions(Partition elem) { + if (this.partitions == null) { + this.partitions = new ArrayList(); + } + this.partitions.add(elem); + } + + public List getPartitions() { + return this.partitions; + } + + public void setPartitions(List partitions) { + this.partitions = partitions; + } + + public void unsetPartitions() { + this.partitions = null; + } + + /** Returns true if field partitions is set (has been assigned a value) and false otherwise */ + public boolean isSetPartitions() { + return this.partitions != null; + } + + public void setPartitionsIsSet(boolean value) { + if (!value) { + this.partitions = null; + } + } + + public EnvironmentContext getEnvironmentContext() { + return this.environmentContext; + } + + public void setEnvironmentContext(EnvironmentContext environmentContext) { + this.environmentContext = environmentContext; + } + + public void unsetEnvironmentContext() { + this.environmentContext = null; + } + + /** Returns true if field environmentContext is set (has been assigned a value) and false otherwise */ + public boolean isSetEnvironmentContext() { + return this.environmentContext != null; + } + + public void setEnvironmentContextIsSet(boolean value) { + if (!value) { + this.environmentContext = null; + } + } + + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((String)value); + } + break; + + case PARTITIONS: + if (value == null) { + unsetPartitions(); + } else { + setPartitions((List)value); + } + break; + + case ENVIRONMENT_CONTEXT: + 
if (value == null) { + unsetEnvironmentContext(); + } else { + setEnvironmentContext((EnvironmentContext)value); + } + break; + + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TABLE_NAME: + return getTableName(); + + case PARTITIONS: + return getPartitions(); + + case ENVIRONMENT_CONTEXT: + return getEnvironmentContext(); + + case TXN_ID: + return getTxnId(); + + case WRITE_ID: + return getWriteId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TABLE_NAME: + return isSetTableName(); + case PARTITIONS: + return isSetPartitions(); + case ENVIRONMENT_CONTEXT: + return isSetEnvironmentContext(); + case TXN_ID: + return isSetTxnId(); + case WRITE_ID: + return isSetWriteId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AlterPartitionsRequest) + return this.equals((AlterPartitionsRequest)that); + return false; + } + + public boolean equals(AlterPartitionsRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + boolean this_present_partitions = true && this.isSetPartitions(); + boolean that_present_partitions = true && that.isSetPartitions(); + if (this_present_partitions || that_present_partitions) { + if (!(this_present_partitions && that_present_partitions)) + return false; + if (!this.partitions.equals(that.partitions)) + return false; + } + + boolean this_present_environmentContext = true && this.isSetEnvironmentContext(); + boolean that_present_environmentContext = true && that.isSetEnvironmentContext(); + if (this_present_environmentContext || that_present_environmentContext) { + if (!(this_present_environmentContext && that_present_environmentContext)) + return false; + if (!this.environmentContext.equals(that.environmentContext)) + return false; + } + + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return 
false; + } + + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + boolean present_partitions = true && (isSetPartitions()); + list.add(present_partitions); + if (present_partitions) + list.add(partitions); + + boolean present_environmentContext = true && (isSetEnvironmentContext()); + list.add(present_environmentContext); + if (present_environmentContext) + list.add(environmentContext); + + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + + return list.hashCode(); + } + + @Override + public int compareTo(AlterPartitionsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPartitions()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEnvironmentContext()).compareTo(other.isSetEnvironmentContext()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEnvironmentContext()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environmentContext, other.environmentContext); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + 
} + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("AlterPartitionsRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + sb.append(this.tableName); + } + first = false; + if (!first) sb.append(", "); + sb.append("partitions:"); + if (this.partitions == null) { + sb.append("null"); + } else { + sb.append(this.partitions); + } + first = false; + if (!first) sb.append(", "); + sb.append("environmentContext:"); + if (this.environmentContext == null) { + sb.append("null"); + } else { + sb.append(this.environmentContext); + } + first = false; + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTableName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString()); + } + + if (!isSetPartitions()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'partitions' is unset! Struct:" + toString()); + } + + if (!isSetEnvironmentContext()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'environmentContext' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + if (environmentContext != null) { + environmentContext.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AlterPartitionsRequestStandardSchemeFactory implements SchemeFactory { + public AlterPartitionsRequestStandardScheme getScheme() { + return new AlterPartitionsRequestStandardScheme(); + } + } + + private static class AlterPartitionsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // PARTITIONS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list952 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list952.size); + Partition _elem953; + for (int _i954 = 0; _i954 < _list952.size; ++_i954) + { + _elem953 = new Partition(); + _elem953.read(iprot); + struct.partitions.add(_elem953); + } + iprot.readListEnd(); + } + struct.setPartitionsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // ENVIRONMENT_CONTEXT + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.environmentContext = new EnvironmentContext(); + struct.environmentContext.read(iprot); + struct.setEnvironmentContextIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // VALID_WRITE_ID_LIST + if 
(schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + if (struct.partitions != null) { + oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); + for (Partition _iter955 : struct.partitions) + { + _iter955.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.environmentContext != null) { + oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC); + struct.environmentContext.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AlterPartitionsRequestTupleSchemeFactory implements SchemeFactory { + public AlterPartitionsRequestTupleScheme getScheme() { + return new AlterPartitionsRequestTupleScheme(); + } + } + + private static class AlterPartitionsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tableName); + { + oprot.writeI32(struct.partitions.size()); + for (Partition _iter956 : struct.partitions) + { + _iter956.write(oprot); + } + } + struct.environmentContext.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetTxnId()) { + optionals.set(0); + } + if (struct.isSetWriteId()) { + optionals.set(1); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + { + 
org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list957.size); + Partition _elem958; + for (int _i959 = 0; _i959 < _list957.size; ++_i959) + { + _elem958 = new Partition(); + _elem958.read(iprot); + struct.partitions.add(_elem958); + } + } + struct.setPartitionsIsSet(true); + struct.environmentContext = new EnvironmentContext(); + struct.environmentContext.read(iprot); + struct.setEnvironmentContextIsSet(true); + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(1)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java new file mode 100644 index 0000000000..8e03462ddb --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterPartitionsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterPartitionsResponse"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AlterPartitionsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AlterPartitionsResponseTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
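With AlterPartitionsRequest complete, an alter_partitions call can carry its transactional context in one wrapper object instead of extra positional arguments. A usage sketch built only from the constructor and setters generated above; the ValidWriteIdList string is an illustrative value, not a format this patch defines:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.AlterPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

public class AlterPartitionsRequestExample {
  public static void main(String[] args) throws TException {
    List<Partition> partitions = new ArrayList<>();   // normally the mutated partitions
    AlterPartitionsRequest req = new AlterPartitionsRequest(
        "default", "sales", partitions, new EnvironmentContext());

    // The three txn fields are optional; left unset, txnId and writeId default to -1L.
    req.setTxnId(42L);
    req.setWriteId(7L);
    req.setValidWriteIdList("default.sales:7:7::");   // illustrative snapshot string

    req.validate();  // passes: all four required fields are set
  }
}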
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterPartitionsResponse.class, metaDataMap); + } + + public AlterPartitionsResponse() { + } + + /** + * Performs a deep copy on other. + */ + public AlterPartitionsResponse(AlterPartitionsResponse other) { + } + + public AlterPartitionsResponse deepCopy() { + return new AlterPartitionsResponse(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AlterPartitionsResponse) + return this.equals((AlterPartitionsResponse)that); + return false; + } + + public boolean equals(AlterPartitionsResponse that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(AlterPartitionsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + 
@Override + public String toString() { + StringBuilder sb = new StringBuilder("AlterPartitionsResponse("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AlterPartitionsResponseStandardSchemeFactory implements SchemeFactory { + public AlterPartitionsResponseStandardScheme getScheme() { + return new AlterPartitionsResponseStandardScheme(); + } + } + + private static class AlterPartitionsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AlterPartitionsResponseTupleSchemeFactory implements SchemeFactory { + public AlterPartitionsResponseTupleScheme getScheme() { + return new AlterPartitionsResponseTupleScheme(); + } + } + + private static class AlterPartitionsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java index 6ce7214c9d..87dc3f14a9 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java @@ -40,6 +40,9 @@ private static final org.apache.thrift.protocol.TField STATS_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("statsDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new 
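AlterPartitionsResponse, by contrast, is deliberately a field-less acknowledgement: its _Fields enum is empty, equals() accepts any non-null instance of the type, and toString() prints an empty body. A quick sanity sketch of those generated semantics:

import org.apache.hadoop.hive.metastore.api.AlterPartitionsResponse;
import org.apache.thrift.TException;

public class AlterPartitionsResponseExample {
  public static void main(String[] args) throws TException {
    AlterPartitionsResponse a = new AlterPartitionsResponse();
    AlterPartitionsResponse b = new AlterPartitionsResponse(a); // copy ctor has nothing to copy

    assert a.equals(b);                                   // no fields, so always equal
    assert "AlterPartitionsResponse()".equals(a.toString());
    a.validate();                                         // nothing is required, never throws
  }
}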
org.apache.thrift.protocol.TField("statsObj", org.apache.thrift.protocol.TType.LIST, (short)2); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,11 +52,17 @@ private ColumnStatisticsDesc statsDesc; // required private List statsObj; // required + private long txnId; // optional + private String validWriteIdList; // optional + private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { STATS_DESC((short)1, "statsDesc"), - STATS_OBJ((short)2, "statsObj"); + STATS_OBJ((short)2, "statsObj"), + TXN_ID((short)3, "txnId"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"), + IS_STATS_COMPLIANT((short)5, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -72,6 +81,12 @@ public static _Fields findByThriftId(int fieldId) { return STATS_DESC; case 2: // STATS_OBJ return STATS_OBJ; + case 3: // TXN_ID + return TXN_ID; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; + case 5: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -112,6 +127,10 @@ public String getFieldName() { } // isset id assignments + private static final int __TXNID_ISSET_ID = 0; + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -120,11 +139,19 @@ public String getFieldName() { tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("statsObj", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatistics.class, metaDataMap); } public ColumnStatistics() { + this.txnId = -1L; + } public ColumnStatistics( @@ -140,6 +167,7 @@ public ColumnStatistics( * Performs a deep copy on other. */ public ColumnStatistics(ColumnStatistics other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetStatsDesc()) { this.statsDesc = new ColumnStatisticsDesc(other.statsDesc); } @@ -150,6 +178,11 @@ public ColumnStatistics(ColumnStatistics other) { } this.statsObj = __this__statsObj; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } + this.isStatsCompliant = other.isStatsCompliant; } public ColumnStatistics deepCopy() { @@ -160,6 +193,11 @@ public ColumnStatistics deepCopy() { public void clear() { this.statsDesc = null; this.statsObj = null; + this.txnId = -1L; + + this.validWriteIdList = null; + setIsStatsCompliantIsSet(false); + this.isStatsCompliant = false; } public ColumnStatisticsDesc getStatsDesc() { @@ -223,6 +261,73 @@ public void setStatsObjIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case STATS_DESC: @@ -241,6 +346,30 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + 
} else { + setIsStatsCompliant((Boolean)value); + } + break; + } } @@ -252,6 +381,15 @@ public Object getFieldValue(_Fields field) { case STATS_OBJ: return getStatsObj(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -267,6 +405,12 @@ public boolean isSet(_Fields field) { return isSetStatsDesc(); case STATS_OBJ: return isSetStatsObj(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -302,6 +446,33 @@ public boolean equals(ColumnStatistics that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + return true; } @@ -319,6 +490,21 @@ public int hashCode() { if (present_statsObj) list.add(statsObj); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + return list.hashCode(); } @@ -350,6 +536,36 @@ public int compareTo(ColumnStatistics other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + 
return lastComparison; + } + } return 0; } @@ -385,6 +601,28 @@ public String toString() { sb.append(this.statsObj); } first = false; + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } sb.append(")"); return sb.toString(); } @@ -415,6 +653,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -467,6 +707,30 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatistics st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -497,6 +761,23 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatistics s } oprot.writeFieldEnd(); } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -522,6 +803,26 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics st _iter272.write(oprot); } } + BitSet optionals = new BitSet(); + if (struct.isSetTxnId()) { + optionals.set(0); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(1); + } + if (struct.isSetIsStatsCompliant()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); 
+ } + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } } @Override @@ -542,6 +843,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics str } } struct.setStatsObjIsSet(true); + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(1)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } + if (incoming.get(2)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java index 3c88d8fc6d..821049e11f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java @@ -42,6 +42,8 @@ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +55,17 @@ private String tblName; // required private ClientCapabilities capabilities; // optional private String catName; // optional + private long txnId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
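One subtlety in the ColumnStatistics change worth calling out: the no-arg constructor assigns txnId = -1L without touching the isset bitfield, so the default is visible through getTxnId() while isSetTxnId() stays false and nothing extra goes on the wire. Likewise, unsetTxnId() only clears the bit; the stored value is untouched. A sketch of that behaviour, using only the accessors added above:

import org.apache.hadoop.hive.metastore.api.ColumnStatistics;

public class ColumnStatisticsTxnIdExample {
  public static void main(String[] args) {
    ColumnStatistics cs = new ColumnStatistics();
    assert cs.getTxnId() == -1L;   // default assigned in the constructor
    assert !cs.isSetTxnId();       // ...but the isset bit is still clear

    cs.setTxnId(100L);
    assert cs.isSetTxnId();        // now the field would be serialized

    cs.unsetTxnId();
    assert !cs.isSetTxnId();       // only the bit is cleared
    assert cs.getTxnId() == 100L;  // the raw value itself is left in place
  }
}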
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), CAPABILITIES((short)3, "capabilities"), - CAT_NAME((short)4, "catName"); + CAT_NAME((short)4, "catName"), + TXN_ID((short)5, "txnId"), + VALID_WRITE_ID_LIST((short)6, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -82,6 +88,10 @@ public static _Fields findByThriftId(int fieldId) { return CAPABILITIES; case 4: // CAT_NAME return CAT_NAME; + case 5: // TXN_ID + return TXN_ID; + case 6: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -122,7 +132,9 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME}; + private static final int __TXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -134,11 +146,17 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap); } public GetTableRequest() { + this.txnId = -1L; + } public GetTableRequest( @@ -154,6 +172,7 @@ public GetTableRequest( * Performs a deep copy on other. 
*/ public GetTableRequest(GetTableRequest other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -166,6 +185,10 @@ public GetTableRequest(GetTableRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public GetTableRequest deepCopy() { @@ -178,6 +201,9 @@ public void clear() { this.tblName = null; this.capabilities = null; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; } public String getDbName() { @@ -272,6 +298,51 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -306,6 +377,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -323,6 +410,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -342,6 +435,10 @@ public boolean isSet(_Fields field) { return isSetCapabilities(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -395,6 +492,24 @@ public boolean equals(GetTableRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + 
return false; + } + return true; } @@ -422,6 +537,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -473,6 +598,26 @@ public int compareTo(GetTableRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -528,6 +673,22 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -558,6 +719,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
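+ // To make the reset below concrete: txnId is a primitive (i64) optional, so
+ // its set/unset state lives only in __isset_bitfield rather than in a null
+ // check, and Java deserialization bypasses the default constructor that would
+ // normally zero that bitfield before read() repopulates it.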
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -615,6 +778,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableRequest str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -652,6 +831,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableRequest st oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -678,13 +869,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest str if (struct.isSetCatName()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetTxnId()) { + optionals.set(2); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetCapabilities()) { struct.capabilities.write(oprot); } if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -694,7 +897,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru struct.setDbNameIsSet(true); struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.capabilities = new ClientCapabilities(); struct.capabilities.read(iprot); @@ -704,6 +907,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(2)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java index 968e250f0b..aa41c15c2e 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java @@ -39,6 +39,7 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("GetTableResult"); private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,10 +48,12 @@ } private Table table; // required + private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE((short)1, "table"); + TABLE((short)1, "table"), + IS_STATS_COMPLIANT((short)2, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -67,6 +70,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // TABLE return TABLE; + case 2: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -107,11 +112,16 @@ public String getFieldName() { } // isset id assignments + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableResult.class, metaDataMap); } @@ -130,9 +140,11 @@ public GetTableResult( * Performs a deep copy on other. 
*/ public GetTableResult(GetTableResult other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetTable()) { this.table = new Table(other.table); } + this.isStatsCompliant = other.isStatsCompliant; } public GetTableResult deepCopy() { @@ -142,6 +154,8 @@ public GetTableResult deepCopy() { @Override public void clear() { this.table = null; + setIsStatsCompliantIsSet(false); + this.isStatsCompliant = false; } public Table getTable() { @@ -167,6 +181,28 @@ public void setTableIsSet(boolean value) { } } + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE: @@ -177,6 +213,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((Boolean)value); + } + break; + } } @@ -185,6 +229,9 @@ public Object getFieldValue(_Fields field) { case TABLE: return getTable(); + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -198,6 +245,8 @@ public boolean isSet(_Fields field) { switch (field) { case TABLE: return isSetTable(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -224,6 +273,15 @@ public boolean equals(GetTableResult that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + return true; } @@ -236,6 +294,11 @@ public int hashCode() { if (present_table) list.add(table); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + return list.hashCode(); } @@ -257,6 +320,16 @@ public int compareTo(GetTableResult other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -284,6 +357,12 @@ public String toString() { sb.append(this.table); } first = false; + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } sb.append(")"); return sb.toString(); } @@ -310,6 +389,8 @@ private void 
writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -343,6 +424,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableResult stru org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -361,6 +450,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableResult str struct.table.write(oprot); oprot.writeFieldEnd(); } + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -379,6 +473,14 @@ public GetTableResultTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, GetTableResult struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; struct.table.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetIsStatsCompliant()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } } @Override @@ -387,6 +489,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableResult struc struct.table = new Table(); struct.table.read(iprot); struct.setTableIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java index 51f809a0f8..3778498728 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java @@ -47,6 +47,8 @@ private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)7); private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)8); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)10); + private static final org.apache.thrift.protocol.TField 
IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)11); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -63,6 +65,8 @@ private Map<String,String> parameters; // required private PrincipalPrivilegeSet privileges; // optional private String catName; // optional + private long writeId; // optional + private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -74,7 +78,9 @@ SD((short)6, "sd"), PARAMETERS((short)7, "parameters"), PRIVILEGES((short)8, "privileges"), - CAT_NAME((short)9, "catName"); + CAT_NAME((short)9, "catName"), + WRITE_ID((short)10, "writeId"), + IS_STATS_COMPLIANT((short)11, "isStatsCompliant"); private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); @@ -107,6 +113,10 @@ public static _Fields findByThriftId(int fieldId) { return PRIVILEGES; case 9: // CAT_NAME return CAT_NAME; + case 10: // WRITE_ID + return WRITE_ID; + case 11: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -149,8 +159,10 @@ public String getFieldName() { // isset id assignments private static final int __CREATETIME_ISSET_ID = 0; private static final int __LASTACCESSTIME_ISSET_ID = 1; + private static final int __WRITEID_ISSET_ID = 2; + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 3; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME,_Fields.WRITE_ID,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -175,11 +187,17 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Partition.class, metaDataMap); } public Partition() { + this.writeId = -1L; + } public Partition( @@ -233,6 +251,8 @@ public Partition(Partition other) { if (other.isSetCatName()) { this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.catName); } + this.writeId = other.writeId; + this.isStatsCompliant = other.isStatsCompliant; } public Partition deepCopy() { @@ -252,6 +272,10 @@ public void clear() { this.parameters = null; this.privileges = null; this.catName = null; + this.writeId = -1L; + + setIsStatsCompliantIsSet(false); +
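+ // clear() mirrors the defaults from the constructor: writeId returns to its
+ // -1L unset sentinel above, and isStatsCompliant drops its isset bit here
+ // before the value itself is reset just below.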
this.isStatsCompliant = false; } public int getValuesSize() { @@ -485,6 +509,50 @@ public void setCatNameIsSet(boolean value) { } } + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case VALUES: @@ -559,6 +627,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((Boolean)value); + } + break; + } } @@ -591,6 +675,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case WRITE_ID: + return getWriteId(); + + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -620,6 +710,10 @@ public boolean isSet(_Fields field) { return isSetPrivileges(); case CAT_NAME: return isSetCatName(); + case WRITE_ID: + return isSetWriteId(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -718,6 +812,24 @@ public boolean equals(Partition that) { return false; } + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + return true; } @@ -770,6 +882,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + 
list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + return list.hashCode(); } @@ -871,6 +993,26 @@ public int compareTo(Partition other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -958,6 +1100,18 @@ public String toString() { } first = false; } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } sb.append(")"); return sb.toString(); } @@ -1105,6 +1259,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 10: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 11: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1178,6 +1348,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct) oprot.writeFieldEnd(); } } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1223,7 +1403,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetCatName()) { optionals.set(8); } - oprot.writeBitSet(optionals, 9); + if (struct.isSetWriteId()) { + optionals.set(9); + } + if (struct.isSetIsStatsCompliant()) { + optionals.set(10); + } + oprot.writeBitSet(optionals, 11); if (struct.isSetValues()) { { oprot.writeI32(struct.values.size()); @@ -1264,12 +1450,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(9); + 
BitSet incoming = iprot.readBitSet(9); + BitSet incoming = iprot.readBitSet(11); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -1328,6 +1520,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) th struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString()); struct.setCatNameIsSet(true); } + if (incoming.get(9)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(10)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java index 247fdaa5ac..47ff56c624 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java @@ -44,6 +44,8 @@ private static final org.apache.thrift.protocol.TField SHARED_SDPARTITION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sharedSDPartitionSpec", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final org.apache.thrift.protocol.TField PARTITION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionList", org.apache.thrift.protocol.TType.STRUCT, (short)5); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)7); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)8); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -57,6 +59,8 @@ private PartitionSpecWithSharedSD sharedSDPartitionSpec; // optional private PartitionListComposingSpec partitionList; // optional private String catName; // optional + private long writeId; // optional + private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +69,9 @@ ROOT_PATH((short)3, "rootPath"), SHARED_SDPARTITION_SPEC((short)4, "sharedSDPartitionSpec"), PARTITION_LIST((short)5, "partitionList"), - CAT_NAME((short)6, "catName"); + CAT_NAME((short)6, "catName"), + WRITE_ID((short)7, "writeId"), + IS_STATS_COMPLIANT((short)8, "isStatsCompliant"); private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); @@ -92,6 +98,10 @@ public static _Fields findByThriftId(int fieldId) { return PARTITION_LIST; case 6: // CAT_NAME return CAT_NAME; + case 7: // WRITE_ID + return WRITE_ID; + case 8: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -132,7 +142,10 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME}; + private static final int __WRITEID_ISSET_ID = 0; + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME,_Fields.WRITE_ID,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -148,11 +161,17 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionListComposingSpec.class))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionSpec.class, metaDataMap); } public PartitionSpec() { + this.writeId = -1L; + } public PartitionSpec( @@ -170,6 +189,7 @@ public PartitionSpec( * Performs a deep copy on other.
*/ public PartitionSpec(PartitionSpec other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -188,6 +208,8 @@ public PartitionSpec(PartitionSpec other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.writeId = other.writeId; + this.isStatsCompliant = other.isStatsCompliant; } public PartitionSpec deepCopy() { @@ -202,6 +224,10 @@ public void clear() { this.sharedSDPartitionSpec = null; this.partitionList = null; this.catName = null; + this.writeId = -1L; + + setIsStatsCompliantIsSet(false); + this.isStatsCompliant = false; } public String getDbName() { @@ -342,6 +368,50 @@ public void setCatNameIsSet(boolean value) { } } + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -392,6 +462,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((Boolean)value); + } + break; + } } @@ -415,6 +501,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case WRITE_ID: + return getWriteId(); + + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -438,6 +530,10 @@ public boolean isSet(_Fields field) { return isSetPartitionList(); case CAT_NAME: return isSetCatName(); + case WRITE_ID: + return isSetWriteId(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -509,6 +605,24 @@ public boolean equals(PartitionSpec that) { return false; } + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && 
that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + return true; } @@ -546,6 +660,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + return list.hashCode(); } @@ -617,6 +741,26 @@ public int compareTo(PartitionSpec other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -690,6 +834,18 @@ public String toString() { } first = false; } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } sb.append(")"); return sb.toString(); } @@ -715,6 +871,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
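+ // Unlike Partition, PartitionSpec had no primitive optionals until now:
+ // writeId (bit 0) and isStatsCompliant (bit 1) introduce __isset_bitfield to
+ // this struct, so the same reset-before-read guard applies here as well.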
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -789,6 +947,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpec struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 8: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -838,6 +1012,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpec stru oprot.writeFieldEnd(); } } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -874,7 +1058,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetCatName()) { optionals.set(5); } - oprot.writeBitSet(optionals, 6); + if (struct.isSetWriteId()) { + optionals.set(6); + } + if (struct.isSetIsStatsCompliant()) { + optionals.set(7); + } + oprot.writeBitSet(optionals, 8); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -893,12 +1083,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(6); + BitSet incoming = iprot.readBitSet(8); if (incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -925,6 +1121,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(6)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(7)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index 91cf567e74..a298b89925 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -43,6 +43,8 @@ private static final 
org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -55,6 +57,8 @@ private List<String> colNames; // required private List<String> partNames; // required private String catName; // optional + private long txnId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +66,9 @@ TBL_NAME((short)2, "tblName"), COL_NAMES((short)3, "colNames"), PART_NAMES((short)4, "partNames"), - CAT_NAME((short)5, "catName"); + CAT_NAME((short)5, "catName"), + TXN_ID((short)6, "txnId"), + VALID_WRITE_ID_LIST((short)7, "validWriteIdList"); private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); @@ -87,6 +93,10 @@ public static _Fields findByThriftId(int fieldId) { return PART_NAMES; case 5: // CAT_NAME return CAT_NAME; + case 6: // TXN_ID + return TXN_ID; + case 7: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -127,7 +137,9 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.CAT_NAME}; + private static final int __TXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -143,11 +155,17 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap); } public
PartitionsStatsRequest() { + this.txnId = -1L; + } public PartitionsStatsRequest( @@ -167,6 +185,7 @@ public PartitionsStatsRequest( * Performs a deep copy on other. */ public PartitionsStatsRequest(PartitionsStatsRequest other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -184,6 +203,10 @@ public PartitionsStatsRequest(PartitionsStatsRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public PartitionsStatsRequest deepCopy() { @@ -197,6 +220,9 @@ public void clear() { this.colNames = null; this.partNames = null; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; } public String getDbName() { @@ -344,6 +370,51 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -386,6 +457,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -406,6 +493,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -427,6 +520,10 @@ public boolean isSet(_Fields field) { return isSetPartNames(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -489,6 +586,24 @@ public boolean equals(PartitionsStatsRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if 
(this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -521,6 +636,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -582,6 +707,26 @@ public int compareTo(PartitionsStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -643,6 +788,22 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -678,6 +839,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
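+ // Same guard as in GetTableRequest: txnId's isset bit has to be rebuilt by
+ // read(). On the wire, the field-id based standard scheme lets pre-change
+ // readers skip the new fields (ids 6 and 7), while the tuple scheme below
+ // widens its optionals bitset from 1 to 3 and carries no field ids, so
+ // tuple-encoded peers need to be regenerated together.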
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -762,6 +925,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -816,6 +995,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -853,10 +1044,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ if (struct.isSetCatName()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetTxnId()) { + optionals.set(1); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -888,11 +1091,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque } } struct.setPartNamesIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(1)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java index 4caec8fa7e..fe6130c37f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java @@ -39,6 +39,7 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult"); private static final org.apache.thrift.protocol.TField PART_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("partStats", org.apache.thrift.protocol.TType.MAP, (short)1); + private static final org.apache.thrift.protocol.TField 
IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -47,10 +48,12 @@ } private Map<String,List<ColumnStatisticsObj>> partStats; // required + private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - PART_STATS((short)1, "partStats"); + PART_STATS((short)1, "partStats"), + IS_STATS_COMPLIANT((short)2, "isStatsCompliant"); private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); @@ -67,6 +70,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // PART_STATS return PART_STATS; + case 2: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -107,6 +112,9 @@ public String getFieldName() { } // isset id assignments + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -115,6 +123,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsResult.class, metaDataMap); } @@ -133,6 +143,7 @@ public PartitionsStatsResult( * Performs a deep copy on other.
*/ public PartitionsStatsResult(PartitionsStatsResult other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetPartStats()) { Map<String,List<ColumnStatisticsObj>> __this__partStats = new HashMap<String,List<ColumnStatisticsObj>>(other.partStats.size()); for (Map.Entry<String, List<ColumnStatisticsObj>> other_element : other.partStats.entrySet()) { @@ -151,6 +162,7 @@ public PartitionsStatsResult(PartitionsStatsResult other) { } this.partStats = __this__partStats; } + this.isStatsCompliant = other.isStatsCompliant; } public PartitionsStatsResult deepCopy() { @@ -160,6 +172,8 @@ public PartitionsStatsResult deepCopy() { @Override public void clear() { this.partStats = null; + setIsStatsCompliantIsSet(false); + this.isStatsCompliant = false; } public int getPartStatsSize() { @@ -196,6 +210,28 @@ public void setPartStatsIsSet(boolean value) { } } + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case PART_STATS: @@ -206,6 +242,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((Boolean)value); + } + break; + } } @@ -214,6 +258,9 @@ public Object getFieldValue(_Fields field) { case PART_STATS: return getPartStats(); + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -227,6 +274,8 @@ public boolean isSet(_Fields field) { switch (field) { case PART_STATS: return isSetPartStats(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -253,6 +302,15 @@ public boolean equals(PartitionsStatsResult that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + return true; } @@ -265,6 +323,11 @@ public int hashCode() { if (present_partStats) list.add(partStats); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + return list.hashCode(); } @@ -286,6 +349,16 @@ public int compareTo(PartitionsStatsResult other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -313,6 +386,12 @@
public String toString() { sb.append(this.partStats); } first = false; + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } sb.append(")"); return sb.toString(); } @@ -336,6 +415,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -391,6 +472,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResu org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -424,6 +513,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRes } oprot.writeFieldEnd(); } + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -455,6 +549,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu } } } + BitSet optionals = new BitSet(); + if (struct.isSetIsStatsCompliant()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } } @Override @@ -483,6 +585,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResul } } struct.setPartStatsIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java index a0ae84e760..2053e3e244 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -40,6 +40,9 @@ private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1); private static final org.apache.thrift.protocol.TField NEED_MERGE_FIELD_DESC = new org.apache.thrift.protocol.TField("needMerge", org.apache.thrift.protocol.TType.BOOL, (short)2); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new 
org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)4); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,11 +52,17 @@ private List colStats; // required private boolean needMerge; // optional + private long txnId; // optional + private long writeId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { COL_STATS((short)1, "colStats"), - NEED_MERGE((short)2, "needMerge"); + NEED_MERGE((short)2, "needMerge"), + TXN_ID((short)3, "txnId"), + WRITE_ID((short)4, "writeId"), + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -72,6 +81,12 @@ public static _Fields findByThriftId(int fieldId) { return COL_STATS; case 2: // NEED_MERGE return NEED_MERGE; + case 3: // TXN_ID + return TXN_ID; + case 4: // WRITE_ID + return WRITE_ID; + case 5: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -113,8 +128,10 @@ public String getFieldName() { // isset id assignments private static final int __NEEDMERGE_ISSET_ID = 0; + private static final int __TXNID_ISSET_ID = 1; + private static final int __WRITEID_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NEED_MERGE}; + private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -123,11 +140,21 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class)))); tmpMap.put(_Fields.NEED_MERGE, new org.apache.thrift.meta_data.FieldMetaData("needMerge", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetPartitionsStatsRequest.class, metaDataMap); } public SetPartitionsStatsRequest() { + this.txnId = -1L; + + this.writeId = -1L; + } public SetPartitionsStatsRequest( @@ -150,6 +177,11 @@ public SetPartitionsStatsRequest(SetPartitionsStatsRequest other) { this.colStats = 
__this__colStats; } this.needMerge = other.needMerge; + this.txnId = other.txnId; + this.writeId = other.writeId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public SetPartitionsStatsRequest deepCopy() { @@ -161,6 +193,11 @@ public void clear() { this.colStats = null; setNeedMergeIsSet(false); this.needMerge = false; + this.txnId = -1L; + + this.writeId = -1L; + + this.validWriteIdList = null; } public int getColStatsSize() { @@ -223,6 +260,73 @@ public void setNeedMergeIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDMERGE_ISSET_ID, value); } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case COL_STATS: @@ -241,6 +345,30 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -252,6 +380,15 @@ public Object getFieldValue(_Fields field) { case NEED_MERGE: return isNeedMerge(); + case TXN_ID: + return getTxnId(); + + case WRITE_ID: + return getWriteId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -267,6 +404,12 @@ public boolean isSet(_Fields field) { return isSetColStats(); case NEED_MERGE: return isSetNeedMerge(); + case TXN_ID: + return isSetTxnId(); + case WRITE_ID: + return isSetWriteId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); 
} @@ -302,6 +445,33 @@ public boolean equals(SetPartitionsStatsRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -319,6 +489,21 @@ public int hashCode() { if (present_needMerge) list.add(needMerge); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -350,6 +535,36 @@ public int compareTo(SetPartitionsStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -383,6 +598,28 @@ public String toString() { sb.append(this.needMerge); first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -459,6 +696,30 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SetPartitionsStatsR org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // 
TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -489,6 +750,23 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SetPartitionsStats oprot.writeBool(struct.needMerge); oprot.writeFieldEnd(); } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -517,10 +795,28 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR if (struct.isSetNeedMerge()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetTxnId()) { + optionals.set(1); + } + if (struct.isSetWriteId()) { + optionals.set(2); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetNeedMerge()) { oprot.writeBool(struct.needMerge); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -538,11 +834,23 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRe } } struct.setColStatsIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.needMerge = iprot.readBool(); struct.setNeedMergeIsSet(true); } + if (incoming.get(1)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(2)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index 38d4f64f64..f9d48c8945 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -56,6 +56,8 @@ private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creationMetadata", org.apache.thrift.protocol.TType.STRUCT, (short)16); 
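The SetPartitionsStatsRequest hunks above follow Thrift's standard optional-field pattern: the no-arg constructor presets txnId and writeId to -1L without touching __isset_bitfield, so a fresh request reports those defaults but serializes neither field until a setter flips the corresponding isset bit. A minimal sketch of that behavior, using only the generated accessors visible in this diff; the wrapper class and the concrete txn/write-id values are hypothetical:

    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

    public class OptionalFieldSketch {
      public static void main(String[] args) {
        SetPartitionsStatsRequest req = new SetPartitionsStatsRequest();
        // The constructor presets the value but leaves the isset bit clear,
        // so field 3 (txnId) is not yet written to the wire.
        System.out.println(req.getTxnId());      // -1
        System.out.println(req.isSetTxnId());    // false
        req.setTxnId(42L);                       // hypothetical transaction id
        req.setWriteId(7L);                      // hypothetical write id
        req.setValidWriteIdList("db.tbl:10::");  // hypothetical snapshot string
        System.out.println(req.isSetTxnId());    // true: field 3 now serialized
        req.unsetTxnId();                        // clears the bit; field 3 is skipped again
      }
    }

The Table.java hunks that follow apply the same pattern to writeId and isStatsCompliant, with their isset bits folded into the existing __isset_bitfield at ids 5 and 6.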
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)17); private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)18); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)19); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)20); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -81,6 +83,8 @@ private CreationMetadata creationMetadata; // optional private String catName; // optional private PrincipalType ownerType; // optional + private long writeId; // optional + private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -105,7 +109,9 @@ * * @see PrincipalType */ - OWNER_TYPE((short)18, "ownerType"); + OWNER_TYPE((short)18, "ownerType"), + WRITE_ID((short)19, "writeId"), + IS_STATS_COMPLIANT((short)20, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -156,6 +162,10 @@ public static _Fields findByThriftId(int fieldId) { return CAT_NAME; case 18: // OWNER_TYPE return OWNER_TYPE; + case 19: // WRITE_ID + return WRITE_ID; + case 20: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -201,8 +211,10 @@ public String getFieldName() { private static final int __RETENTION_ISSET_ID = 2; private static final int __TEMPORARY_ISSET_ID = 3; private static final int __REWRITEENABLED_ISSET_ID = 4; + private static final int __WRITEID_ISSET_ID = 5; + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 6; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.WRITE_ID,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -245,6 +257,10 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.OWNER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("ownerType", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap); } @@ -254,6 +270,8 @@ public Table() { this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER; + this.writeId = -1L; + } public Table( @@ -342,6 +360,8 @@ public Table(Table other) { if (other.isSetOwnerType()) { this.ownerType = other.ownerType; } + this.writeId = other.writeId; + this.isStatsCompliant = other.isStatsCompliant; } public Table deepCopy() { @@ -374,6 +394,10 @@ public void clear() { this.catName = null; this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER; + this.writeId = -1L; + + setIsStatsCompliantIsSet(false); + this.isStatsCompliant = false; } public String getTableName() { @@ -819,6 +843,50 @@ public void setOwnerTypeIsSet(boolean value) { } } + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_NAME: @@ -965,6 +1033,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((Boolean)value); + } + break; + } } @@ -1024,6 +1108,12 @@ public Object getFieldValue(_Fields field) { case OWNER_TYPE: return getOwnerType(); + case WRITE_ID: + return getWriteId(); + + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -1071,6 +1161,10 @@ public boolean isSet(_Fields field) { return isSetCatName(); case OWNER_TYPE: return isSetOwnerType(); + case WRITE_ID: + return isSetWriteId(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -1250,6 +1344,24 @@ public boolean equals(Table that) { return false; } + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && 
that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + return true; } @@ -1347,6 +1459,16 @@ public int hashCode() { if (present_ownerType) list.add(ownerType.getValue()); + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + return list.hashCode(); } @@ -1538,6 +1660,26 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1693,6 +1835,18 @@ public String toString() { } first = false; } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } sb.append(")"); return sb.toString(); } @@ -1914,6 +2068,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 19: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 20: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -2034,6 +2204,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeFieldEnd(); } } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -2106,7 +2286,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetOwnerType()) { 
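    // A note on the tuple scheme, prompted by the writeBitSet/readBitSet change
    // in this hunk: the two new optional fields extend Table's optionals BitSet
    // from 18 to 20 bits. The standard scheme above degrades gracefully (an old
    // reader hits TProtocolUtil.skip for unknown field ids 19 and 20), but the
    // tuple scheme encodes optionals purely by bit position, so it assumes both
    // endpoints were generated from the same IDL; mixing old and new tuple-scheme
    // peers would misalign every bit that follows.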
optionals.set(17); } - oprot.writeBitSet(optionals, 18); + if (struct.isSetWriteId()) { + optionals.set(18); + } + if (struct.isSetIsStatsCompliant()) { + optionals.set(19); + } + oprot.writeBitSet(optionals, 20); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -2174,12 +2360,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetOwnerType()) { oprot.writeI32(struct.ownerType.getValue()); } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(18); + BitSet incoming = iprot.readBitSet(20); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -2276,6 +2468,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32()); struct.setOwnerTypeIsSet(true); } + if (incoming.get(18)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(19)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index a663a64c67..c9b70a4456 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -42,6 +42,8 @@ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +55,17 @@ private String tblName; // required private List colNames; // required private String catName; // optional + private long txnId; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), COL_NAMES((short)3, "colNames"), - CAT_NAME((short)4, "catName"); + CAT_NAME((short)4, "catName"), + TXN_ID((short)5, "txnId"), + VALID_WRITE_ID_LIST((short)6, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -82,6 +88,10 @@ public static _Fields findByThriftId(int fieldId) { return COL_NAMES; case 4: // CAT_NAME return CAT_NAME; + case 5: // TXN_ID + return TXN_ID; + case 6: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -122,7 +132,9 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.CAT_NAME}; + private static final int __TXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -135,11 +147,17 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsRequest.class, metaDataMap); } public TableStatsRequest() { + this.txnId = -1L; + } public TableStatsRequest( @@ -157,6 +175,7 @@ public TableStatsRequest( * Performs a deep copy on other. 
*/ public TableStatsRequest(TableStatsRequest other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -170,6 +189,10 @@ public TableStatsRequest(TableStatsRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + this.txnId = other.txnId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public TableStatsRequest deepCopy() { @@ -182,6 +205,9 @@ public void clear() { this.tblName = null; this.colNames = null; this.catName = null; + this.txnId = -1L; + + this.validWriteIdList = null; } public String getDbName() { @@ -291,6 +317,51 @@ public void setCatNameIsSet(boolean value) { } } + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -325,6 +396,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -342,6 +429,12 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case TXN_ID: + return getTxnId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -361,6 +454,10 @@ public boolean isSet(_Fields field) { return isSetColNames(); case CAT_NAME: return isSetCatName(); + case TXN_ID: + return isSetTxnId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -414,6 +511,24 @@ public boolean equals(TableStatsRequest that) { return false; } + boolean this_present_txnId = true && this.isSetTxnId(); + boolean that_present_txnId = true && that.isSetTxnId(); + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if 
(!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -441,6 +556,16 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_txnId = true && (isSetTxnId()); + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -492,6 +617,26 @@ public int compareTo(TableStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -545,6 +690,22 @@ public String toString() { } first = false; } + if (isSetTxnId()) { + if (!first) sb.append(", "); + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -576,6 +737,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
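    // Java deserialization allocates this object without running the no-arg
    // constructor, so the constructor defaults (e.g. txnId = -1L) are not
    // re-applied here; every field value and isset bit must come from the
    // compact-protocol bytes replayed by read() below. One subtle consequence,
    // noted as a caution rather than a guarantee: a request whose txnId was
    // never set round-trips through this path with txnId == 0 rather than -1L,
    // since an unset optional field is neither written nor re-defaulted.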
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -642,6 +805,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -684,6 +863,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest oprot.writeFieldEnd(); } } + if (struct.isSetTxnId()) { + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -714,10 +905,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s if (struct.isSetCatName()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetTxnId()) { + optionals.set(1); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetTxnId()) { + oprot.writeI64(struct.txnId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -738,11 +941,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st } } struct.setColNamesIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(1)) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java index dff7d5c204..0685a22c7c 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java @@ -39,6 +39,7 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsResult"); private static final org.apache.thrift.protocol.TField TABLE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableStats", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new 
org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,10 +48,12 @@ } private List tableStats; // required + private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_STATS((short)1, "tableStats"); + TABLE_STATS((short)1, "tableStats"), + IS_STATS_COMPLIANT((short)2, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -67,6 +70,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // TABLE_STATS return TABLE_STATS; + case 2: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; default: return null; } @@ -107,12 +112,17 @@ public String getFieldName() { } // isset id assignments + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.TABLE_STATS, new org.apache.thrift.meta_data.FieldMetaData("tableStats", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsResult.class, metaDataMap); } @@ -131,6 +141,7 @@ public TableStatsResult( * Performs a deep copy on other. 
*/ public TableStatsResult(TableStatsResult other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetTableStats()) { List __this__tableStats = new ArrayList(other.tableStats.size()); for (ColumnStatisticsObj other_element : other.tableStats) { @@ -138,6 +149,7 @@ public TableStatsResult(TableStatsResult other) { } this.tableStats = __this__tableStats; } + this.isStatsCompliant = other.isStatsCompliant; } public TableStatsResult deepCopy() { @@ -147,6 +159,8 @@ public TableStatsResult deepCopy() { @Override public void clear() { this.tableStats = null; + setIsStatsCompliantIsSet(false); + this.isStatsCompliant = false; } public int getTableStatsSize() { @@ -187,6 +201,28 @@ public void setTableStatsIsSet(boolean value) { } } + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_STATS: @@ -197,6 +233,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((Boolean)value); + } + break; + } } @@ -205,6 +249,9 @@ public Object getFieldValue(_Fields field) { case TABLE_STATS: return getTableStats(); + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + } throw new IllegalStateException(); } @@ -218,6 +265,8 @@ public boolean isSet(_Fields field) { switch (field) { case TABLE_STATS: return isSetTableStats(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -244,6 +293,15 @@ public boolean equals(TableStatsResult that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + return true; } @@ -256,6 +314,11 @@ public int hashCode() { if (present_tableStats) list.add(tableStats); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + return list.hashCode(); } @@ -277,6 +340,16 @@ public int compareTo(TableStatsResult other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -304,6 +377,12 @@ public String toString() { 
sb.append(this.tableStats); } first = false; + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } sb.append(")"); return sb.toString(); } @@ -327,6 +406,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -370,6 +451,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -395,6 +484,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsResult s } oprot.writeFieldEnd(); } + if (struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -419,6 +513,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st _iter428.write(oprot); } } + BitSet optionals = new BitSet(); + if (struct.isSetIsStatsCompliant()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } } @Override @@ -436,6 +538,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsResult str } } struct.setTableStatsIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index ec129ef922..183f977e7d 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -210,6 +210,8 @@ public void alter_partitions_with_environment_context(String db_name, String tbl_name, List new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public AlterPartitionsResponse alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException; public void rename_partition(String 
db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException; @@ -630,6 +632,8 @@ public void alter_partitions_with_environment_context(String db_name, String tbl_name, List new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void alter_partitions_with_environment_context_req(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void rename_partition(String db_name, String tbl_name, List part_vals, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -3434,6 +3438,35 @@ public void recv_alter_partitions_with_environment_context() throws InvalidOpera return; } + public AlterPartitionsResponse alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException + { + send_alter_partitions_with_environment_context_req(req); + return recv_alter_partitions_with_environment_context_req(); + } + + public void send_alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws org.apache.thrift.TException + { + alter_partitions_with_environment_context_req_args args = new alter_partitions_with_environment_context_req_args(); + args.setReq(req); + sendBase("alter_partitions_with_environment_context_req", args); + } + + public AlterPartitionsResponse recv_alter_partitions_with_environment_context_req() throws InvalidOperationException, MetaException, org.apache.thrift.TException + { + alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result(); + receiveBase(result, "alter_partitions_with_environment_context_req"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context_req failed: unknown result"); + } + public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException { send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); @@ -9937,6 +9970,38 @@ public void getResult() throws InvalidOperationException, MetaException, org.apa } } + public void alter_partitions_with_environment_context_req(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + alter_partitions_with_environment_context_req_call method_call = new alter_partitions_with_environment_context_req_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class 
alter_partitions_with_environment_context_req_call extends org.apache.thrift.async.TAsyncMethodCall { + private AlterPartitionsRequest req; + public alter_partitions_with_environment_context_req_call(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_partitions_with_environment_context_req", org.apache.thrift.protocol.TMessageType.CALL, 0)); + alter_partitions_with_environment_context_req_args args = new alter_partitions_with_environment_context_req_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public AlterPartitionsResponse getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_alter_partitions_with_environment_context_req(); + } + } + public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); alter_partition_with_environment_context_call method_call = new alter_partition_with_environment_context_call(db_name, tbl_name, new_part, environment_context, resultHandler, this, ___protocolFactory, ___transport); @@ -14128,6 +14193,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public alter_partitions_with_environment_context_req() { + super("alter_partitions_with_environment_context_req"); + } + + public alter_partitions_with_environment_context_req_args getEmptyArgsInstance() { + return new alter_partitions_with_environment_context_req_args(); + } + + protected boolean isOneway() { + return false; + } + + public alter_partitions_with_environment_context_req_result getResult(I iface, alter_partitions_with_environment_context_req_args args) throws org.apache.thrift.TException { + alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result(); + try { + result.success = iface.alter_partitions_with_environment_context_req(args.req); + } catch (InvalidOperationException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partition_with_environment_context extends org.apache.thrift.ProcessFunction { public alter_partition_with_environment_context() { super("alter_partition_with_environment_context"); @@ -19682,6 +19774,7 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { + 
@@ -19682,6 +19774,7 @@ protected AsyncProcessor(I iface, Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_with_environment_context_req_args, AlterPartitionsResponse> {
+      public alter_partitions_with_environment_context_req() {
+        super("alter_partitions_with_environment_context_req");
+      }
+
+      public alter_partitions_with_environment_context_req_args getEmptyArgsInstance() {
+        return new alter_partitions_with_environment_context_req_args();
+      }
+
+      public AsyncMethodCallback<AlterPartitionsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<AlterPartitionsResponse>() {
+          public void onComplete(AlterPartitionsResponse o) {
+            alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
+            if (e instanceof InvalidOperationException) {
+              result.o1 = (InvalidOperationException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            }
+            else if (e instanceof MetaException) {
+              result.o2 = (MetaException) e;
+              result.setO2IsSet(true);
+              msg = result;
+            }
+            else
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, alter_partitions_with_environment_context_req_args args, org.apache.thrift.async.AsyncMethodCallback<AlterPartitionsResponse> resultHandler) throws TException {
+        iface.alter_partitions_with_environment_context_req(args.req,resultHandler);
+      }
+    }
+
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_with_environment_context_args, Void> {
       public alter_partition_with_environment_context() {
         super("alter_partition_with_environment_context");
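Editor's note — async client sketch. The non-blocking path pairs the alter_partitions_with_environment_context_req_call class (earlier hunk) with a user-supplied callback. The sketch below assumes Thrift 0.9.x semantics, which is what Hive generates against here: the callback receives the completed method-call object and getResult() is invoked on it to deserialize the response or rethrow the declared exceptions. Callback typing changed across Thrift versions, so treat this as illustrative only.

import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.async.TAsyncClientManager;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TNonblockingSocket;

public class AlterPartitionsAsyncExample {
  static void fireAndCallback(AlterPartitionsRequest req) throws Exception {
    TAsyncClientManager manager = new TAsyncClientManager();
    TNonblockingSocket transport = new TNonblockingSocket("metastore-host", 9083); // hypothetical endpoint
    ThriftHiveMetastore.AsyncClient client =
        new ThriftHiveMetastore.AsyncClient(new TBinaryProtocol.Factory(), manager, transport);
    client.alter_partitions_with_environment_context_req(req,
        new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.alter_partitions_with_environment_context_req_call>() {
          public void onComplete(ThriftHiveMetastore.AsyncClient.alter_partitions_with_environment_context_req_call call) {
            try {
              AlterPartitionsResponse resp = call.getResult(); // runs recv_* over the frame buffer
            } catch (Exception e) {
              // InvalidOperationException / MetaException surface here
            }
          }
          public void onError(Exception e) {
            // transport-level failure
          }
        });
  }
}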
@@ -42384,13 +42539,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list952 = iprot.readListBegin();
-                struct.success = new ArrayList<String>(_list952.size);
-                String _elem953;
-                for (int _i954 = 0; _i954 < _list952.size; ++_i954)
+                org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
+                struct.success = new ArrayList<String>(_list960.size);
+                String _elem961;
+                for (int _i962 = 0; _i962 < _list960.size; ++_i962)
                 {
-                  _elem953 = iprot.readString();
-                  struct.success.add(_elem953);
+                  _elem961 = iprot.readString();
+                  struct.success.add(_elem961);
                 }
                 iprot.readListEnd();
               }

@@ -42425,9 +42580,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-          for (String _iter955 : struct.success)
+          for (String _iter963 : struct.success)
           {
-            oprot.writeString(_iter955);
+            oprot.writeString(_iter963);
           }
           oprot.writeListEnd();
         }

@@ -42466,9 +42621,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (String _iter956 : struct.success)
+          for (String _iter964 : struct.success)
           {
-            oprot.writeString(_iter956);
+            oprot.writeString(_iter964);
           }
         }
       }

@@ -42483,13 +42638,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result
       BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.success = new ArrayList<String>(_list957.size);
-          String _elem958;
-          for (int _i959 = 0; _i959 < _list957.size; ++_i959)
+          org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.success = new ArrayList<String>(_list965.size);
+          String _elem966;
+          for (int _i967 = 0; _i967 < _list965.size; ++_i967)
          {
-            _elem958 = iprot.readString();
-            struct.success.add(_elem958);
+            _elem966 = iprot.readString();
+            struct.success.add(_elem966);
          }
        }
        struct.setSuccessIsSet(true);

@@ -43143,13 +43298,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
-                struct.success = new ArrayList<String>(_list960.size);
-                String _elem961;
-                for (int _i962 = 0; _i962 < _list960.size; ++_i962)
+                org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
+                struct.success = new ArrayList<String>(_list968.size);
+                String _elem969;
+                for (int _i970 = 0; _i970 < _list968.size; ++_i970)
                 {
-                  _elem961 = iprot.readString();
-                  struct.success.add(_elem961);
+                  _elem969 = iprot.readString();
+                  struct.success.add(_elem969);
                 }
                 iprot.readListEnd();
               }

@@ -43184,9 +43339,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-          for (String _iter963 : struct.success)
+          for (String _iter971 : struct.success)
           {
-            oprot.writeString(_iter963);
+            oprot.writeString(_iter971);
           }
           oprot.writeListEnd();
         }

@@ -43225,9 +43380,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (String _iter964 : struct.success)
+          for (String _iter972 : struct.success)
           {
-            oprot.writeString(_iter964);
+            oprot.writeString(_iter972);
           }
         }
       }

@@ -43242,13 +43397,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re
       BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.success = new ArrayList<String>(_list965.size);
-          String _elem966;
-          for (int _i967 = 0; _i967 < _list965.size; ++_i967)
+          org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.success = new ArrayList<String>(_list973.size);
+          String _elem974;
+          for (int _i975 = 0; _i975 < _list973.size; ++_i975)
          {
-            _elem966 = iprot.readString();
-            struct.success.add(_elem966);
+            _elem974 = iprot.readString();
+            struct.success.add(_elem974);
          }
        }
        struct.setSuccessIsSet(true);

@@ -47855,16 +48010,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map968 = iprot.readMapBegin();
-                struct.success = new HashMap<String,Type>(2*_map968.size);
-                String _key969;
-                Type _val970;
-                for (int _i971 = 0; _i971 < _map968.size; ++_i971)
+                org.apache.thrift.protocol.TMap _map976 = iprot.readMapBegin();
+                struct.success = new HashMap<String,Type>(2*_map976.size);
+                String _key977;
+                Type _val978;
+                for (int _i979 = 0; _i979 < _map976.size; ++_i979)
                 {
-                  _key969 = iprot.readString();
-                  _val970 = new Type();
-                  _val970.read(iprot);
-                  struct.success.put(_key969, _val970);
+                  _key977 = iprot.readString();
+                  _val978 = new Type();
+                  _val978.read(iprot);
+                  struct.success.put(_key977, _val978);
                 }
                 iprot.readMapEnd();
               }

@@ -47899,10 +48054,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (Map.Entry<String, Type> _iter972 : struct.success.entrySet())
+          for (Map.Entry<String, Type> _iter980 : struct.success.entrySet())
           {
-            oprot.writeString(_iter972.getKey());
-            _iter972.getValue().write(oprot);
+            oprot.writeString(_iter980.getKey());
+            _iter980.getValue().write(oprot);
           }
           oprot.writeMapEnd();
         }

@@ -47941,10 +48096,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (Map.Entry<String, Type> _iter973 : struct.success.entrySet())
+          for (Map.Entry<String, Type> _iter981 : struct.success.entrySet())
           {
-            oprot.writeString(_iter973.getKey());
-            _iter973.getValue().write(oprot);
+            oprot.writeString(_iter981.getKey());
+            _iter981.getValue().write(oprot);
           }
         }
       }

@@ -47959,16 +48114,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result
       BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TMap _map974 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new HashMap<String,Type>(2*_map974.size);
-          String _key975;
-          Type _val976;
-          for (int _i977 = 0; _i977 < _map974.size; ++_i977)
+          org.apache.thrift.protocol.TMap _map982 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new HashMap<String,Type>(2*_map982.size);
+          String _key983;
+          Type _val984;
+          for (int _i985 = 0; _i985 < _map982.size; ++_i985)
          {
-            _key975 = iprot.readString();
-            _val976 = new Type();
-            _val976.read(iprot);
-            struct.success.put(_key975, _val976);
+            _key983 = iprot.readString();
+            _val984 = new Type();
+            _val984.read(iprot);
+            struct.success.put(_key983, _val984);
          }
        }
        struct.setSuccessIsSet(true);
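Editor's note — the renumbered hunks above and below are all the same generated shape: readListBegin (or readMapBegin) yields the element count, a counter loop reads each element, and the container is closed explicitly. The _listNNN/_elemNNN/_iNNN suffixes are just the Thrift generator's file-global temp counter, which the structs added by this patch shifted by eight. A minimal hand-written sketch of the list-read pattern (not Hive code):

import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

public class ListReadPatternSketch {
  // Same shape as the generated read() bodies: size-prefixed list,
  // fixed-count loop, explicit container end.
  static List<String> readStringList(TProtocol iprot) throws TException {
    TList tlist = iprot.readListBegin();
    List<String> out = new ArrayList<String>(tlist.size);
    for (int i = 0; i < tlist.size; ++i) {
      out.add(iprot.readString());
    }
    iprot.readListEnd();
    return out;
  }
}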
@@ -49003,14 +49158,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list978 = iprot.readListBegin();
-                struct.success = new ArrayList<FieldSchema>(_list978.size);
-                FieldSchema _elem979;
-                for (int _i980 = 0; _i980 < _list978.size; ++_i980)
+                org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
+                struct.success = new ArrayList<FieldSchema>(_list986.size);
+                FieldSchema _elem987;
+                for (int _i988 = 0; _i988 < _list986.size; ++_i988)
                 {
-                  _elem979 = new FieldSchema();
-                  _elem979.read(iprot);
-                  struct.success.add(_elem979);
+                  _elem987 = new FieldSchema();
+                  _elem987.read(iprot);
+                  struct.success.add(_elem987);
                 }
                 iprot.readListEnd();
               }

@@ -49063,9 +49218,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (FieldSchema _iter981 : struct.success)
+          for (FieldSchema _iter989 : struct.success)
           {
-            _iter981.write(oprot);
+            _iter989.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -49120,9 +49275,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (FieldSchema _iter982 : struct.success)
+          for (FieldSchema _iter990 : struct.success)
           {
-            _iter982.write(oprot);
+            _iter990.write(oprot);
           }
         }
       }

@@ -49143,14 +49298,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st
       BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<FieldSchema>(_list983.size);
-          FieldSchema _elem984;
-          for (int _i985 = 0; _i985 < _list983.size; ++_i985)
+          org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<FieldSchema>(_list991.size);
+          FieldSchema _elem992;
+          for (int _i993 = 0; _i993 < _list991.size; ++_i993)
          {
-            _elem984 = new FieldSchema();
-            _elem984.read(iprot);
-            struct.success.add(_elem984);
+            _elem992 = new FieldSchema();
+            _elem992.read(iprot);
+            struct.success.add(_elem992);
          }
        }
        struct.setSuccessIsSet(true);

@@ -50304,14 +50459,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
-                struct.success = new ArrayList<FieldSchema>(_list986.size);
-                FieldSchema _elem987;
-                for (int _i988 = 0; _i988 < _list986.size; ++_i988)
+                org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
+                struct.success = new ArrayList<FieldSchema>(_list994.size);
+                FieldSchema _elem995;
+                for (int _i996 = 0; _i996 < _list994.size; ++_i996)
                 {
-                  _elem987 = new FieldSchema();
-                  _elem987.read(iprot);
-                  struct.success.add(_elem987);
+                  _elem995 = new FieldSchema();
+                  _elem995.read(iprot);
+                  struct.success.add(_elem995);
                 }
                 iprot.readListEnd();
               }

@@ -50364,9 +50519,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (FieldSchema _iter989 : struct.success)
+          for (FieldSchema _iter997 : struct.success)
           {
-            _iter989.write(oprot);
+            _iter997.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -50421,9 +50576,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (FieldSchema _iter990 : struct.success)
+          for (FieldSchema _iter998 : struct.success)
           {
-            _iter990.write(oprot);
+            _iter998.write(oprot);
           }
         }
       }

@@ -50444,14 +50599,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi
       BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<FieldSchema>(_list991.size);
-          FieldSchema _elem992;
-          for (int _i993 = 0; _i993 < _list991.size; ++_i993)
+          org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<FieldSchema>(_list999.size);
+          FieldSchema _elem1000;
+          for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
          {
-            _elem992 = new FieldSchema();
-            _elem992.read(iprot);
-            struct.success.add(_elem992);
+            _elem1000 = new FieldSchema();
+            _elem1000.read(iprot);
+            struct.success.add(_elem1000);
          }
        }
        struct.setSuccessIsSet(true);

@@ -51496,14 +51651,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
-                struct.success = new ArrayList<FieldSchema>(_list994.size);
-                FieldSchema _elem995;
-                for (int _i996 = 0; _i996 < _list994.size; ++_i996)
+                org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
+                struct.success = new ArrayList<FieldSchema>(_list1002.size);
+                FieldSchema _elem1003;
+                for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
                 {
-                  _elem995 = new FieldSchema();
-                  _elem995.read(iprot);
-                  struct.success.add(_elem995);
+                  _elem1003 = new FieldSchema();
+                  _elem1003.read(iprot);
+                  struct.success.add(_elem1003);
                 }
                 iprot.readListEnd();
               }

@@ -51556,9 +51711,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (FieldSchema _iter997 : struct.success)
+          for (FieldSchema _iter1005 : struct.success)
           {
-            _iter997.write(oprot);
+            _iter1005.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -51613,9 +51768,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (FieldSchema _iter998 : struct.success)
+          for (FieldSchema _iter1006 : struct.success)
           {
-            _iter998.write(oprot);
+            _iter1006.write(oprot);
           }
         }
       }

@@ -51636,14 +51791,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st
      BitSet incoming = iprot.readBitSet(4);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<FieldSchema>(_list999.size);
-          FieldSchema _elem1000;
-          for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
+          org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<FieldSchema>(_list1007.size);
+          FieldSchema _elem1008;
+          for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009)
          {
-            _elem1000 = new FieldSchema();
-            _elem1000.read(iprot);
-            struct.success.add(_elem1000);
+            _elem1008 = new FieldSchema();
+            _elem1008.read(iprot);
+            struct.success.add(_elem1008);
          }
        }
        struct.setSuccessIsSet(true);
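Editor's note — the FieldSchema hunks above renumber the serializers behind get_fields / get_fields_with_environment_context / get_schema; the public surface of those calls is unchanged by this patch. For orientation, a hedged one-method example of the call these result structs serve:

import java.util.List;

public class GetFieldsExample {
  static List<FieldSchema> columnsOf(ThriftHiveMetastore.Client client) throws Exception {
    // get_fields returns the non-partition columns of a table;
    // get_schema additionally appends the partition keys.
    return client.get_fields("default", "acid_tbl");
  }
}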
@@ -52797,14 +52952,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
-                struct.success = new ArrayList<FieldSchema>(_list1002.size);
-                FieldSchema _elem1003;
-                for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
+                org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin();
+                struct.success = new ArrayList<FieldSchema>(_list1010.size);
+                FieldSchema _elem1011;
+                for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012)
                 {
-                  _elem1003 = new FieldSchema();
-                  _elem1003.read(iprot);
-                  struct.success.add(_elem1003);
+                  _elem1011 = new FieldSchema();
+                  _elem1011.read(iprot);
+                  struct.success.add(_elem1011);
                 }
                 iprot.readListEnd();
               }

@@ -52857,9 +53012,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (FieldSchema _iter1005 : struct.success)
+          for (FieldSchema _iter1013 : struct.success)
           {
-            _iter1005.write(oprot);
+            _iter1013.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -52914,9 +53069,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (FieldSchema _iter1006 : struct.success)
+          for (FieldSchema _iter1014 : struct.success)
           {
-            _iter1006.write(oprot);
+            _iter1014.write(oprot);
           }
         }
       }

@@ -52937,14 +53092,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi
      BitSet incoming = iprot.readBitSet(4);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<FieldSchema>(_list1007.size);
-          FieldSchema _elem1008;
-          for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009)
+          org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<FieldSchema>(_list1015.size);
+          FieldSchema _elem1016;
+          for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017)
          {
-            _elem1008 = new FieldSchema();
-            _elem1008.read(iprot);
-            struct.success.add(_elem1008);
+            _elem1016 = new FieldSchema();
+            _elem1016.read(iprot);
+            struct.success.add(_elem1016);
          }
        }
        struct.setSuccessIsSet(true);

@@ -56073,14 +56228,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c
           case 2: // PRIMARY_KEYS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin();
-                struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1010.size);
-                SQLPrimaryKey _elem1011;
-                for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012)
+                org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin();
+                struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1018.size);
+                SQLPrimaryKey _elem1019;
+                for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020)
                 {
-                  _elem1011 = new SQLPrimaryKey();
-                  _elem1011.read(iprot);
-                  struct.primaryKeys.add(_elem1011);
+                  _elem1019 = new SQLPrimaryKey();
+                  _elem1019.read(iprot);
+                  struct.primaryKeys.add(_elem1019);
                 }
                 iprot.readListEnd();
               }

@@ -56092,14 +56247,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c
           case 3: // FOREIGN_KEYS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1013 = iprot.readListBegin();
-                struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1013.size);
-                SQLForeignKey _elem1014;
-                for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015)
+                org.apache.thrift.protocol.TList _list1021 = iprot.readListBegin();
+                struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1021.size);
+                SQLForeignKey _elem1022;
+                for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023)
                 {
-                  _elem1014 = new SQLForeignKey();
-                  _elem1014.read(iprot);
-                  struct.foreignKeys.add(_elem1014);
+                  _elem1022 = new SQLForeignKey();
+                  _elem1022.read(iprot);
+                  struct.foreignKeys.add(_elem1022);
                 }
                 iprot.readListEnd();
               }

@@ -56111,14 +56266,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c
           case 4: // UNIQUE_CONSTRAINTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin();
-                struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1016.size);
-                SQLUniqueConstraint _elem1017;
-                for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018)
+                org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin();
+                struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1024.size);
+                SQLUniqueConstraint _elem1025;
+                for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026)
                 {
-                  _elem1017 = new SQLUniqueConstraint();
-                  _elem1017.read(iprot);
-                  struct.uniqueConstraints.add(_elem1017);
+                  _elem1025 = new SQLUniqueConstraint();
+                  _elem1025.read(iprot);
+                  struct.uniqueConstraints.add(_elem1025);
                 }
                 iprot.readListEnd();
               }

@@ -56130,14 +56285,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c
           case 5: // NOT_NULL_CONSTRAINTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1019 = iprot.readListBegin();
-                struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1019.size);
-                SQLNotNullConstraint _elem1020;
-                for (int _i1021 = 0; _i1021 < _list1019.size; ++_i1021)
+                org.apache.thrift.protocol.TList _list1027 = iprot.readListBegin();
+                struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1027.size);
+                SQLNotNullConstraint _elem1028;
+                for (int _i1029 = 0; _i1029 < _list1027.size; ++_i1029)
                 {
-                  _elem1020 = new SQLNotNullConstraint();
-                  _elem1020.read(iprot);
-                  struct.notNullConstraints.add(_elem1020);
+                  _elem1028 = new SQLNotNullConstraint();
+                  _elem1028.read(iprot);
+                  struct.notNullConstraints.add(_elem1028);
                 }
                 iprot.readListEnd();
               }

@@ -56149,14 +56304,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c
           case 6: // DEFAULT_CONSTRAINTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1022 = iprot.readListBegin();
-                struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1022.size);
-                SQLDefaultConstraint _elem1023;
-                for (int _i1024 = 0; _i1024 < _list1022.size; ++_i1024)
+                org.apache.thrift.protocol.TList _list1030 = iprot.readListBegin();
+                struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1030.size);
+                SQLDefaultConstraint _elem1031;
+                for (int _i1032 = 0; _i1032 < _list1030.size; ++_i1032)
                 {
-                  _elem1023 = new SQLDefaultConstraint();
-                  _elem1023.read(iprot);
-                  struct.defaultConstraints.add(_elem1023);
+                  _elem1031 = new SQLDefaultConstraint();
+                  _elem1031.read(iprot);
+                  struct.defaultConstraints.add(_elem1031);
                 }
                 iprot.readListEnd();
               }

@@ -56168,14 +56323,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c
           case 7: // CHECK_CONSTRAINTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1025 = iprot.readListBegin();
-                struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1025.size);
-                SQLCheckConstraint _elem1026;
-                for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027)
+                org.apache.thrift.protocol.TList _list1033 = iprot.readListBegin();
+                struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1033.size);
+                SQLCheckConstraint _elem1034;
+                for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035)
                 {
-                  _elem1026 = new SQLCheckConstraint();
-                  _elem1026.read(iprot);
-                  struct.checkConstraints.add(_elem1026);
+                  _elem1034 = new SQLCheckConstraint();
+                  _elem1034.read(iprot);
+                  struct.checkConstraints.add(_elem1034);
                 }
                 iprot.readListEnd();
               }

@@ -56206,9 +56361,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_
         oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size()));
-          for (SQLPrimaryKey _iter1028 : struct.primaryKeys)
+          for (SQLPrimaryKey _iter1036 : struct.primaryKeys)
           {
-            _iter1028.write(oprot);
+            _iter1036.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -56218,9 +56373,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_
         oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size()));
-          for (SQLForeignKey _iter1029 : struct.foreignKeys)
+          for (SQLForeignKey _iter1037 : struct.foreignKeys)
           {
-            _iter1029.write(oprot);
+            _iter1037.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -56230,9 +56385,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_
         oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size()));
-          for (SQLUniqueConstraint _iter1030 : struct.uniqueConstraints)
+          for (SQLUniqueConstraint _iter1038 : struct.uniqueConstraints)
           {
-            _iter1030.write(oprot);
+            _iter1038.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -56242,9 +56397,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_
         oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size()));
-          for (SQLNotNullConstraint _iter1031 : struct.notNullConstraints)
+          for (SQLNotNullConstraint _iter1039 : struct.notNullConstraints)
           {
-            _iter1031.write(oprot);
+            _iter1039.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -56254,9 +56409,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_
         oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size()));
-          for (SQLDefaultConstraint _iter1032 : struct.defaultConstraints)
+          for (SQLDefaultConstraint _iter1040 : struct.defaultConstraints)
           {
-            _iter1032.write(oprot);
+            _iter1040.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -56266,9 +56421,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_
         oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size()));
-          for (SQLCheckConstraint _iter1033 : struct.checkConstraints)
+          for (SQLCheckConstraint _iter1041 : struct.checkConstraints)
           {
-            _iter1033.write(oprot);
+            _iter1041.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -56320,54 +56475,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c
      if (struct.isSetPrimaryKeys()) {
        {
          oprot.writeI32(struct.primaryKeys.size());
-          for (SQLPrimaryKey _iter1034 : struct.primaryKeys)
+          for (SQLPrimaryKey _iter1042 : struct.primaryKeys)
          {
-            _iter1034.write(oprot);
+            _iter1042.write(oprot);
          }
        }
      }
      if (struct.isSetForeignKeys()) {
        {
          oprot.writeI32(struct.foreignKeys.size());
-          for (SQLForeignKey _iter1035 : struct.foreignKeys)
+          for (SQLForeignKey _iter1043 : struct.foreignKeys)
          {
-            _iter1035.write(oprot);
+            _iter1043.write(oprot);
          }
        }
      }
      if (struct.isSetUniqueConstraints()) {
        {
          oprot.writeI32(struct.uniqueConstraints.size());
-          for (SQLUniqueConstraint _iter1036 : struct.uniqueConstraints)
+          for (SQLUniqueConstraint _iter1044 : struct.uniqueConstraints)
          {
-            _iter1036.write(oprot);
+            _iter1044.write(oprot);
          }
        }
      }
      if (struct.isSetNotNullConstraints()) {
        {
          oprot.writeI32(struct.notNullConstraints.size());
-          for (SQLNotNullConstraint _iter1037 : struct.notNullConstraints)
+          for (SQLNotNullConstraint _iter1045 : struct.notNullConstraints)
          {
-            _iter1037.write(oprot);
+            _iter1045.write(oprot);
          }
        }
      }
      if (struct.isSetDefaultConstraints()) {
        {
          oprot.writeI32(struct.defaultConstraints.size());
-          for (SQLDefaultConstraint _iter1038 : struct.defaultConstraints)
+          for (SQLDefaultConstraint _iter1046 : struct.defaultConstraints)
          {
-            _iter1038.write(oprot);
+            _iter1046.write(oprot);
          }
        }
      }
      if (struct.isSetCheckConstraints()) {
        {
          oprot.writeI32(struct.checkConstraints.size());
-          for (SQLCheckConstraint _iter1039 : struct.checkConstraints)
+          for (SQLCheckConstraint _iter1047 : struct.checkConstraints)
          {
-            _iter1039.write(oprot);
+            _iter1047.write(oprot);
          }
        }
      }

@@ -56384,84 +56539,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co
      }
      if (incoming.get(1)) {
        {
-          org.apache.thrift.protocol.TList _list1040 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1040.size);
-          SQLPrimaryKey _elem1041;
-          for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042)
+          org.apache.thrift.protocol.TList _list1048 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1048.size);
+          SQLPrimaryKey _elem1049;
+          for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050)
          {
-            _elem1041 = new SQLPrimaryKey();
-            _elem1041.read(iprot);
-            struct.primaryKeys.add(_elem1041);
+            _elem1049 = new SQLPrimaryKey();
+            _elem1049.read(iprot);
+            struct.primaryKeys.add(_elem1049);
          }
        }
        struct.setPrimaryKeysIsSet(true);
      }
      if (incoming.get(2)) {
        {
-          org.apache.thrift.protocol.TList _list1043 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1043.size);
-          SQLForeignKey _elem1044;
-          for (int _i1045 = 0; _i1045 < _list1043.size; ++_i1045)
+          org.apache.thrift.protocol.TList _list1051 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1051.size);
+          SQLForeignKey _elem1052;
+          for (int _i1053 = 0; _i1053 < _list1051.size; ++_i1053)
          {
-            _elem1044 = new SQLForeignKey();
-            _elem1044.read(iprot);
-            struct.foreignKeys.add(_elem1044);
+            _elem1052 = new SQLForeignKey();
+            _elem1052.read(iprot);
+            struct.foreignKeys.add(_elem1052);
          }
        }
        struct.setForeignKeysIsSet(true);
      }
      if (incoming.get(3)) {
        {
-          org.apache.thrift.protocol.TList _list1046 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1046.size);
-          SQLUniqueConstraint _elem1047;
-          for (int _i1048 = 0; _i1048 < _list1046.size; ++_i1048)
+          org.apache.thrift.protocol.TList _list1054 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1054.size);
+          SQLUniqueConstraint _elem1055;
+          for (int _i1056 = 0; _i1056 < _list1054.size; ++_i1056)
          {
-            _elem1047 = new SQLUniqueConstraint();
-            _elem1047.read(iprot);
-            struct.uniqueConstraints.add(_elem1047);
+            _elem1055 = new SQLUniqueConstraint();
+            _elem1055.read(iprot);
+            struct.uniqueConstraints.add(_elem1055);
          }
        }
        struct.setUniqueConstraintsIsSet(true);
      }
      if (incoming.get(4)) {
        {
-          org.apache.thrift.protocol.TList _list1049 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1049.size);
-          SQLNotNullConstraint _elem1050;
-          for (int _i1051 = 0; _i1051 < _list1049.size; ++_i1051)
+          org.apache.thrift.protocol.TList _list1057 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1057.size);
+          SQLNotNullConstraint _elem1058;
+          for (int _i1059 = 0; _i1059 < _list1057.size; ++_i1059)
          {
-            _elem1050 = new SQLNotNullConstraint();
-            _elem1050.read(iprot);
-            struct.notNullConstraints.add(_elem1050);
+            _elem1058 = new SQLNotNullConstraint();
+            _elem1058.read(iprot);
+            struct.notNullConstraints.add(_elem1058);
          }
        }
        struct.setNotNullConstraintsIsSet(true);
      }
      if (incoming.get(5)) {
        {
-          org.apache.thrift.protocol.TList _list1052 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1052.size);
-          SQLDefaultConstraint _elem1053;
-          for (int _i1054 = 0; _i1054 < _list1052.size; ++_i1054)
+          org.apache.thrift.protocol.TList _list1060 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1060.size);
+          SQLDefaultConstraint _elem1061;
+          for (int _i1062 = 0; _i1062 < _list1060.size; ++_i1062)
          {
-            _elem1053 = new SQLDefaultConstraint();
-            _elem1053.read(iprot);
-            struct.defaultConstraints.add(_elem1053);
+            _elem1061 = new SQLDefaultConstraint();
+            _elem1061.read(iprot);
+            struct.defaultConstraints.add(_elem1061);
          }
        }
        struct.setDefaultConstraintsIsSet(true);
      }
      if (incoming.get(6)) {
        {
-          org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1055.size);
-          SQLCheckConstraint _elem1056;
-          for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057)
+          org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1063.size);
+          SQLCheckConstraint _elem1064;
+          for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
          {
-            _elem1056 = new SQLCheckConstraint();
-            _elem1056.read(iprot);
-            struct.checkConstraints.add(_elem1056);
+            _elem1064 = new SQLCheckConstraint();
+            _elem1064.read(iprot);
+            struct.checkConstraints.add(_elem1064);
          }
        }
        struct.setCheckConstraintsIsSet(true);
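Editor's note — the create_table_with_constraints tuple-scheme hunks above show Thrift's compact optional-field encoding: the writer first emits a bitset recording which of the seven fields are present, then only those list payloads, and the reader consumes fields in the identical order keyed off the same bit positions. A hand-written illustration of that contract (not Hive code; the real write/read of the bitset goes through TTupleProtocol):

import java.util.BitSet;

public class TupleBitSetSketch {
  public static void main(String[] args) {
    // Writer side: record which optional fields follow (bit 1 = primaryKeys,
    // bit 2 = foreignKeys, ... bit 6 = checkConstraints, as in the hunks above).
    BitSet outgoing = new BitSet();
    outgoing.set(1); // struct.isSetPrimaryKeys()
    outgoing.set(4); // struct.isSetNotNullConstraints()
    // oprot.writeBitSet(outgoing, 7) would precede the field payloads.

    // Reader side: consume fields in the same order, guarded by the same bits.
    BitSet incoming = (BitSet) outgoing.clone(); // stands in for iprot.readBitSet(7)
    if (incoming.get(1)) System.out.println("read primaryKeys list");
    if (incoming.get(4)) System.out.println("read notNullConstraints list");
  }
}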
@@ -65611,13 +65766,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args
           case 3: // PART_NAMES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin();
-                struct.partNames = new ArrayList<String>(_list1058.size);
-                String _elem1059;
-                for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060)
+                org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
+                struct.partNames = new ArrayList<String>(_list1066.size);
+                String _elem1067;
+                for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
                 {
-                  _elem1059 = iprot.readString();
-                  struct.partNames.add(_elem1059);
+                  _elem1067 = iprot.readString();
+                  struct.partNames.add(_elem1067);
                 }
                 iprot.readListEnd();
               }

@@ -65653,9 +65808,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg
         oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
-          for (String _iter1061 : struct.partNames)
+          for (String _iter1069 : struct.partNames)
           {
-            oprot.writeString(_iter1061);
+            oprot.writeString(_iter1069);
           }
           oprot.writeListEnd();
         }

@@ -65698,9 +65853,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args
       if (struct.isSetPartNames()) {
        {
          oprot.writeI32(struct.partNames.size());
-          for (String _iter1062 : struct.partNames)
+          for (String _iter1070 : struct.partNames)
          {
-            oprot.writeString(_iter1062);
+            oprot.writeString(_iter1070);
          }
        }
      }

@@ -65720,13 +65875,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args
      }
      if (incoming.get(2)) {
        {
-          org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.partNames = new ArrayList<String>(_list1063.size);
-          String _elem1064;
-          for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
+          org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.partNames = new ArrayList<String>(_list1071.size);
+          String _elem1072;
+          for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
          {
-            _elem1064 = iprot.readString();
-            struct.partNames.add(_elem1064);
+            _elem1072 = iprot.readString();
+            struct.partNames.add(_elem1072);
          }
        }
        struct.setPartNamesIsSet(true);

@@ -66951,13 +67106,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
-                struct.success = new ArrayList<String>(_list1066.size);
-                String _elem1067;
-                for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
+                org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
+                struct.success = new ArrayList<String>(_list1074.size);
+                String _elem1075;
+                for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
                 {
-                  _elem1067 = iprot.readString();
-                  struct.success.add(_elem1067);
+                  _elem1075 = iprot.readString();
+                  struct.success.add(_elem1075);
                 }
                 iprot.readListEnd();
               }

@@ -66992,9 +67147,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-          for (String _iter1069 : struct.success)
+          for (String _iter1077 : struct.success)
           {
-            oprot.writeString(_iter1069);
+            oprot.writeString(_iter1077);
           }
           oprot.writeListEnd();
         }

@@ -67033,9 +67188,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-          for (String _iter1070 : struct.success)
+          for (String _iter1078 : struct.success)
          {
-            oprot.writeString(_iter1070);
+            oprot.writeString(_iter1078);
          }
        }
      }

@@ -67050,13 +67205,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.success = new ArrayList<String>(_list1071.size);
-          String _elem1072;
-          for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
+          org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.success = new ArrayList<String>(_list1079.size);
+          String _elem1080;
+          for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
          {
-            _elem1072 = iprot.readString();
-            struct.success.add(_elem1072);
+            _elem1080 = iprot.readString();
+            struct.success.add(_elem1080);
          }
        }
        struct.setSuccessIsSet(true);

@@ -68030,13 +68185,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
-                struct.success = new ArrayList<String>(_list1074.size);
-                String _elem1075;
-                for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
+                org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
+                struct.success = new ArrayList<String>(_list1082.size);
+                String _elem1083;
+                for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
                 {
-                  _elem1075 = iprot.readString();
-                  struct.success.add(_elem1075);
+                  _elem1083 = iprot.readString();
+                  struct.success.add(_elem1083);
                 }
                 iprot.readListEnd();
               }

@@ -68071,9 +68226,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-          for (String _iter1077 : struct.success)
+          for (String _iter1085 : struct.success)
           {
-            oprot.writeString(_iter1077);
+            oprot.writeString(_iter1085);
           }
           oprot.writeListEnd();
         }

@@ -68112,9 +68267,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-          for (String _iter1078 : struct.success)
+          for (String _iter1086 : struct.success)
          {
-            oprot.writeString(_iter1078);
+            oprot.writeString(_iter1086);
          }
        }
      }

@@ -68129,13 +68284,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.success = new ArrayList<String>(_list1079.size);
-          String _elem1080;
-          for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
+          org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.success = new ArrayList<String>(_list1087.size);
+          String _elem1088;
+          for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
          {
-            _elem1080 = iprot.readString();
-            struct.success.add(_elem1080);
+            _elem1088 = iprot.readString();
+            struct.success.add(_elem1088);
          }
        }
        struct.setSuccessIsSet(true);

@@ -68901,13 +69056,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
-                struct.success = new ArrayList<String>(_list1082.size);
-                String _elem1083;
-                for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
+                org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
+                struct.success = new ArrayList<String>(_list1090.size);
+                String _elem1091;
+                for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
                 {
-                  _elem1083 = iprot.readString();
-                  struct.success.add(_elem1083);
+                  _elem1091 = iprot.readString();
+                  struct.success.add(_elem1091);
                 }
                 iprot.readListEnd();
               }

@@ -68942,9 +69097,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-          for (String _iter1085 : struct.success)
+          for (String _iter1093 : struct.success)
           {
-            oprot.writeString(_iter1085);
+            oprot.writeString(_iter1093);
           }
           oprot.writeListEnd();
         }

@@ -68983,9 +69138,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-          for (String _iter1086 : struct.success)
+          for (String _iter1094 : struct.success)
          {
-            oprot.writeString(_iter1086);
+            oprot.writeString(_iter1094);
          }
        }
      }

@@ -69000,13 +69155,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.success = new ArrayList<String>(_list1087.size);
-          String _elem1088;
-          for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
+          org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.success = new ArrayList<String>(_list1095.size);
+          String _elem1096;
+          for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
          {
-            _elem1088 = iprot.readString();
-            struct.success.add(_elem1088);
+            _elem1096 = iprot.readString();
+            struct.success.add(_elem1096);
          }
        }
        struct.setSuccessIsSet(true);

@@ -69511,13 +69666,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args
           case 3: // TBL_TYPES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
-                struct.tbl_types = new ArrayList<String>(_list1090.size);
-                String _elem1091;
-                for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
+                org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
+                struct.tbl_types = new ArrayList<String>(_list1098.size);
+                String _elem1099;
+                for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
                 {
-                  _elem1091 = iprot.readString();
-                  struct.tbl_types.add(_elem1091);
+                  _elem1099 = iprot.readString();
+                  struct.tbl_types.add(_elem1099);
                 }
                 iprot.readListEnd();
               }
@@ -69553,9 +69708,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg
         oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size()));
-          for (String _iter1093 : struct.tbl_types)
+          for (String _iter1101 : struct.tbl_types)
           {
-            oprot.writeString(_iter1093);
+            oprot.writeString(_iter1101);
           }
           oprot.writeListEnd();
         }

@@ -69598,9 +69753,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args
      if (struct.isSetTbl_types()) {
        {
          oprot.writeI32(struct.tbl_types.size());
-          for (String _iter1094 : struct.tbl_types)
+          for (String _iter1102 : struct.tbl_types)
          {
-            oprot.writeString(_iter1094);
+            oprot.writeString(_iter1102);
          }
        }
      }

@@ -69620,13 +69775,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args
      }
      if (incoming.get(2)) {
        {
-          org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.tbl_types = new ArrayList<String>(_list1095.size);
-          String _elem1096;
-          for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
+          org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.tbl_types = new ArrayList<String>(_list1103.size);
+          String _elem1104;
+          for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
          {
-            _elem1096 = iprot.readString();
-            struct.tbl_types.add(_elem1096);
+            _elem1104 = iprot.readString();
+            struct.tbl_types.add(_elem1104);
          }
        }
        struct.setTbl_typesIsSet(true);

@@ -70032,14 +70187,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
-                struct.success = new ArrayList<TableMeta>(_list1098.size);
-                TableMeta _elem1099;
-                for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
+                org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
+                struct.success = new ArrayList<TableMeta>(_list1106.size);
+                TableMeta _elem1107;
+                for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
                 {
-                  _elem1099 = new TableMeta();
-                  _elem1099.read(iprot);
-                  struct.success.add(_elem1099);
+                  _elem1107 = new TableMeta();
+                  _elem1107.read(iprot);
+                  struct.success.add(_elem1107);
                 }
                 iprot.readListEnd();
               }

@@ -70074,9 +70229,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (TableMeta _iter1101 : struct.success)
+          for (TableMeta _iter1109 : struct.success)
           {
-            _iter1101.write(oprot);
+            _iter1109.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -70115,9 +70270,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-          for (TableMeta _iter1102 : struct.success)
+          for (TableMeta _iter1110 : struct.success)
          {
-            _iter1102.write(oprot);
+            _iter1110.write(oprot);
          }
        }
      }

@@ -70132,14 +70287,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<TableMeta>(_list1103.size);
-          TableMeta _elem1104;
-          for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
+          org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<TableMeta>(_list1111.size);
+          TableMeta _elem1112;
+          for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
          {
-            _elem1104 = new TableMeta();
-            _elem1104.read(iprot);
-            struct.success.add(_elem1104);
+            _elem1112 = new TableMeta();
+            _elem1112.read(iprot);
+            struct.success.add(_elem1112);
          }
        }
        struct.setSuccessIsSet(true);

@@ -70905,13 +71060,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
-                struct.success = new ArrayList<String>(_list1106.size);
-                String _elem1107;
-                for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
+                org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin();
+                struct.success = new ArrayList<String>(_list1114.size);
+                String _elem1115;
+                for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116)
                 {
-                  _elem1107 = iprot.readString();
-                  struct.success.add(_elem1107);
+                  _elem1115 = iprot.readString();
+                  struct.success.add(_elem1115);
                 }
                 iprot.readListEnd();
               }

@@ -70946,9 +71101,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-          for (String _iter1109 : struct.success)
+          for (String _iter1117 : struct.success)
           {
-            oprot.writeString(_iter1109);
+            oprot.writeString(_iter1117);
           }
           oprot.writeListEnd();
         }

@@ -70987,9 +71142,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-          for (String _iter1110 : struct.success)
+          for (String _iter1118 : struct.success)
          {
-            oprot.writeString(_iter1110);
+            oprot.writeString(_iter1118);
          }
        }
      }

@@ -71004,13 +71159,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.success = new ArrayList<String>(_list1111.size);
-          String _elem1112;
-          for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
+          org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.success = new ArrayList<String>(_list1119.size);
+          String _elem1120;
+          for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121)
          {
-            _elem1112 = iprot.readString();
-            struct.success.add(_elem1112);
+            _elem1120 = iprot.readString();
+            struct.success.add(_elem1120);
          }
        }
        struct.setSuccessIsSet(true);

@@ -72463,13 +72618,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b
           case 2: // TBL_NAMES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin();
-                struct.tbl_names = new ArrayList<String>(_list1114.size);
-                String _elem1115;
-                for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116)
+                org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin();
+                struct.tbl_names = new ArrayList<String>(_list1122.size);
+                String _elem1123;
+                for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124)
                 {
-                  _elem1115 = iprot.readString();
-                  struct.tbl_names.add(_elem1115);
+                  _elem1123 = iprot.readString();
+                  struct.tbl_names.add(_elem1123);
                 }
                 iprot.readListEnd();
               }
@@ -72500,9 +72655,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_
         oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size()));
-          for (String _iter1117 : struct.tbl_names)
+          for (String _iter1125 : struct.tbl_names)
           {
-            oprot.writeString(_iter1117);
+            oprot.writeString(_iter1125);
           }
           oprot.writeListEnd();
         }

@@ -72539,9 +72694,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b
      if (struct.isSetTbl_names()) {
        {
          oprot.writeI32(struct.tbl_names.size());
-          for (String _iter1118 : struct.tbl_names)
+          for (String _iter1126 : struct.tbl_names)
          {
-            oprot.writeString(_iter1118);
+            oprot.writeString(_iter1126);
          }
        }
      }

@@ -72557,13 +72712,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by
      }
      if (incoming.get(1)) {
        {
-          org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.tbl_names = new ArrayList<String>(_list1119.size);
-          String _elem1120;
-          for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121)
+          org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.tbl_names = new ArrayList<String>(_list1127.size);
+          String _elem1128;
+          for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129)
          {
-            _elem1120 = iprot.readString();
-            struct.tbl_names.add(_elem1120);
+            _elem1128 = iprot.readString();
+            struct.tbl_names.add(_elem1128);
          }
        }
        struct.setTbl_namesIsSet(true);

@@ -72888,14 +73043,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin();
-                struct.success = new ArrayList<Table>(_list1122.size);
-                Table _elem1123;
-                for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124)
+                org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin();
+                struct.success = new ArrayList<Table>(_list1130.size);
+                Table _elem1131;
+                for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132)
                 {
-                  _elem1123 = new Table();
-                  _elem1123.read(iprot);
-                  struct.success.add(_elem1123);
+                  _elem1131 = new Table();
+                  _elem1131.read(iprot);
+                  struct.success.add(_elem1131);
                }
                iprot.readListEnd();
              }

@@ -72921,9 +73076,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (Table _iter1125 : struct.success)
+          for (Table _iter1133 : struct.success)
           {
-            _iter1125.write(oprot);
+            _iter1133.write(oprot);
           }
           oprot.writeListEnd();
         }

@@ -72954,9 +73109,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-          for (Table _iter1126 : struct.success)
+          for (Table _iter1134 : struct.success)
          {
-            _iter1126.write(oprot);
+            _iter1134.write(oprot);
          }
        }
      }

@@ -72968,14 +73123,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by
      BitSet incoming = iprot.readBitSet(1);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<Table>(_list1127.size);
-          Table _elem1128;
-          for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129)
+          org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<Table>(_list1135.size);
+          Table _elem1136;
+          for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137)
          {
-            _elem1128 = new Table();
-            _elem1128.read(iprot);
-            struct.success.add(_elem1128);
+            _elem1136 = new Table();
+            _elem1136.read(iprot);
+            struct.success.add(_elem1136);
          }
        }
        struct.setSuccessIsSet(true);

@@ -75368,13 +75523,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization
           case 2: // TBL_NAMES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin();
-                struct.tbl_names = new ArrayList<String>(_list1130.size);
-                String _elem1131;
-                for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132)
+                org.apache.thrift.protocol.TList _list1138 = iprot.readListBegin();
+                struct.tbl_names = new ArrayList<String>(_list1138.size);
+                String _elem1139;
+                for (int _i1140 = 0; _i1140 < _list1138.size; ++_i1140)
                 {
-                  _elem1131 = iprot.readString();
-                  struct.tbl_names.add(_elem1131);
+                  _elem1139 = iprot.readString();
+                  struct.tbl_names.add(_elem1139);
                 }
                 iprot.readListEnd();
               }

@@ -75405,9 +75560,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio
         oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size()));
-          for (String _iter1133 : struct.tbl_names)
+          for (String _iter1141 : struct.tbl_names)
           {
-            oprot.writeString(_iter1133);
+            oprot.writeString(_iter1141);
           }
           oprot.writeListEnd();
         }

@@ -75444,9 +75599,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization
      if (struct.isSetTbl_names()) {
        {
          oprot.writeI32(struct.tbl_names.size());
-          for (String _iter1134 : struct.tbl_names)
+          for (String _iter1142 : struct.tbl_names)
          {
-            oprot.writeString(_iter1134);
+            oprot.writeString(_iter1142);
          }
        }
      }

@@ -75462,13 +75617,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_
      }
      if (incoming.get(1)) {
        {
-          org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.tbl_names = new ArrayList<String>(_list1135.size);
-          String _elem1136;
-          for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137)
+          org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.tbl_names = new ArrayList<String>(_list1143.size);
+          String _elem1144;
+          for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145)
          {
-            _elem1136 = iprot.readString();
-            struct.tbl_names.add(_elem1136);
+            _elem1144 = iprot.readString();
+            struct.tbl_names.add(_elem1144);
          }
        }
        struct.setTbl_namesIsSet(true);

@@ -76041,16 +76196,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map1138 = iprot.readMapBegin();
-                struct.success = new HashMap<String,Materialization>(2*_map1138.size);
-                String _key1139;
-                Materialization _val1140;
-                for (int _i1141 = 0; _i1141 < _map1138.size; ++_i1141)
+                org.apache.thrift.protocol.TMap _map1146 = iprot.readMapBegin();
+                struct.success = new HashMap<String,Materialization>(2*_map1146.size);
+                String _key1147;
+                Materialization _val1148;
+                for (int _i1149 = 0; _i1149 < _map1146.size; ++_i1149)
                 {
-                  _key1139 = iprot.readString();
-                  _val1140 = new Materialization();
-                  _val1140.read(iprot);
-                  struct.success.put(_key1139, _val1140);
+                  _key1147 = iprot.readString();
+                  _val1148 = new Materialization();
+                  
_val1148.read(iprot); + struct.success.put(_key1147, _val1148); } iprot.readMapEnd(); } @@ -76103,10 +76258,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter1142 : struct.success.entrySet()) + for (Map.Entry _iter1150 : struct.success.entrySet()) { - oprot.writeString(_iter1142.getKey()); - _iter1142.getValue().write(oprot); + oprot.writeString(_iter1150.getKey()); + _iter1150.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -76161,10 +76316,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1143 : struct.success.entrySet()) + for (Map.Entry _iter1151 : struct.success.entrySet()) { - oprot.writeString(_iter1143.getKey()); - _iter1143.getValue().write(oprot); + oprot.writeString(_iter1151.getKey()); + _iter1151.getValue().write(oprot); } } } @@ -76185,16 +76340,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1144 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map1144.size); - String _key1145; - Materialization _val1146; - for (int _i1147 = 0; _i1147 < _map1144.size; ++_i1147) + org.apache.thrift.protocol.TMap _map1152 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1152.size); + String _key1153; + Materialization _val1154; + for (int _i1155 = 0; _i1155 < _map1152.size; ++_i1155) { - _key1145 = iprot.readString(); - _val1146 = new Materialization(); - _val1146.read(iprot); - struct.success.put(_key1145, _val1146); + _key1153 = iprot.readString(); + _val1154 = new Materialization(); + _val1154.read(iprot); + struct.success.put(_key1153, _val1154); } } struct.setSuccessIsSet(true); @@ -78587,13 +78742,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1148 = iprot.readListBegin(); - struct.success = new ArrayList(_list1148.size); - String _elem1149; - for (int _i1150 = 0; _i1150 < _list1148.size; ++_i1150) + org.apache.thrift.protocol.TList _list1156 = iprot.readListBegin(); + struct.success = new ArrayList(_list1156.size); + String _elem1157; + for (int _i1158 = 0; _i1158 < _list1156.size; ++_i1158) { - _elem1149 = iprot.readString(); - struct.success.add(_elem1149); + _elem1157 = iprot.readString(); + struct.success.add(_elem1157); } iprot.readListEnd(); } @@ -78646,9 +78801,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1151 : struct.success) + for (String _iter1159 : struct.success) { - oprot.writeString(_iter1151); + oprot.writeString(_iter1159); } oprot.writeListEnd(); } @@ -78703,9 +78858,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1152 : struct.success) + for (String _iter1160 : struct.success) { - oprot.writeString(_iter1152); + oprot.writeString(_iter1160); } } } @@ -78726,13 +78881,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1153.size); - String _elem1154; - for (int _i1155 = 0; _i1155 < _list1153.size; ++_i1155) + org.apache.thrift.protocol.TList _list1161 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1161.size); + String _elem1162; + for (int _i1163 = 0; _i1163 < _list1161.size; ++_i1163) { - _elem1154 = iprot.readString(); - struct.success.add(_elem1154); + _elem1162 = iprot.readString(); + struct.success.add(_elem1162); } } struct.setSuccessIsSet(true); @@ -84591,14 +84746,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1156 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1156.size); - Partition _elem1157; - for (int _i1158 = 0; _i1158 < _list1156.size; ++_i1158) + org.apache.thrift.protocol.TList _list1164 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1164.size); + Partition _elem1165; + for (int _i1166 = 0; _i1166 < _list1164.size; ++_i1166) { - _elem1157 = new Partition(); - _elem1157.read(iprot); - struct.new_parts.add(_elem1157); + _elem1165 = new Partition(); + _elem1165.read(iprot); + struct.new_parts.add(_elem1165); } iprot.readListEnd(); } @@ -84624,9 +84779,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1159 : struct.new_parts) + for (Partition _iter1167 : struct.new_parts) { - _iter1159.write(oprot); + _iter1167.write(oprot); } oprot.writeListEnd(); } @@ -84657,9 +84812,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1160 : struct.new_parts) + for (Partition _iter1168 : struct.new_parts) { - _iter1160.write(oprot); + _iter1168.write(oprot); } } } @@ -84671,14 +84826,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1161 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1161.size); - Partition _elem1162; - for (int _i1163 = 0; _i1163 < _list1161.size; ++_i1163) + org.apache.thrift.protocol.TList _list1169 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1169.size); + Partition _elem1170; + for (int _i1171 = 0; _i1171 < _list1169.size; ++_i1171) { - _elem1162 = new Partition(); - _elem1162.read(iprot); - 
struct.new_parts.add(_elem1162); + _elem1170 = new Partition(); + _elem1170.read(iprot); + struct.new_parts.add(_elem1170); } } struct.setNew_partsIsSet(true); @@ -85679,14 +85834,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1164 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1164.size); - PartitionSpec _elem1165; - for (int _i1166 = 0; _i1166 < _list1164.size; ++_i1166) + org.apache.thrift.protocol.TList _list1172 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1172.size); + PartitionSpec _elem1173; + for (int _i1174 = 0; _i1174 < _list1172.size; ++_i1174) { - _elem1165 = new PartitionSpec(); - _elem1165.read(iprot); - struct.new_parts.add(_elem1165); + _elem1173 = new PartitionSpec(); + _elem1173.read(iprot); + struct.new_parts.add(_elem1173); } iprot.readListEnd(); } @@ -85712,9 +85867,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1167 : struct.new_parts) + for (PartitionSpec _iter1175 : struct.new_parts) { - _iter1167.write(oprot); + _iter1175.write(oprot); } oprot.writeListEnd(); } @@ -85745,9 +85900,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1168 : struct.new_parts) + for (PartitionSpec _iter1176 : struct.new_parts) { - _iter1168.write(oprot); + _iter1176.write(oprot); } } } @@ -85759,14 +85914,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1169 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1169.size); - PartitionSpec _elem1170; - for (int _i1171 = 0; _i1171 < _list1169.size; ++_i1171) + org.apache.thrift.protocol.TList _list1177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1177.size); + PartitionSpec _elem1178; + for (int _i1179 = 0; _i1179 < _list1177.size; ++_i1179) { - _elem1170 = new PartitionSpec(); - _elem1170.read(iprot); - struct.new_parts.add(_elem1170); + _elem1178 = new PartitionSpec(); + _elem1178.read(iprot); + struct.new_parts.add(_elem1178); } } struct.setNew_partsIsSet(true); @@ -86942,13 +87097,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1172 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1172.size); - String _elem1173; - for (int _i1174 = 0; _i1174 < _list1172.size; ++_i1174) + org.apache.thrift.protocol.TList _list1180 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1180.size); + String _elem1181; + for (int _i1182 = 0; _i1182 < _list1180.size; ++_i1182) { - _elem1173 = iprot.readString(); - struct.part_vals.add(_elem1173); + _elem1181 = iprot.readString(); + struct.part_vals.add(_elem1181); } iprot.readListEnd(); } @@ -86984,9 +87139,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1175 : struct.part_vals) + for (String _iter1183 : struct.part_vals) { - oprot.writeString(_iter1175); + oprot.writeString(_iter1183); } oprot.writeListEnd(); } @@ -87029,9 +87184,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1176 : struct.part_vals) + for (String _iter1184 : struct.part_vals) { - oprot.writeString(_iter1176); + oprot.writeString(_iter1184); } } } @@ -87051,13 +87206,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1177.size); - String _elem1178; - for (int _i1179 = 0; _i1179 < _list1177.size; ++_i1179) + org.apache.thrift.protocol.TList _list1185 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1185.size); + String _elem1186; + for (int _i1187 = 0; _i1187 < _list1185.size; ++_i1187) { - _elem1178 = iprot.readString(); - struct.part_vals.add(_elem1178); + _elem1186 = iprot.readString(); + struct.part_vals.add(_elem1186); } } struct.setPart_valsIsSet(true); @@ -89366,13 +89521,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1180 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1180.size); - String _elem1181; - for (int _i1182 = 0; _i1182 < _list1180.size; ++_i1182) + org.apache.thrift.protocol.TList _list1188 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1188.size); + String _elem1189; + for (int _i1190 = 0; _i1190 < _list1188.size; ++_i1190) { - _elem1181 = iprot.readString(); - struct.part_vals.add(_elem1181); + _elem1189 = iprot.readString(); + struct.part_vals.add(_elem1189); } iprot.readListEnd(); } @@ -89417,9 +89572,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1183 : struct.part_vals) + for (String _iter1191 : struct.part_vals) { - oprot.writeString(_iter1183); + oprot.writeString(_iter1191); } oprot.writeListEnd(); } @@ -89470,9 +89625,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1184 : struct.part_vals) + for (String _iter1192 : struct.part_vals) { - oprot.writeString(_iter1184); + oprot.writeString(_iter1192); } } } @@ -89495,13 +89650,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1185 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1185.size); - String _elem1186; - for (int _i1187 = 0; _i1187 < 
_list1185.size; ++_i1187) + org.apache.thrift.protocol.TList _list1193 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1193.size); + String _elem1194; + for (int _i1195 = 0; _i1195 < _list1193.size; ++_i1195) { - _elem1186 = iprot.readString(); - struct.part_vals.add(_elem1186); + _elem1194 = iprot.readString(); + struct.part_vals.add(_elem1194); } } struct.setPart_valsIsSet(true); @@ -93371,13 +93526,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1188 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1188.size); - String _elem1189; - for (int _i1190 = 0; _i1190 < _list1188.size; ++_i1190) + org.apache.thrift.protocol.TList _list1196 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1196.size); + String _elem1197; + for (int _i1198 = 0; _i1198 < _list1196.size; ++_i1198) { - _elem1189 = iprot.readString(); - struct.part_vals.add(_elem1189); + _elem1197 = iprot.readString(); + struct.part_vals.add(_elem1197); } iprot.readListEnd(); } @@ -93421,9 +93576,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1191 : struct.part_vals) + for (String _iter1199 : struct.part_vals) { - oprot.writeString(_iter1191); + oprot.writeString(_iter1199); } oprot.writeListEnd(); } @@ -93472,9 +93627,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1192 : struct.part_vals) + for (String _iter1200 : struct.part_vals) { - oprot.writeString(_iter1192); + oprot.writeString(_iter1200); } } } @@ -93497,13 +93652,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1193 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1193.size); - String _elem1194; - for (int _i1195 = 0; _i1195 < _list1193.size; ++_i1195) + org.apache.thrift.protocol.TList _list1201 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1201.size); + String _elem1202; + for (int _i1203 = 0; _i1203 < _list1201.size; ++_i1203) { - _elem1194 = iprot.readString(); - struct.part_vals.add(_elem1194); + _elem1202 = iprot.readString(); + struct.part_vals.add(_elem1202); } } struct.setPart_valsIsSet(true); @@ -94742,13 +94897,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1196 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1196.size); - String _elem1197; - for (int _i1198 = 0; _i1198 < _list1196.size; ++_i1198) + org.apache.thrift.protocol.TList _list1204 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1204.size); + String _elem1205; + for (int _i1206 = 0; _i1206 < _list1204.size; ++_i1206) { - _elem1197 = iprot.readString(); - struct.part_vals.add(_elem1197); + 
_elem1205 = iprot.readString(); + struct.part_vals.add(_elem1205); } iprot.readListEnd(); } @@ -94801,9 +94956,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1199 : struct.part_vals) + for (String _iter1207 : struct.part_vals) { - oprot.writeString(_iter1199); + oprot.writeString(_iter1207); } oprot.writeListEnd(); } @@ -94860,9 +95015,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1200 : struct.part_vals) + for (String _iter1208 : struct.part_vals) { - oprot.writeString(_iter1200); + oprot.writeString(_iter1208); } } } @@ -94888,13 +95043,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1201 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1201.size); - String _elem1202; - for (int _i1203 = 0; _i1203 < _list1201.size; ++_i1203) + org.apache.thrift.protocol.TList _list1209 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1209.size); + String _elem1210; + for (int _i1211 = 0; _i1211 < _list1209.size; ++_i1211) { - _elem1202 = iprot.readString(); - struct.part_vals.add(_elem1202); + _elem1210 = iprot.readString(); + struct.part_vals.add(_elem1210); } } struct.setPart_valsIsSet(true); @@ -99496,13 +99651,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1204 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1204.size); - String _elem1205; - for (int _i1206 = 0; _i1206 < _list1204.size; ++_i1206) + org.apache.thrift.protocol.TList _list1212 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1212.size); + String _elem1213; + for (int _i1214 = 0; _i1214 < _list1212.size; ++_i1214) { - _elem1205 = iprot.readString(); - struct.part_vals.add(_elem1205); + _elem1213 = iprot.readString(); + struct.part_vals.add(_elem1213); } iprot.readListEnd(); } @@ -99538,9 +99693,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1207 : struct.part_vals) + for (String _iter1215 : struct.part_vals) { - oprot.writeString(_iter1207); + oprot.writeString(_iter1215); } oprot.writeListEnd(); } @@ -99583,9 +99738,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1208 : struct.part_vals) + for (String _iter1216 : struct.part_vals) { - oprot.writeString(_iter1208); + oprot.writeString(_iter1216); } } } @@ -99605,13 +99760,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1209 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1209.size); - String _elem1210; - for (int _i1211 = 0; _i1211 < _list1209.size; ++_i1211) + org.apache.thrift.protocol.TList _list1217 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1217.size); + String _elem1218; + for (int _i1219 = 0; _i1219 < _list1217.size; ++_i1219) { - _elem1210 = iprot.readString(); - struct.part_vals.add(_elem1210); + _elem1218 = iprot.readString(); + struct.part_vals.add(_elem1218); } } struct.setPart_valsIsSet(true); @@ -100829,15 +100984,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1212 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1212.size); - String _key1213; - String _val1214; - for (int _i1215 = 0; _i1215 < _map1212.size; ++_i1215) + org.apache.thrift.protocol.TMap _map1220 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1220.size); + String _key1221; + String _val1222; + for (int _i1223 = 0; _i1223 < _map1220.size; ++_i1223) { - _key1213 = iprot.readString(); - _val1214 = iprot.readString(); - struct.partitionSpecs.put(_key1213, _val1214); + _key1221 = iprot.readString(); + _val1222 = iprot.readString(); + struct.partitionSpecs.put(_key1221, _val1222); } iprot.readMapEnd(); } @@ -100895,10 +101050,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1216 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1224 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1216.getKey()); - oprot.writeString(_iter1216.getValue()); + oprot.writeString(_iter1224.getKey()); + oprot.writeString(_iter1224.getValue()); } oprot.writeMapEnd(); } @@ -100961,10 +101116,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1217 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1225 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1217.getKey()); - oprot.writeString(_iter1217.getValue()); + oprot.writeString(_iter1225.getKey()); + oprot.writeString(_iter1225.getValue()); } } } @@ -100988,15 +101143,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1218 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1218.size); - String _key1219; - String _val1220; - for (int _i1221 = 0; _i1221 < _map1218.size; ++_i1221) + org.apache.thrift.protocol.TMap _map1226 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1226.size); + String _key1227; + String _val1228; + for (int _i1229 = 0; _i1229 < _map1226.size; ++_i1229) { - 
_key1219 = iprot.readString(); - _val1220 = iprot.readString(); - struct.partitionSpecs.put(_key1219, _val1220); + _key1227 = iprot.readString(); + _val1228 = iprot.readString(); + struct.partitionSpecs.put(_key1227, _val1228); } } struct.setPartitionSpecsIsSet(true); @@ -102442,15 +102597,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1222 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1222.size); - String _key1223; - String _val1224; - for (int _i1225 = 0; _i1225 < _map1222.size; ++_i1225) + org.apache.thrift.protocol.TMap _map1230 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1230.size); + String _key1231; + String _val1232; + for (int _i1233 = 0; _i1233 < _map1230.size; ++_i1233) { - _key1223 = iprot.readString(); - _val1224 = iprot.readString(); - struct.partitionSpecs.put(_key1223, _val1224); + _key1231 = iprot.readString(); + _val1232 = iprot.readString(); + struct.partitionSpecs.put(_key1231, _val1232); } iprot.readMapEnd(); } @@ -102508,10 +102663,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1226 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1234 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1226.getKey()); - oprot.writeString(_iter1226.getValue()); + oprot.writeString(_iter1234.getKey()); + oprot.writeString(_iter1234.getValue()); } oprot.writeMapEnd(); } @@ -102574,10 +102729,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1227 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1235 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1227.getKey()); - oprot.writeString(_iter1227.getValue()); + oprot.writeString(_iter1235.getKey()); + oprot.writeString(_iter1235.getValue()); } } } @@ -102601,15 +102756,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1228 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1228.size); - String _key1229; - String _val1230; - for (int _i1231 = 0; _i1231 < _map1228.size; ++_i1231) + org.apache.thrift.protocol.TMap _map1236 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1236.size); + String _key1237; + String _val1238; + for (int _i1239 = 0; _i1239 < _map1236.size; ++_i1239) { - _key1229 = iprot.readString(); - _val1230 = iprot.readString(); - struct.partitionSpecs.put(_key1229, _val1230); + _key1237 = iprot.readString(); + _val1238 = iprot.readString(); + struct.partitionSpecs.put(_key1237, _val1238); } } struct.setPartitionSpecsIsSet(true); @@ -103274,14 +103429,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if 
(schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1232 = iprot.readListBegin(); - struct.success = new ArrayList(_list1232.size); - Partition _elem1233; - for (int _i1234 = 0; _i1234 < _list1232.size; ++_i1234) + org.apache.thrift.protocol.TList _list1240 = iprot.readListBegin(); + struct.success = new ArrayList(_list1240.size); + Partition _elem1241; + for (int _i1242 = 0; _i1242 < _list1240.size; ++_i1242) { - _elem1233 = new Partition(); - _elem1233.read(iprot); - struct.success.add(_elem1233); + _elem1241 = new Partition(); + _elem1241.read(iprot); + struct.success.add(_elem1241); } iprot.readListEnd(); } @@ -103343,9 +103498,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1235 : struct.success) + for (Partition _iter1243 : struct.success) { - _iter1235.write(oprot); + _iter1243.write(oprot); } oprot.writeListEnd(); } @@ -103408,9 +103563,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1236 : struct.success) + for (Partition _iter1244 : struct.success) { - _iter1236.write(oprot); + _iter1244.write(oprot); } } } @@ -103434,14 +103589,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1237.size); - Partition _elem1238; - for (int _i1239 = 0; _i1239 < _list1237.size; ++_i1239) + org.apache.thrift.protocol.TList _list1245 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1245.size); + Partition _elem1246; + for (int _i1247 = 0; _i1247 < _list1245.size; ++_i1247) { - _elem1238 = new Partition(); - _elem1238.read(iprot); - struct.success.add(_elem1238); + _elem1246 = new Partition(); + _elem1246.read(iprot); + struct.success.add(_elem1246); } } struct.setSuccessIsSet(true); @@ -104140,13 +104295,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1240 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1240.size); - String _elem1241; - for (int _i1242 = 0; _i1242 < _list1240.size; ++_i1242) + org.apache.thrift.protocol.TList _list1248 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1248.size); + String _elem1249; + for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) { - _elem1241 = iprot.readString(); - struct.part_vals.add(_elem1241); + _elem1249 = iprot.readString(); + struct.part_vals.add(_elem1249); } iprot.readListEnd(); } @@ -104166,13 +104321,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1243 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1243.size); - String _elem1244; - for (int _i1245 = 0; _i1245 < _list1243.size; ++_i1245) + 
org.apache.thrift.protocol.TList _list1251 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1251.size); + String _elem1252; + for (int _i1253 = 0; _i1253 < _list1251.size; ++_i1253) { - _elem1244 = iprot.readString(); - struct.group_names.add(_elem1244); + _elem1252 = iprot.readString(); + struct.group_names.add(_elem1252); } iprot.readListEnd(); } @@ -104208,9 +104363,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1246 : struct.part_vals) + for (String _iter1254 : struct.part_vals) { - oprot.writeString(_iter1246); + oprot.writeString(_iter1254); } oprot.writeListEnd(); } @@ -104225,9 +104380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1247 : struct.group_names) + for (String _iter1255 : struct.group_names) { - oprot.writeString(_iter1247); + oprot.writeString(_iter1255); } oprot.writeListEnd(); } @@ -104276,9 +104431,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1248 : struct.part_vals) + for (String _iter1256 : struct.part_vals) { - oprot.writeString(_iter1248); + oprot.writeString(_iter1256); } } } @@ -104288,9 +104443,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1249 : struct.group_names) + for (String _iter1257 : struct.group_names) { - oprot.writeString(_iter1249); + oprot.writeString(_iter1257); } } } @@ -104310,13 +104465,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1250 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1250.size); - String _elem1251; - for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) + org.apache.thrift.protocol.TList _list1258 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1258.size); + String _elem1259; + for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) { - _elem1251 = iprot.readString(); - struct.part_vals.add(_elem1251); + _elem1259 = iprot.readString(); + struct.part_vals.add(_elem1259); } } struct.setPart_valsIsSet(true); @@ -104327,13 +104482,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1253 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1253.size); - String _elem1254; - for (int _i1255 = 0; _i1255 < _list1253.size; ++_i1255) + org.apache.thrift.protocol.TList _list1261 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1261.size); + String _elem1262; + for (int _i1263 = 0; _i1263 < _list1261.size; ++_i1263) { - _elem1254 = 
iprot.readString(); - struct.group_names.add(_elem1254); + _elem1262 = iprot.readString(); + struct.group_names.add(_elem1262); } } struct.setGroup_namesIsSet(true); @@ -107102,14 +107257,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1256 = iprot.readListBegin(); - struct.success = new ArrayList(_list1256.size); - Partition _elem1257; - for (int _i1258 = 0; _i1258 < _list1256.size; ++_i1258) + org.apache.thrift.protocol.TList _list1264 = iprot.readListBegin(); + struct.success = new ArrayList(_list1264.size); + Partition _elem1265; + for (int _i1266 = 0; _i1266 < _list1264.size; ++_i1266) { - _elem1257 = new Partition(); - _elem1257.read(iprot); - struct.success.add(_elem1257); + _elem1265 = new Partition(); + _elem1265.read(iprot); + struct.success.add(_elem1265); } iprot.readListEnd(); } @@ -107153,9 +107308,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1259 : struct.success) + for (Partition _iter1267 : struct.success) { - _iter1259.write(oprot); + _iter1267.write(oprot); } oprot.writeListEnd(); } @@ -107202,9 +107357,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1260 : struct.success) + for (Partition _iter1268 : struct.success) { - _iter1260.write(oprot); + _iter1268.write(oprot); } } } @@ -107222,14 +107377,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1261 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1261.size); - Partition _elem1262; - for (int _i1263 = 0; _i1263 < _list1261.size; ++_i1263) + org.apache.thrift.protocol.TList _list1269 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1269.size); + Partition _elem1270; + for (int _i1271 = 0; _i1271 < _list1269.size; ++_i1271) { - _elem1262 = new Partition(); - _elem1262.read(iprot); - struct.success.add(_elem1262); + _elem1270 = new Partition(); + _elem1270.read(iprot); + struct.success.add(_elem1270); } } struct.setSuccessIsSet(true); @@ -107919,13 +108074,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1264 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1264.size); - String _elem1265; - for (int _i1266 = 0; _i1266 < _list1264.size; ++_i1266) + org.apache.thrift.protocol.TList _list1272 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1272.size); + String _elem1273; + for (int _i1274 = 0; _i1274 < _list1272.size; ++_i1274) { - _elem1265 = iprot.readString(); - struct.group_names.add(_elem1265); + _elem1273 = iprot.readString(); + struct.group_names.add(_elem1273); } iprot.readListEnd(); } @@ -107969,9 +108124,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit 
oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1267 : struct.group_names) + for (String _iter1275 : struct.group_names) { - oprot.writeString(_iter1267); + oprot.writeString(_iter1275); } oprot.writeListEnd(); } @@ -108026,9 +108181,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1268 : struct.group_names) + for (String _iter1276 : struct.group_names) { - oprot.writeString(_iter1268); + oprot.writeString(_iter1276); } } } @@ -108056,13 +108211,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1269 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1269.size); - String _elem1270; - for (int _i1271 = 0; _i1271 < _list1269.size; ++_i1271) + org.apache.thrift.protocol.TList _list1277 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1277.size); + String _elem1278; + for (int _i1279 = 0; _i1279 < _list1277.size; ++_i1279) { - _elem1270 = iprot.readString(); - struct.group_names.add(_elem1270); + _elem1278 = iprot.readString(); + struct.group_names.add(_elem1278); } } struct.setGroup_namesIsSet(true); @@ -108549,14 +108704,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1272 = iprot.readListBegin(); - struct.success = new ArrayList(_list1272.size); - Partition _elem1273; - for (int _i1274 = 0; _i1274 < _list1272.size; ++_i1274) + org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); + struct.success = new ArrayList(_list1280.size); + Partition _elem1281; + for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) { - _elem1273 = new Partition(); - _elem1273.read(iprot); - struct.success.add(_elem1273); + _elem1281 = new Partition(); + _elem1281.read(iprot); + struct.success.add(_elem1281); } iprot.readListEnd(); } @@ -108600,9 +108755,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1275 : struct.success) + for (Partition _iter1283 : struct.success) { - _iter1275.write(oprot); + _iter1283.write(oprot); } oprot.writeListEnd(); } @@ -108649,9 +108804,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1276 : struct.success) + for (Partition _iter1284 : struct.success) { - _iter1276.write(oprot); + _iter1284.write(oprot); } } } @@ -108669,14 +108824,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1277 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1277.size); - Partition _elem1278; - for (int 
_i1279 = 0; _i1279 < _list1277.size; ++_i1279) + org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1285.size); + Partition _elem1286; + for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) { - _elem1278 = new Partition(); - _elem1278.read(iprot); - struct.success.add(_elem1278); + _elem1286 = new Partition(); + _elem1286.read(iprot); + struct.success.add(_elem1286); } } struct.setSuccessIsSet(true); @@ -109739,14 +109894,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); - struct.success = new ArrayList(_list1280.size); - PartitionSpec _elem1281; - for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) + org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); + struct.success = new ArrayList(_list1288.size); + PartitionSpec _elem1289; + for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) { - _elem1281 = new PartitionSpec(); - _elem1281.read(iprot); - struct.success.add(_elem1281); + _elem1289 = new PartitionSpec(); + _elem1289.read(iprot); + struct.success.add(_elem1289); } iprot.readListEnd(); } @@ -109790,9 +109945,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1283 : struct.success) + for (PartitionSpec _iter1291 : struct.success) { - _iter1283.write(oprot); + _iter1291.write(oprot); } oprot.writeListEnd(); } @@ -109839,9 +109994,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1284 : struct.success) + for (PartitionSpec _iter1292 : struct.success) { - _iter1284.write(oprot); + _iter1292.write(oprot); } } } @@ -109859,14 +110014,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1285.size); - PartitionSpec _elem1286; - for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) + org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1293.size); + PartitionSpec _elem1294; + for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) { - _elem1286 = new PartitionSpec(); - _elem1286.read(iprot); - struct.success.add(_elem1286); + _elem1294 = new PartitionSpec(); + _elem1294.read(iprot); + struct.success.add(_elem1294); } } struct.setSuccessIsSet(true); @@ -110926,13 +111081,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); - struct.success = new ArrayList(_list1288.size); - String _elem1289; - for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) + org.apache.thrift.protocol.TList _list1296 = 
iprot.readListBegin(); + struct.success = new ArrayList(_list1296.size); + String _elem1297; + for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) { - _elem1289 = iprot.readString(); - struct.success.add(_elem1289); + _elem1297 = iprot.readString(); + struct.success.add(_elem1297); } iprot.readListEnd(); } @@ -110976,9 +111131,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1291 : struct.success) + for (String _iter1299 : struct.success) { - oprot.writeString(_iter1291); + oprot.writeString(_iter1299); } oprot.writeListEnd(); } @@ -111025,9 +111180,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1292 : struct.success) + for (String _iter1300 : struct.success) { - oprot.writeString(_iter1292); + oprot.writeString(_iter1300); } } } @@ -111045,13 +111200,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1293.size); - String _elem1294; - for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) + org.apache.thrift.protocol.TList _list1301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1301.size); + String _elem1302; + for (int _i1303 = 0; _i1303 < _list1301.size; ++_i1303) { - _elem1294 = iprot.readString(); - struct.success.add(_elem1294); + _elem1302 = iprot.readString(); + struct.success.add(_elem1302); } } struct.setSuccessIsSet(true); @@ -112582,13 +112737,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1296 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1296.size); - String _elem1297; - for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) + org.apache.thrift.protocol.TList _list1304 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1304.size); + String _elem1305; + for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) { - _elem1297 = iprot.readString(); - struct.part_vals.add(_elem1297); + _elem1305 = iprot.readString(); + struct.part_vals.add(_elem1305); } iprot.readListEnd(); } @@ -112632,9 +112787,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1299 : struct.part_vals) + for (String _iter1307 : struct.part_vals) { - oprot.writeString(_iter1299); + oprot.writeString(_iter1307); } oprot.writeListEnd(); } @@ -112683,9 +112838,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1300 : struct.part_vals) + for (String _iter1308 : struct.part_vals) { - oprot.writeString(_iter1300); + oprot.writeString(_iter1308); } } } @@ 
-112708,13 +112863,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1301.size); - String _elem1302; - for (int _i1303 = 0; _i1303 < _list1301.size; ++_i1303) + org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1309.size); + String _elem1310; + for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) { - _elem1302 = iprot.readString(); - struct.part_vals.add(_elem1302); + _elem1310 = iprot.readString(); + struct.part_vals.add(_elem1310); } } struct.setPart_valsIsSet(true); @@ -113205,14 +113360,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1304 = iprot.readListBegin(); - struct.success = new ArrayList(_list1304.size); - Partition _elem1305; - for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) + org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); + struct.success = new ArrayList(_list1312.size); + Partition _elem1313; + for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) { - _elem1305 = new Partition(); - _elem1305.read(iprot); - struct.success.add(_elem1305); + _elem1313 = new Partition(); + _elem1313.read(iprot); + struct.success.add(_elem1313); } iprot.readListEnd(); } @@ -113256,9 +113411,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1307 : struct.success) + for (Partition _iter1315 : struct.success) { - _iter1307.write(oprot); + _iter1315.write(oprot); } oprot.writeListEnd(); } @@ -113305,9 +113460,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1308 : struct.success) + for (Partition _iter1316 : struct.success) { - _iter1308.write(oprot); + _iter1316.write(oprot); } } } @@ -113325,14 +113480,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1309.size); - Partition _elem1310; - for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) + org.apache.thrift.protocol.TList _list1317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1317.size); + Partition _elem1318; + for (int _i1319 = 0; _i1319 < _list1317.size; ++_i1319) { - _elem1310 = new Partition(); - _elem1310.read(iprot); - struct.success.add(_elem1310); + _elem1318 = new Partition(); + _elem1318.read(iprot); + struct.success.add(_elem1318); } } struct.setSuccessIsSet(true); @@ -114104,13 +114259,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1312.size); - String _elem1313; - for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) + org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1320.size); + String _elem1321; + for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) { - _elem1313 = iprot.readString(); - struct.part_vals.add(_elem1313); + _elem1321 = iprot.readString(); + struct.part_vals.add(_elem1321); } iprot.readListEnd(); } @@ -114138,13 +114293,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1315 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1315.size); - String _elem1316; - for (int _i1317 = 0; _i1317 < _list1315.size; ++_i1317) + org.apache.thrift.protocol.TList _list1323 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1323.size); + String _elem1324; + for (int _i1325 = 0; _i1325 < _list1323.size; ++_i1325) { - _elem1316 = iprot.readString(); - struct.group_names.add(_elem1316); + _elem1324 = iprot.readString(); + struct.group_names.add(_elem1324); } iprot.readListEnd(); } @@ -114180,9 +114335,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1318 : struct.part_vals) + for (String _iter1326 : struct.part_vals) { - oprot.writeString(_iter1318); + oprot.writeString(_iter1326); } oprot.writeListEnd(); } @@ -114200,9 +114355,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1319 : struct.group_names) + for (String _iter1327 : struct.group_names) { - oprot.writeString(_iter1319); + oprot.writeString(_iter1327); } oprot.writeListEnd(); } @@ -114254,9 +114409,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1320 : struct.part_vals) + for (String _iter1328 : struct.part_vals) { - oprot.writeString(_iter1320); + oprot.writeString(_iter1328); } } } @@ -114269,9 +114424,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1321 : struct.group_names) + for (String _iter1329 : struct.group_names) { - oprot.writeString(_iter1321); + oprot.writeString(_iter1329); } } } @@ -114291,13 +114446,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1322 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1322.size); - String _elem1323; - for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) + org.apache.thrift.protocol.TList _list1330 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + 
struct.part_vals = new ArrayList(_list1330.size); + String _elem1331; + for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) { - _elem1323 = iprot.readString(); - struct.part_vals.add(_elem1323); + _elem1331 = iprot.readString(); + struct.part_vals.add(_elem1331); } } struct.setPart_valsIsSet(true); @@ -114312,13 +114467,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1325.size); - String _elem1326; - for (int _i1327 = 0; _i1327 < _list1325.size; ++_i1327) + org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1333.size); + String _elem1334; + for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) { - _elem1326 = iprot.readString(); - struct.group_names.add(_elem1326); + _elem1334 = iprot.readString(); + struct.group_names.add(_elem1334); } } struct.setGroup_namesIsSet(true); @@ -114805,14 +114960,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1328 = iprot.readListBegin(); - struct.success = new ArrayList(_list1328.size); - Partition _elem1329; - for (int _i1330 = 0; _i1330 < _list1328.size; ++_i1330) + org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); + struct.success = new ArrayList(_list1336.size); + Partition _elem1337; + for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) { - _elem1329 = new Partition(); - _elem1329.read(iprot); - struct.success.add(_elem1329); + _elem1337 = new Partition(); + _elem1337.read(iprot); + struct.success.add(_elem1337); } iprot.readListEnd(); } @@ -114856,9 +115011,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1331 : struct.success) + for (Partition _iter1339 : struct.success) { - _iter1331.write(oprot); + _iter1339.write(oprot); } oprot.writeListEnd(); } @@ -114905,9 +115060,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1332 : struct.success) + for (Partition _iter1340 : struct.success) { - _iter1332.write(oprot); + _iter1340.write(oprot); } } } @@ -114925,14 +115080,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1333.size); - Partition _elem1334; - for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) + org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1341.size); + Partition _elem1342; + for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) { - _elem1334 = new Partition(); - _elem1334.read(iprot); - 
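
The TupleScheme variants in these hunks differ from the StandardScheme on the wire: optional fields are announced once up front via a BitSet, and a list carries only a raw element count, with no list-end marker, because the element type is known statically from the IDL. The reader rebuilds the TList header from that count alone, as the part_vals and group_names blocks above show. A sketch of both directions for a string list, with hypothetical helper names:

    // Tuple-protocol write: element count followed by raw elements.
    static void writeStringList(org.apache.thrift.protocol.TTupleProtocol oprot,
        java.util.List<String> vals) throws org.apache.thrift.TException {
      oprot.writeI32(vals.size());
      for (String v : vals) {
        oprot.writeString(v);
      }
    }

    // Tuple-protocol read: reconstruct the list header from the count alone;
    // the STRING element type comes from the IDL, not from the wire.
    static java.util.List<String> readStringList(org.apache.thrift.protocol.TTupleProtocol iprot)
        throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TList header = new org.apache.thrift.protocol.TList(
          org.apache.thrift.protocol.TType.STRING, iprot.readI32());
      java.util.List<String> out = new java.util.ArrayList<String>(header.size);
      for (int i = 0; i < header.size; ++i) {
        out.add(iprot.readString());
      }
      return out;
    }
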
struct.success.add(_elem1334); + _elem1342 = new Partition(); + _elem1342.read(iprot); + struct.success.add(_elem1342); } } struct.setSuccessIsSet(true); @@ -115525,13 +115680,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1336.size); - String _elem1337; - for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) + org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1344.size); + String _elem1345; + for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) { - _elem1337 = iprot.readString(); - struct.part_vals.add(_elem1337); + _elem1345 = iprot.readString(); + struct.part_vals.add(_elem1345); } iprot.readListEnd(); } @@ -115575,9 +115730,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1339 : struct.part_vals) + for (String _iter1347 : struct.part_vals) { - oprot.writeString(_iter1339); + oprot.writeString(_iter1347); } oprot.writeListEnd(); } @@ -115626,9 +115781,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1340 : struct.part_vals) + for (String _iter1348 : struct.part_vals) { - oprot.writeString(_iter1340); + oprot.writeString(_iter1348); } } } @@ -115651,13 +115806,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1341.size); - String _elem1342; - for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) + org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1349.size); + String _elem1350; + for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) { - _elem1342 = iprot.readString(); - struct.part_vals.add(_elem1342); + _elem1350 = iprot.readString(); + struct.part_vals.add(_elem1350); } } struct.setPart_valsIsSet(true); @@ -116145,13 +116300,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); - struct.success = new ArrayList(_list1344.size); - String _elem1345; - for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) + org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); + struct.success = new ArrayList(_list1352.size); + String _elem1353; + for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) { - _elem1345 = iprot.readString(); - struct.success.add(_elem1345); + _elem1353 = iprot.readString(); + struct.success.add(_elem1353); } iprot.readListEnd(); } @@ -116195,9 +116350,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1347 : struct.success) + for (String _iter1355 : struct.success) { - oprot.writeString(_iter1347); + oprot.writeString(_iter1355); } oprot.writeListEnd(); } @@ -116244,9 +116399,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1348 : struct.success) + for (String _iter1356 : struct.success) { - oprot.writeString(_iter1348); + oprot.writeString(_iter1356); } } } @@ -116264,13 +116419,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1349.size); - String _elem1350; - for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) + org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1357.size); + String _elem1358; + for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) { - _elem1350 = iprot.readString(); - struct.success.add(_elem1350); + _elem1358 = iprot.readString(); + struct.success.add(_elem1358); } } struct.setSuccessIsSet(true); @@ -117437,14 +117592,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); - struct.success = new ArrayList(_list1352.size); - Partition _elem1353; - for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) + org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); + struct.success = new ArrayList(_list1360.size); + Partition _elem1361; + for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) { - _elem1353 = new Partition(); - _elem1353.read(iprot); - struct.success.add(_elem1353); + _elem1361 = new Partition(); + _elem1361.read(iprot); + struct.success.add(_elem1361); } iprot.readListEnd(); } @@ -117488,9 +117643,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1355 : struct.success) + for (Partition _iter1363 : struct.success) { - _iter1355.write(oprot); + _iter1363.write(oprot); } oprot.writeListEnd(); } @@ -117537,9 +117692,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1356 : struct.success) + for (Partition _iter1364 : struct.success) { - _iter1356.write(oprot); + _iter1364.write(oprot); } } } @@ -117557,14 +117712,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1357.size); - Partition _elem1358; - for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) + org.apache.thrift.protocol.TList 
_list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1365.size); + Partition _elem1366; + for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) { - _elem1358 = new Partition(); - _elem1358.read(iprot); - struct.success.add(_elem1358); + _elem1366 = new Partition(); + _elem1366.read(iprot); + struct.success.add(_elem1366); } } struct.setSuccessIsSet(true); @@ -118731,14 +118886,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); - struct.success = new ArrayList(_list1360.size); - PartitionSpec _elem1361; - for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) + org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); + struct.success = new ArrayList(_list1368.size); + PartitionSpec _elem1369; + for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) { - _elem1361 = new PartitionSpec(); - _elem1361.read(iprot); - struct.success.add(_elem1361); + _elem1369 = new PartitionSpec(); + _elem1369.read(iprot); + struct.success.add(_elem1369); } iprot.readListEnd(); } @@ -118782,9 +118937,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1363 : struct.success) + for (PartitionSpec _iter1371 : struct.success) { - _iter1363.write(oprot); + _iter1371.write(oprot); } oprot.writeListEnd(); } @@ -118831,9 +118986,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1364 : struct.success) + for (PartitionSpec _iter1372 : struct.success) { - _iter1364.write(oprot); + _iter1372.write(oprot); } } } @@ -118851,14 +119006,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1365.size); - PartitionSpec _elem1366; - for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) + org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1373.size); + PartitionSpec _elem1374; + for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) { - _elem1366 = new PartitionSpec(); - _elem1366.read(iprot); - struct.success.add(_elem1366); + _elem1374 = new PartitionSpec(); + _elem1374.read(iprot); + struct.success.add(_elem1374); } } struct.setSuccessIsSet(true); @@ -121442,13 +121597,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); - struct.names = new ArrayList(_list1368.size); - String _elem1369; - for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) + org.apache.thrift.protocol.TList _list1376 = iprot.readListBegin(); + struct.names = new ArrayList(_list1376.size); + String _elem1377; + for (int 
_i1378 = 0; _i1378 < _list1376.size; ++_i1378) { - _elem1369 = iprot.readString(); - struct.names.add(_elem1369); + _elem1377 = iprot.readString(); + struct.names.add(_elem1377); } iprot.readListEnd(); } @@ -121484,9 +121639,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1371 : struct.names) + for (String _iter1379 : struct.names) { - oprot.writeString(_iter1371); + oprot.writeString(_iter1379); } oprot.writeListEnd(); } @@ -121529,9 +121684,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1372 : struct.names) + for (String _iter1380 : struct.names) { - oprot.writeString(_iter1372); + oprot.writeString(_iter1380); } } } @@ -121551,13 +121706,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1373.size); - String _elem1374; - for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) + org.apache.thrift.protocol.TList _list1381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1381.size); + String _elem1382; + for (int _i1383 = 0; _i1383 < _list1381.size; ++_i1383) { - _elem1374 = iprot.readString(); - struct.names.add(_elem1374); + _elem1382 = iprot.readString(); + struct.names.add(_elem1382); } } struct.setNamesIsSet(true); @@ -122044,14 +122199,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1376 = iprot.readListBegin(); - struct.success = new ArrayList(_list1376.size); - Partition _elem1377; - for (int _i1378 = 0; _i1378 < _list1376.size; ++_i1378) + org.apache.thrift.protocol.TList _list1384 = iprot.readListBegin(); + struct.success = new ArrayList(_list1384.size); + Partition _elem1385; + for (int _i1386 = 0; _i1386 < _list1384.size; ++_i1386) { - _elem1377 = new Partition(); - _elem1377.read(iprot); - struct.success.add(_elem1377); + _elem1385 = new Partition(); + _elem1385.read(iprot); + struct.success.add(_elem1385); } iprot.readListEnd(); } @@ -122095,9 +122250,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1379 : struct.success) + for (Partition _iter1387 : struct.success) { - _iter1379.write(oprot); + _iter1387.write(oprot); } oprot.writeListEnd(); } @@ -122144,9 +122299,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1380 : struct.success) + for (Partition _iter1388 : struct.success) { - _iter1380.write(oprot); + _iter1388.write(oprot); } } } @@ -122164,14 +122319,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) 
{ { - org.apache.thrift.protocol.TList _list1381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1381.size); - Partition _elem1382; - for (int _i1383 = 0; _i1383 < _list1381.size; ++_i1383) + org.apache.thrift.protocol.TList _list1389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1389.size); + Partition _elem1390; + for (int _i1391 = 0; _i1391 < _list1389.size; ++_i1391) { - _elem1382 = new Partition(); - _elem1382.read(iprot); - struct.success.add(_elem1382); + _elem1390 = new Partition(); + _elem1390.read(iprot); + struct.success.add(_elem1390); } } struct.setSuccessIsSet(true); @@ -123721,14 +123876,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1384 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1384.size); - Partition _elem1385; - for (int _i1386 = 0; _i1386 < _list1384.size; ++_i1386) + org.apache.thrift.protocol.TList _list1392 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1392.size); + Partition _elem1393; + for (int _i1394 = 0; _i1394 < _list1392.size; ++_i1394) { - _elem1385 = new Partition(); - _elem1385.read(iprot); - struct.new_parts.add(_elem1385); + _elem1393 = new Partition(); + _elem1393.read(iprot); + struct.new_parts.add(_elem1393); } iprot.readListEnd(); } @@ -123764,9 +123919,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1387 : struct.new_parts) + for (Partition _iter1395 : struct.new_parts) { - _iter1387.write(oprot); + _iter1395.write(oprot); } oprot.writeListEnd(); } @@ -123809,9 +123964,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1388 : struct.new_parts) + for (Partition _iter1396 : struct.new_parts) { - _iter1388.write(oprot); + _iter1396.write(oprot); } } } @@ -123831,14 +123986,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1389.size); - Partition _elem1390; - for (int _i1391 = 0; _i1391 < _list1389.size; ++_i1391) + org.apache.thrift.protocol.TList _list1397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1397.size); + Partition _elem1398; + for (int _i1399 = 0; _i1399 < _list1397.size; ++_i1399) { - _elem1390 = new Partition(); - _elem1390.read(iprot); - struct.new_parts.add(_elem1390); + _elem1398 = new Partition(); + _elem1398.read(iprot); + struct.new_parts.add(_elem1398); } } struct.setNew_partsIsSet(true); @@ -124891,14 +125046,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1392 = iprot.readListBegin(); - 
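
Note that the alter_partitions and alter_partitions_with_environment_context hunks are still pure renumbering; the transactional arguments (write id, txn id, valid write-id list) travel through the new request-based call whose generated plumbing is added further below. A rough client-side usage sketch follows. The setters shown are assumptions based on common Thrift bean conventions, since AlterPartitionsRequest's fields are not part of this diff; the method name and exception list come from the generated args/result classes added in this patch.

    // Hedged sketch only; setter names on AlterPartitionsRequest are assumed.
    void alterViaRequest(ThriftHiveMetastore.Iface client, String db, String tbl,
        java.util.List<Partition> newParts, String validWriteIds)
        throws InvalidOperationException, MetaException, org.apache.thrift.TException {
      AlterPartitionsRequest req = new AlterPartitionsRequest();
      req.setDbName(db);                       // assumed setter
      req.setTableName(tbl);                   // assumed setter
      req.setPartitions(newParts);             // assumed setter
      req.setValidWriteIdList(validWriteIds);  // assumed setter
      AlterPartitionsResponse resp =
          client.alter_partitions_with_environment_context_req(req);
    }
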
struct.new_parts = new ArrayList(_list1392.size); - Partition _elem1393; - for (int _i1394 = 0; _i1394 < _list1392.size; ++_i1394) + org.apache.thrift.protocol.TList _list1400 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1400.size); + Partition _elem1401; + for (int _i1402 = 0; _i1402 < _list1400.size; ++_i1402) { - _elem1393 = new Partition(); - _elem1393.read(iprot); - struct.new_parts.add(_elem1393); + _elem1401 = new Partition(); + _elem1401.read(iprot); + struct.new_parts.add(_elem1401); } iprot.readListEnd(); } @@ -124943,9 +125098,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1395 : struct.new_parts) + for (Partition _iter1403 : struct.new_parts) { - _iter1395.write(oprot); + _iter1403.write(oprot); } oprot.writeListEnd(); } @@ -124996,9 +125151,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1396 : struct.new_parts) + for (Partition _iter1404 : struct.new_parts) { - _iter1396.write(oprot); + _iter1404.write(oprot); } } } @@ -125021,14 +125176,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1397.size); - Partition _elem1398; - for (int _i1399 = 0; _i1399 < _list1397.size; ++_i1399) + org.apache.thrift.protocol.TList _list1405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1405.size); + Partition _elem1406; + for (int _i1407 = 0; _i1407 < _list1405.size; ++_i1407) { - _elem1398 = new Partition(); - _elem1398.read(iprot); - struct.new_parts.add(_elem1398); + _elem1406 = new Partition(); + _elem1406.read(iprot); + struct.new_parts.add(_elem1406); } } struct.setNew_partsIsSet(true); @@ -125443,10 +125598,937 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class alter_partitions_with_environment_context_resultTupleSchemeFactory implements SchemeFactory { + public alter_partitions_with_environment_context_resultTupleScheme getScheme() { + return new alter_partitions_with_environment_context_resultTupleScheme(); + } + } + + private static class alter_partitions_with_environment_context_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, 
alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new InvalidOperationException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_partitions_with_environment_context_req_args"); + + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new alter_partitions_with_environment_context_req_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_partitions_with_environment_context_req_argsTupleSchemeFactory()); + } + + private AlterPartitionsRequest req; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQ((short)1, "req"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQ + return REQ; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AlterPartitionsRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partitions_with_environment_context_req_args.class, metaDataMap); + } + + public alter_partitions_with_environment_context_req_args() { + } + + public alter_partitions_with_environment_context_req_args( + AlterPartitionsRequest req) + { + this(); + this.req = req; + } + + /** + * Performs a deep copy on other. + */ + public alter_partitions_with_environment_context_req_args(alter_partitions_with_environment_context_req_args other) { + if (other.isSetReq()) { + this.req = new AlterPartitionsRequest(other.req); + } + } + + public alter_partitions_with_environment_context_req_args deepCopy() { + return new alter_partitions_with_environment_context_req_args(this); + } + + @Override + public void clear() { + this.req = null; + } + + public AlterPartitionsRequest getReq() { + return this.req; + } + + public void setReq(AlterPartitionsRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((AlterPartitionsRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQ: + return getReq(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQ: + return isSetReq(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_partitions_with_environment_context_req_args) + return this.equals((alter_partitions_with_environment_context_req_args)that); + return false; + } + + public boolean equals(alter_partitions_with_environment_context_req_args that) { + if (that == null) + return false; + + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + + return true; + } + 
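
The equality and hashing plumbing generated for these classes follows one fixed recipe: equals() (just above) compares presence flags first and values second, and hashCode() (next) collects the same presence flags and values into a List and defers to List.hashCode(). Because isSetReq() is defined as req != null, the single-field case is behaviorally equivalent to this compact hand-written form:

    // Equivalent sketch of the generated equals() contract for the one-field
    // args struct; relies on isSetReq() == (req != null), as defined above.
    @Override
    public boolean equals(Object that) {
      if (!(that instanceof alter_partitions_with_environment_context_req_args)) {
        return false;
      }
      alter_partitions_with_environment_context_req_args other =
          (alter_partitions_with_environment_context_req_args) that;
      // both unset -> equal; exactly one set -> unequal; both set -> compare values
      return java.util.Objects.equals(this.req, other.req);
    }
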
+ @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + + return list.hashCode(); + } + + @Override + public int compareTo(alter_partitions_with_environment_context_req_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_partitions_with_environment_context_req_args("); + boolean first = true; + + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (req != null) { + req.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class alter_partitions_with_environment_context_req_argsStandardSchemeFactory implements SchemeFactory { + public alter_partitions_with_environment_context_req_argsStandardScheme getScheme() { + return new alter_partitions_with_environment_context_req_argsStandardScheme(); + } + } + + private static class alter_partitions_with_environment_context_req_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_with_environment_context_req_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new AlterPartitionsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + 
iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_with_environment_context_req_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class alter_partitions_with_environment_context_req_argsTupleSchemeFactory implements SchemeFactory { + public alter_partitions_with_environment_context_req_argsTupleScheme getScheme() { + return new alter_partitions_with_environment_context_req_argsTupleScheme(); + } + } + + private static class alter_partitions_with_environment_context_req_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_req_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_req_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new AlterPartitionsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_partitions_with_environment_context_req_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new alter_partitions_with_environment_context_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_partitions_with_environment_context_req_resultTupleSchemeFactory()); + } + + private AlterPartitionsResponse success; // required + private InvalidOperationException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
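
Each generated struct registers two SchemeFactory instances in a static schemes map, and read()/write() dispatch on the protocol's scheme class: binary-style protocols select the self-describing StandardScheme (full field headers, skip-unknown-field forward compatibility), while TTupleProtocol selects the denser TupleScheme. The map declaration's generic parameters appear to have been lost in this rendering; written out with the type arguments the Thrift generator normally emits, the registration looks like this:

    // Scheme registration as emitted per generated struct; the protocol's
    // getScheme() class is the lookup key at (de)serialization time.
    private static final java.util.Map<Class<? extends org.apache.thrift.scheme.IScheme>,
        org.apache.thrift.scheme.SchemeFactory> schemes =
            new java.util.HashMap<Class<? extends org.apache.thrift.scheme.IScheme>,
                org.apache.thrift.scheme.SchemeFactory>();
    static {
      schemes.put(org.apache.thrift.scheme.StandardScheme.class,
          new alter_partitions_with_environment_context_req_resultStandardSchemeFactory());
      schemes.put(org.apache.thrift.scheme.TupleScheme.class,
          new alter_partitions_with_environment_context_req_resultTupleSchemeFactory());
    }
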
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AlterPartitionsResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partitions_with_environment_context_req_result.class, metaDataMap); + } + + public alter_partitions_with_environment_context_req_result() { + } + + public alter_partitions_with_environment_context_req_result( + AlterPartitionsResponse success, + InvalidOperationException o1, + MetaException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public alter_partitions_with_environment_context_req_result(alter_partitions_with_environment_context_req_result other) { + if (other.isSetSuccess()) { + this.success = new AlterPartitionsResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new InvalidOperationException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public alter_partitions_with_environment_context_req_result deepCopy() { + return new alter_partitions_with_environment_context_req_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public AlterPartitionsResponse getSuccess() { + return this.success; + } + + public void setSuccess(AlterPartitionsResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public InvalidOperationException getO1() { + return this.o1; + } + + public void setO1(InvalidOperationException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((AlterPartitionsResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((InvalidOperationException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_partitions_with_environment_context_req_result) + return this.equals((alter_partitions_with_environment_context_req_result)that); + return false; + } + + public boolean equals(alter_partitions_with_environment_context_req_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || 
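
A *_result struct is how Thrift encodes a method's outcome: field 0 (success) carries the return value, and each exception declared in the IDL gets its own field, here o1 = InvalidOperationException and o2 = MetaException. On receive, the client stub unwraps them in order, mirroring the generated recv_* convention used elsewhere in this file; roughly:

    // Sketch of the client-side unwrap convention for this *_result struct.
    AlterPartitionsResponse unwrap(alter_partitions_with_environment_context_req_result result)
        throws InvalidOperationException, MetaException, org.apache.thrift.TApplicationException {
      if (result.isSetSuccess()) {
        return result.getSuccess();
      }
      if (result.isSetO1()) {
        throw result.getO1();
      }
      if (result.isSetO2()) {
        throw result.getO2();
      }
      throw new org.apache.thrift.TApplicationException(
          org.apache.thrift.TApplicationException.MISSING_RESULT,
          "alter_partitions_with_environment_context_req failed: unknown result");
    }
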
that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(alter_partitions_with_environment_context_req_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_partitions_with_environment_context_req_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + 
success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class alter_partitions_with_environment_context_req_resultStandardSchemeFactory implements SchemeFactory { + public alter_partitions_with_environment_context_req_resultStandardScheme getScheme() { + return new alter_partitions_with_environment_context_req_resultStandardScheme(); + } + } + + private static class alter_partitions_with_environment_context_req_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_with_environment_context_req_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new AlterPartitionsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new InvalidOperationException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_with_environment_context_req_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -125463,25 +126545,31 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w } - private static class alter_partitions_with_environment_context_resultTupleSchemeFactory implements SchemeFactory { - public alter_partitions_with_environment_context_resultTupleScheme getScheme() { - return new alter_partitions_with_environment_context_resultTupleScheme(); + private static class alter_partitions_with_environment_context_req_resultTupleSchemeFactory implements SchemeFactory { + public alter_partitions_with_environment_context_req_resultTupleScheme getScheme() { + return 
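
The writeObject/readObject pair above makes the generated struct java.io.Serializable by round-tripping it through TCompactProtocol over the object stream, so Java serialization stays byte-compatible with the Thrift wire format. The same trick works for ad-hoc byte[] snapshots using the utility classes Thrift ships for this purpose:

    // Serialize any generated TBase struct to compact-protocol bytes.
    static byte[] toBytes(org.apache.thrift.TBase<?, ?> struct)
        throws org.apache.thrift.TException {
      org.apache.thrift.TSerializer serializer = new org.apache.thrift.TSerializer(
          new org.apache.thrift.protocol.TCompactProtocol.Factory());
      return serializer.serialize(struct);
    }

    // And back again, into a freshly constructed struct instance.
    static void fromBytes(org.apache.thrift.TBase<?, ?> target, byte[] bytes)
        throws org.apache.thrift.TException {
      org.apache.thrift.TDeserializer deserializer = new org.apache.thrift.TDeserializer(
          new org.apache.thrift.protocol.TCompactProtocol.Factory());
      deserializer.deserialize(target, bytes);
    }
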
new alter_partitions_with_environment_context_req_resultTupleScheme(); } } - private static class alter_partitions_with_environment_context_resultTupleScheme extends TupleScheme { + private static class alter_partitions_with_environment_context_req_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_req_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } if (struct.isSetO1()) { struct.o1.write(oprot); } @@ -125491,15 +126579,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_req_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { + struct.success = new AlterPartitionsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.o1 = new InvalidOperationException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); @@ -127229,13 +128322,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1400 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1400.size); - String _elem1401; - for (int _i1402 = 0; _i1402 < _list1400.size; ++_i1402) + org.apache.thrift.protocol.TList _list1408 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1408.size); + String _elem1409; + for (int _i1410 = 0; _i1410 < _list1408.size; ++_i1410) { - _elem1401 = iprot.readString(); - struct.part_vals.add(_elem1401); + _elem1409 = iprot.readString(); + struct.part_vals.add(_elem1409); } iprot.readListEnd(); } @@ -127280,9 +128373,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1403 : struct.part_vals) + for (String _iter1411 : struct.part_vals) { - oprot.writeString(_iter1403); + oprot.writeString(_iter1411); } oprot.writeListEnd(); } @@ -127333,9 +128426,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1404 : struct.part_vals) + for (String _iter1412 : struct.part_vals) { - 
oprot.writeString(_iter1404); + oprot.writeString(_iter1412); } } } @@ -127358,13 +128451,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1405.size); - String _elem1406; - for (int _i1407 = 0; _i1407 < _list1405.size; ++_i1407) + org.apache.thrift.protocol.TList _list1413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1413.size); + String _elem1414; + for (int _i1415 = 0; _i1415 < _list1413.size; ++_i1415) { - _elem1406 = iprot.readString(); - struct.part_vals.add(_elem1406); + _elem1414 = iprot.readString(); + struct.part_vals.add(_elem1414); } } struct.setPart_valsIsSet(true); @@ -128238,13 +129331,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1408 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1408.size); - String _elem1409; - for (int _i1410 = 0; _i1410 < _list1408.size; ++_i1410) + org.apache.thrift.protocol.TList _list1416 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1416.size); + String _elem1417; + for (int _i1418 = 0; _i1418 < _list1416.size; ++_i1418) { - _elem1409 = iprot.readString(); - struct.part_vals.add(_elem1409); + _elem1417 = iprot.readString(); + struct.part_vals.add(_elem1417); } iprot.readListEnd(); } @@ -128278,9 +129371,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1411 : struct.part_vals) + for (String _iter1419 : struct.part_vals) { - oprot.writeString(_iter1411); + oprot.writeString(_iter1419); } oprot.writeListEnd(); } @@ -128317,9 +129410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1412 : struct.part_vals) + for (String _iter1420 : struct.part_vals) { - oprot.writeString(_iter1412); + oprot.writeString(_iter1420); } } } @@ -128334,13 +129427,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1413.size); - String _elem1414; - for (int _i1415 = 0; _i1415 < _list1413.size; ++_i1415) + org.apache.thrift.protocol.TList _list1421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1421.size); + String _elem1422; + for (int _i1423 = 0; _i1423 < _list1421.size; ++_i1423) { - _elem1414 = iprot.readString(); - struct.part_vals.add(_elem1414); + _elem1422 = iprot.readString(); + struct.part_vals.add(_elem1422); } } struct.setPart_valsIsSet(true); @@ -130495,13 +131588,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1416 = iprot.readListBegin(); - struct.success = new ArrayList(_list1416.size); - String _elem1417; - for (int _i1418 = 0; _i1418 < _list1416.size; ++_i1418) + org.apache.thrift.protocol.TList _list1424 = iprot.readListBegin(); + struct.success = new ArrayList(_list1424.size); + String _elem1425; + for (int _i1426 = 0; _i1426 < _list1424.size; ++_i1426) { - _elem1417 = iprot.readString(); - struct.success.add(_elem1417); + _elem1425 = iprot.readString(); + struct.success.add(_elem1425); } iprot.readListEnd(); } @@ -130536,9 +131629,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1419 : struct.success) + for (String _iter1427 : struct.success) { - oprot.writeString(_iter1419); + oprot.writeString(_iter1427); } oprot.writeListEnd(); } @@ -130577,9 +131670,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1420 : struct.success) + for (String _iter1428 : struct.success) { - oprot.writeString(_iter1420); + oprot.writeString(_iter1428); } } } @@ -130594,13 +131687,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1421.size); - String _elem1422; - for (int _i1423 = 0; _i1423 < _list1421.size; ++_i1423) + org.apache.thrift.protocol.TList _list1429 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1429.size); + String _elem1430; + for (int _i1431 = 0; _i1431 < _list1429.size; ++_i1431) { - _elem1422 = iprot.readString(); - struct.success.add(_elem1422); + _elem1430 = iprot.readString(); + struct.success.add(_elem1430); } } struct.setSuccessIsSet(true); @@ -131363,15 +132456,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1424 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1424.size); - String _key1425; - String _val1426; - for (int _i1427 = 0; _i1427 < _map1424.size; ++_i1427) + org.apache.thrift.protocol.TMap _map1432 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1432.size); + String _key1433; + String _val1434; + for (int _i1435 = 0; _i1435 < _map1432.size; ++_i1435) { - _key1425 = iprot.readString(); - _val1426 = iprot.readString(); - struct.success.put(_key1425, _val1426); + _key1433 = iprot.readString(); + _val1434 = iprot.readString(); + struct.success.put(_key1433, _val1434); } iprot.readMapEnd(); } @@ -131406,10 +132499,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1428 : struct.success.entrySet()) + for (Map.Entry _iter1436 : 
struct.success.entrySet()) { - oprot.writeString(_iter1428.getKey()); - oprot.writeString(_iter1428.getValue()); + oprot.writeString(_iter1436.getKey()); + oprot.writeString(_iter1436.getValue()); } oprot.writeMapEnd(); } @@ -131448,10 +132541,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1429 : struct.success.entrySet()) + for (Map.Entry _iter1437 : struct.success.entrySet()) { - oprot.writeString(_iter1429.getKey()); - oprot.writeString(_iter1429.getValue()); + oprot.writeString(_iter1437.getKey()); + oprot.writeString(_iter1437.getValue()); } } } @@ -131466,15 +132559,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1430 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1430.size); - String _key1431; - String _val1432; - for (int _i1433 = 0; _i1433 < _map1430.size; ++_i1433) + org.apache.thrift.protocol.TMap _map1438 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1438.size); + String _key1439; + String _val1440; + for (int _i1441 = 0; _i1441 < _map1438.size; ++_i1441) { - _key1431 = iprot.readString(); - _val1432 = iprot.readString(); - struct.success.put(_key1431, _val1432); + _key1439 = iprot.readString(); + _val1440 = iprot.readString(); + struct.success.put(_key1439, _val1440); } } struct.setSuccessIsSet(true); @@ -132069,15 +133162,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1434 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1434.size); - String _key1435; - String _val1436; - for (int _i1437 = 0; _i1437 < _map1434.size; ++_i1437) + org.apache.thrift.protocol.TMap _map1442 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1442.size); + String _key1443; + String _val1444; + for (int _i1445 = 0; _i1445 < _map1442.size; ++_i1445) { - _key1435 = iprot.readString(); - _val1436 = iprot.readString(); - struct.part_vals.put(_key1435, _val1436); + _key1443 = iprot.readString(); + _val1444 = iprot.readString(); + struct.part_vals.put(_key1443, _val1444); } iprot.readMapEnd(); } @@ -132121,10 +133214,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1438 : struct.part_vals.entrySet()) + for (Map.Entry _iter1446 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1438.getKey()); - oprot.writeString(_iter1438.getValue()); + oprot.writeString(_iter1446.getKey()); + oprot.writeString(_iter1446.getValue()); } oprot.writeMapEnd(); } @@ -132175,10 +133268,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1439 : struct.part_vals.entrySet()) + for (Map.Entry _iter1447 : 
struct.part_vals.entrySet()) { - oprot.writeString(_iter1439.getKey()); - oprot.writeString(_iter1439.getValue()); + oprot.writeString(_iter1447.getKey()); + oprot.writeString(_iter1447.getValue()); } } } @@ -132201,15 +133294,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1440 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1440.size); - String _key1441; - String _val1442; - for (int _i1443 = 0; _i1443 < _map1440.size; ++_i1443) + org.apache.thrift.protocol.TMap _map1448 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1448.size); + String _key1449; + String _val1450; + for (int _i1451 = 0; _i1451 < _map1448.size; ++_i1451) { - _key1441 = iprot.readString(); - _val1442 = iprot.readString(); - struct.part_vals.put(_key1441, _val1442); + _key1449 = iprot.readString(); + _val1450 = iprot.readString(); + struct.part_vals.put(_key1449, _val1450); } } struct.setPart_valsIsSet(true); @@ -133693,15 +134786,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1444 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1444.size); - String _key1445; - String _val1446; - for (int _i1447 = 0; _i1447 < _map1444.size; ++_i1447) + org.apache.thrift.protocol.TMap _map1452 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1452.size); + String _key1453; + String _val1454; + for (int _i1455 = 0; _i1455 < _map1452.size; ++_i1455) { - _key1445 = iprot.readString(); - _val1446 = iprot.readString(); - struct.part_vals.put(_key1445, _val1446); + _key1453 = iprot.readString(); + _val1454 = iprot.readString(); + struct.part_vals.put(_key1453, _val1454); } iprot.readMapEnd(); } @@ -133745,10 +134838,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1448 : struct.part_vals.entrySet()) + for (Map.Entry _iter1456 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1448.getKey()); - oprot.writeString(_iter1448.getValue()); + oprot.writeString(_iter1456.getKey()); + oprot.writeString(_iter1456.getValue()); } oprot.writeMapEnd(); } @@ -133799,10 +134892,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1449 : struct.part_vals.entrySet()) + for (Map.Entry _iter1457 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1449.getKey()); - oprot.writeString(_iter1449.getValue()); + oprot.writeString(_iter1457.getKey()); + oprot.writeString(_iter1457.getValue()); } } } @@ -133825,15 +134918,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1450 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - 
struct.part_vals = new HashMap(2*_map1450.size); - String _key1451; - String _val1452; - for (int _i1453 = 0; _i1453 < _map1450.size; ++_i1453) + org.apache.thrift.protocol.TMap _map1458 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1458.size); + String _key1459; + String _val1460; + for (int _i1461 = 0; _i1461 < _map1458.size; ++_i1461) { - _key1451 = iprot.readString(); - _val1452 = iprot.readString(); - struct.part_vals.put(_key1451, _val1452); + _key1459 = iprot.readString(); + _val1460 = iprot.readString(); + struct.part_vals.put(_key1459, _val1460); } } struct.setPart_valsIsSet(true); @@ -156189,13 +157282,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1454 = iprot.readListBegin(); - struct.success = new ArrayList(_list1454.size); - String _elem1455; - for (int _i1456 = 0; _i1456 < _list1454.size; ++_i1456) + org.apache.thrift.protocol.TList _list1462 = iprot.readListBegin(); + struct.success = new ArrayList(_list1462.size); + String _elem1463; + for (int _i1464 = 0; _i1464 < _list1462.size; ++_i1464) { - _elem1455 = iprot.readString(); - struct.success.add(_elem1455); + _elem1463 = iprot.readString(); + struct.success.add(_elem1463); } iprot.readListEnd(); } @@ -156230,9 +157323,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1457 : struct.success) + for (String _iter1465 : struct.success) { - oprot.writeString(_iter1457); + oprot.writeString(_iter1465); } oprot.writeListEnd(); } @@ -156271,9 +157364,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1458 : struct.success) + for (String _iter1466 : struct.success) { - oprot.writeString(_iter1458); + oprot.writeString(_iter1466); } } } @@ -156288,13 +157381,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1459 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1459.size); - String _elem1460; - for (int _i1461 = 0; _i1461 < _list1459.size; ++_i1461) + org.apache.thrift.protocol.TList _list1467 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1467.size); + String _elem1468; + for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469) { - _elem1460 = iprot.readString(); - struct.success.add(_elem1460); + _elem1468 = iprot.readString(); + struct.success.add(_elem1468); } } struct.setSuccessIsSet(true); @@ -160349,13 +161442,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1462 = iprot.readListBegin(); - struct.success = new ArrayList(_list1462.size); - String _elem1463; - for (int _i1464 = 0; _i1464 < _list1462.size; ++_i1464) + 
org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); + struct.success = new ArrayList(_list1470.size); + String _elem1471; + for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) { - _elem1463 = iprot.readString(); - struct.success.add(_elem1463); + _elem1471 = iprot.readString(); + struct.success.add(_elem1471); } iprot.readListEnd(); } @@ -160390,9 +161483,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1465 : struct.success) + for (String _iter1473 : struct.success) { - oprot.writeString(_iter1465); + oprot.writeString(_iter1473); } oprot.writeListEnd(); } @@ -160431,9 +161524,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1466 : struct.success) + for (String _iter1474 : struct.success) { - oprot.writeString(_iter1466); + oprot.writeString(_iter1474); } } } @@ -160448,13 +161541,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1467 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1467.size); - String _elem1468; - for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469) + org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1475.size); + String _elem1476; + for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) { - _elem1468 = iprot.readString(); - struct.success.add(_elem1468); + _elem1476 = iprot.readString(); + struct.success.add(_elem1476); } } struct.setSuccessIsSet(true); @@ -163745,14 +164838,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); - struct.success = new ArrayList(_list1470.size); - Role _elem1471; - for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) + org.apache.thrift.protocol.TList _list1478 = iprot.readListBegin(); + struct.success = new ArrayList(_list1478.size); + Role _elem1479; + for (int _i1480 = 0; _i1480 < _list1478.size; ++_i1480) { - _elem1471 = new Role(); - _elem1471.read(iprot); - struct.success.add(_elem1471); + _elem1479 = new Role(); + _elem1479.read(iprot); + struct.success.add(_elem1479); } iprot.readListEnd(); } @@ -163787,9 +164880,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1473 : struct.success) + for (Role _iter1481 : struct.success) { - _iter1473.write(oprot); + _iter1481.write(oprot); } oprot.writeListEnd(); } @@ -163828,9 +164921,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1474 : struct.success) + for (Role _iter1482 : struct.success) { - _iter1474.write(oprot); + _iter1482.write(oprot); 
} } } @@ -163845,14 +164938,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1475.size); - Role _elem1476; - for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) + org.apache.thrift.protocol.TList _list1483 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1483.size); + Role _elem1484; + for (int _i1485 = 0; _i1485 < _list1483.size; ++_i1485) { - _elem1476 = new Role(); - _elem1476.read(iprot); - struct.success.add(_elem1476); + _elem1484 = new Role(); + _elem1484.read(iprot); + struct.success.add(_elem1484); } } struct.setSuccessIsSet(true); @@ -166857,13 +167950,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1478 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1478.size); - String _elem1479; - for (int _i1480 = 0; _i1480 < _list1478.size; ++_i1480) + org.apache.thrift.protocol.TList _list1486 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1486.size); + String _elem1487; + for (int _i1488 = 0; _i1488 < _list1486.size; ++_i1488) { - _elem1479 = iprot.readString(); - struct.group_names.add(_elem1479); + _elem1487 = iprot.readString(); + struct.group_names.add(_elem1487); } iprot.readListEnd(); } @@ -166899,9 +167992,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1481 : struct.group_names) + for (String _iter1489 : struct.group_names) { - oprot.writeString(_iter1481); + oprot.writeString(_iter1489); } oprot.writeListEnd(); } @@ -166944,9 +168037,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1482 : struct.group_names) + for (String _iter1490 : struct.group_names) { - oprot.writeString(_iter1482); + oprot.writeString(_iter1490); } } } @@ -166967,13 +168060,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1483 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1483.size); - String _elem1484; - for (int _i1485 = 0; _i1485 < _list1483.size; ++_i1485) + org.apache.thrift.protocol.TList _list1491 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1491.size); + String _elem1492; + for (int _i1493 = 0; _i1493 < _list1491.size; ++_i1493) { - _elem1484 = iprot.readString(); - struct.group_names.add(_elem1484); + _elem1492 = iprot.readString(); + struct.group_names.add(_elem1492); } } struct.setGroup_namesIsSet(true); @@ -168431,14 +169524,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1486 = iprot.readListBegin(); - struct.success = new ArrayList(_list1486.size); - HiveObjectPrivilege _elem1487; - for (int _i1488 = 0; _i1488 < _list1486.size; ++_i1488) + org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin(); + struct.success = new ArrayList(_list1494.size); + HiveObjectPrivilege _elem1495; + for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496) { - _elem1487 = new HiveObjectPrivilege(); - _elem1487.read(iprot); - struct.success.add(_elem1487); + _elem1495 = new HiveObjectPrivilege(); + _elem1495.read(iprot); + struct.success.add(_elem1495); } iprot.readListEnd(); } @@ -168473,9 +169566,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1489 : struct.success) + for (HiveObjectPrivilege _iter1497 : struct.success) { - _iter1489.write(oprot); + _iter1497.write(oprot); } oprot.writeListEnd(); } @@ -168514,9 +169607,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1490 : struct.success) + for (HiveObjectPrivilege _iter1498 : struct.success) { - _iter1490.write(oprot); + _iter1498.write(oprot); } } } @@ -168531,14 +169624,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1491 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1491.size); - HiveObjectPrivilege _elem1492; - for (int _i1493 = 0; _i1493 < _list1491.size; ++_i1493) + org.apache.thrift.protocol.TList _list1499 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1499.size); + HiveObjectPrivilege _elem1500; + for (int _i1501 = 0; _i1501 < _list1499.size; ++_i1501) { - _elem1492 = new HiveObjectPrivilege(); - _elem1492.read(iprot); - struct.success.add(_elem1492); + _elem1500 = new HiveObjectPrivilege(); + _elem1500.read(iprot); + struct.success.add(_elem1500); } } struct.setSuccessIsSet(true); @@ -172485,13 +173578,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1494.size); - String _elem1495; - for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496) + org.apache.thrift.protocol.TList _list1502 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1502.size); + String _elem1503; + for (int _i1504 = 0; _i1504 < _list1502.size; ++_i1504) { - _elem1495 = iprot.readString(); - struct.group_names.add(_elem1495); + _elem1503 = iprot.readString(); + struct.group_names.add(_elem1503); } iprot.readListEnd(); } @@ -172522,9 +173615,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String 
_iter1497 : struct.group_names) + for (String _iter1505 : struct.group_names) { - oprot.writeString(_iter1497); + oprot.writeString(_iter1505); } oprot.writeListEnd(); } @@ -172561,9 +173654,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1498 : struct.group_names) + for (String _iter1506 : struct.group_names) { - oprot.writeString(_iter1498); + oprot.writeString(_iter1506); } } } @@ -172579,13 +173672,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1499 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1499.size); - String _elem1500; - for (int _i1501 = 0; _i1501 < _list1499.size; ++_i1501) + org.apache.thrift.protocol.TList _list1507 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1507.size); + String _elem1508; + for (int _i1509 = 0; _i1509 < _list1507.size; ++_i1509) { - _elem1500 = iprot.readString(); - struct.group_names.add(_elem1500); + _elem1508 = iprot.readString(); + struct.group_names.add(_elem1508); } } struct.setGroup_namesIsSet(true); @@ -172988,13 +174081,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1502 = iprot.readListBegin(); - struct.success = new ArrayList(_list1502.size); - String _elem1503; - for (int _i1504 = 0; _i1504 < _list1502.size; ++_i1504) + org.apache.thrift.protocol.TList _list1510 = iprot.readListBegin(); + struct.success = new ArrayList(_list1510.size); + String _elem1511; + for (int _i1512 = 0; _i1512 < _list1510.size; ++_i1512) { - _elem1503 = iprot.readString(); - struct.success.add(_elem1503); + _elem1511 = iprot.readString(); + struct.success.add(_elem1511); } iprot.readListEnd(); } @@ -173029,9 +174122,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1505 : struct.success) + for (String _iter1513 : struct.success) { - oprot.writeString(_iter1505); + oprot.writeString(_iter1513); } oprot.writeListEnd(); } @@ -173070,9 +174163,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1506 : struct.success) + for (String _iter1514 : struct.success) { - oprot.writeString(_iter1506); + oprot.writeString(_iter1514); } } } @@ -173087,13 +174180,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1507 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1507.size); - String _elem1508; - for (int _i1509 = 0; _i1509 < _list1507.size; ++_i1509) + org.apache.thrift.protocol.TList _list1515 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new 
ArrayList(_list1515.size); + String _elem1516; + for (int _i1517 = 0; _i1517 < _list1515.size; ++_i1517) { - _elem1508 = iprot.readString(); - struct.success.add(_elem1508); + _elem1516 = iprot.readString(); + struct.success.add(_elem1516); } } struct.setSuccessIsSet(true); @@ -178384,13 +179477,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1510 = iprot.readListBegin(); - struct.success = new ArrayList(_list1510.size); - String _elem1511; - for (int _i1512 = 0; _i1512 < _list1510.size; ++_i1512) + org.apache.thrift.protocol.TList _list1518 = iprot.readListBegin(); + struct.success = new ArrayList(_list1518.size); + String _elem1519; + for (int _i1520 = 0; _i1520 < _list1518.size; ++_i1520) { - _elem1511 = iprot.readString(); - struct.success.add(_elem1511); + _elem1519 = iprot.readString(); + struct.success.add(_elem1519); } iprot.readListEnd(); } @@ -178416,9 +179509,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1513 : struct.success) + for (String _iter1521 : struct.success) { - oprot.writeString(_iter1513); + oprot.writeString(_iter1521); } oprot.writeListEnd(); } @@ -178449,9 +179542,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1514 : struct.success) + for (String _iter1522 : struct.success) { - oprot.writeString(_iter1514); + oprot.writeString(_iter1522); } } } @@ -178463,13 +179556,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1515 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1515.size); - String _elem1516; - for (int _i1517 = 0; _i1517 < _list1515.size; ++_i1517) + org.apache.thrift.protocol.TList _list1523 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1523.size); + String _elem1524; + for (int _i1525 = 0; _i1525 < _list1523.size; ++_i1525) { - _elem1516 = iprot.readString(); - struct.success.add(_elem1516); + _elem1524 = iprot.readString(); + struct.success.add(_elem1524); } } struct.setSuccessIsSet(true); @@ -181499,13 +182592,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1518 = iprot.readListBegin(); - struct.success = new ArrayList(_list1518.size); - String _elem1519; - for (int _i1520 = 0; _i1520 < _list1518.size; ++_i1520) + org.apache.thrift.protocol.TList _list1526 = iprot.readListBegin(); + struct.success = new ArrayList(_list1526.size); + String _elem1527; + for (int _i1528 = 0; _i1528 < _list1526.size; ++_i1528) { - _elem1519 = iprot.readString(); - struct.success.add(_elem1519); + _elem1527 = iprot.readString(); + struct.success.add(_elem1527); } iprot.readListEnd(); } @@ -181531,9 +182624,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1521 : struct.success) + for (String _iter1529 : struct.success) { - oprot.writeString(_iter1521); + oprot.writeString(_iter1529); } oprot.writeListEnd(); } @@ -181564,9 +182657,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1522 : struct.success) + for (String _iter1530 : struct.success) { - oprot.writeString(_iter1522); + oprot.writeString(_iter1530); } } } @@ -181578,13 +182671,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1523 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1523.size); - String _elem1524; - for (int _i1525 = 0; _i1525 < _list1523.size; ++_i1525) + org.apache.thrift.protocol.TList _list1531 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1531.size); + String _elem1532; + for (int _i1533 = 0; _i1533 < _list1531.size; ++_i1533) { - _elem1524 = iprot.readString(); - struct.success.add(_elem1524); + _elem1532 = iprot.readString(); + struct.success.add(_elem1532); } } struct.setSuccessIsSet(true); @@ -230493,14 +231586,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1526 = iprot.readListBegin(); - struct.success = new ArrayList(_list1526.size); - SchemaVersion _elem1527; - for (int _i1528 = 0; _i1528 < _list1526.size; ++_i1528) + org.apache.thrift.protocol.TList _list1534 = iprot.readListBegin(); + struct.success = new ArrayList(_list1534.size); + SchemaVersion _elem1535; + for (int _i1536 = 0; _i1536 < _list1534.size; ++_i1536) { - _elem1527 = new SchemaVersion(); - _elem1527.read(iprot); - struct.success.add(_elem1527); + _elem1535 = new SchemaVersion(); + _elem1535.read(iprot); + struct.success.add(_elem1535); } iprot.readListEnd(); } @@ -230544,9 +231637,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1529 : struct.success) + for (SchemaVersion _iter1537 : struct.success) { - _iter1529.write(oprot); + _iter1537.write(oprot); } oprot.writeListEnd(); } @@ -230593,9 +231686,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1530 : struct.success) + for (SchemaVersion _iter1538 : struct.success) { - _iter1530.write(oprot); + _iter1538.write(oprot); } } } @@ -230613,14 +231706,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1531 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1531.size); 
- SchemaVersion _elem1532; - for (int _i1533 = 0; _i1533 < _list1531.size; ++_i1533) + org.apache.thrift.protocol.TList _list1539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1539.size); + SchemaVersion _elem1540; + for (int _i1541 = 0; _i1541 < _list1539.size; ++_i1541) { - _elem1532 = new SchemaVersion(); - _elem1532.read(iprot); - struct.success.add(_elem1532); + _elem1540 = new SchemaVersion(); + _elem1540.read(iprot); + struct.success.add(_elem1540); } } struct.setSuccessIsSet(true); @@ -239163,14 +240256,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1534 = iprot.readListBegin(); - struct.success = new ArrayList(_list1534.size); - RuntimeStat _elem1535; - for (int _i1536 = 0; _i1536 < _list1534.size; ++_i1536) + org.apache.thrift.protocol.TList _list1542 = iprot.readListBegin(); + struct.success = new ArrayList(_list1542.size); + RuntimeStat _elem1543; + for (int _i1544 = 0; _i1544 < _list1542.size; ++_i1544) { - _elem1535 = new RuntimeStat(); - _elem1535.read(iprot); - struct.success.add(_elem1535); + _elem1543 = new RuntimeStat(); + _elem1543.read(iprot); + struct.success.add(_elem1543); } iprot.readListEnd(); } @@ -239205,9 +240298,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter1537 : struct.success) + for (RuntimeStat _iter1545 : struct.success) { - _iter1537.write(oprot); + _iter1545.write(oprot); } oprot.writeListEnd(); } @@ -239246,9 +240339,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter1538 : struct.success) + for (RuntimeStat _iter1546 : struct.success) { - _iter1538.write(oprot); + _iter1546.write(oprot); } } } @@ -239263,14 +240356,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1539.size); - RuntimeStat _elem1540; - for (int _i1541 = 0; _i1541 < _list1539.size; ++_i1541) + org.apache.thrift.protocol.TList _list1547 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1547.size); + RuntimeStat _elem1548; + for (int _i1549 = 0; _i1549 < _list1547.size; ++_i1549) { - _elem1540 = new RuntimeStat(); - _elem1540.read(iprot); - struct.success.add(_elem1540); + _elem1548 = new RuntimeStat(); + _elem1548.read(iprot); + struct.success.add(_elem1548); } } struct.setSuccessIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index a83017b9dd..187acebfe0 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -723,6 +723,13 @@ interface ThriftHiveMetastoreIf extends 
\FacebookServiceIf { */ public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context); /** + * @param \metastore\AlterPartitionsRequest $req + * @return \metastore\AlterPartitionsResponse + * @throws \metastore\InvalidOperationException + * @throws \metastore\MetaException + */ + public function alter_partitions_with_environment_context_req(\metastore\AlterPartitionsRequest $req); + /** * @param string $db_name * @param string $tbl_name * @param \metastore\Partition $new_part @@ -6456,6 +6463,63 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } + public function alter_partitions_with_environment_context_req(\metastore\AlterPartitionsRequest $req) + { + $this->send_alter_partitions_with_environment_context_req($req); + return $this->recv_alter_partitions_with_environment_context_req(); + } + + public function send_alter_partitions_with_environment_context_req(\metastore\AlterPartitionsRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_req_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'alter_partitions_with_environment_context_req', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('alter_partitions_with_environment_context_req', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_alter_partitions_with_environment_context_req() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_req_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_req_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("alter_partitions_with_environment_context_req failed: unknown result"); + } + public function alter_partition_with_environment_context($db_name, $tbl_name, \metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context) { $this->send_alter_partition_with_environment_context($db_name, $tbl_name, $new_part, $environment_context); @@ -15496,14 +15560,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size841 = 0; - $_etype844 = 0; - $xfer += $input->readListBegin($_etype844, $_size841); - for ($_i845 = 0; $_i845 < $_size841; ++$_i845) + $_size848 = 0; + $_etype851 = 0; + $xfer += $input->readListBegin($_etype851, $_size848); + for ($_i852 = 0; $_i852 < $_size848; ++$_i852) { - 
$elem846 = null; - $xfer += $input->readString($elem846); - $this->success []= $elem846; + $elem853 = null; + $xfer += $input->readString($elem853); + $this->success []= $elem853; } $xfer += $input->readListEnd(); } else { @@ -15539,9 +15603,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter847) + foreach ($this->success as $iter854) { - $xfer += $output->writeString($iter847); + $xfer += $output->writeString($iter854); } } $output->writeListEnd(); @@ -15672,14 +15736,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size848 = 0; - $_etype851 = 0; - $xfer += $input->readListBegin($_etype851, $_size848); - for ($_i852 = 0; $_i852 < $_size848; ++$_i852) + $_size855 = 0; + $_etype858 = 0; + $xfer += $input->readListBegin($_etype858, $_size855); + for ($_i859 = 0; $_i859 < $_size855; ++$_i859) { - $elem853 = null; - $xfer += $input->readString($elem853); - $this->success []= $elem853; + $elem860 = null; + $xfer += $input->readString($elem860); + $this->success []= $elem860; } $xfer += $input->readListEnd(); } else { @@ -15715,9 +15779,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter854) + foreach ($this->success as $iter861) { - $xfer += $output->writeString($iter854); + $xfer += $output->writeString($iter861); } } $output->writeListEnd(); @@ -16718,18 +16782,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size855 = 0; - $_ktype856 = 0; - $_vtype857 = 0; - $xfer += $input->readMapBegin($_ktype856, $_vtype857, $_size855); - for ($_i859 = 0; $_i859 < $_size855; ++$_i859) + $_size862 = 0; + $_ktype863 = 0; + $_vtype864 = 0; + $xfer += $input->readMapBegin($_ktype863, $_vtype864, $_size862); + for ($_i866 = 0; $_i866 < $_size862; ++$_i866) { - $key860 = ''; - $val861 = new \metastore\Type(); - $xfer += $input->readString($key860); - $val861 = new \metastore\Type(); - $xfer += $val861->read($input); - $this->success[$key860] = $val861; + $key867 = ''; + $val868 = new \metastore\Type(); + $xfer += $input->readString($key867); + $val868 = new \metastore\Type(); + $xfer += $val868->read($input); + $this->success[$key867] = $val868; } $xfer += $input->readMapEnd(); } else { @@ -16765,10 +16829,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter862 => $viter863) + foreach ($this->success as $kiter869 => $viter870) { - $xfer += $output->writeString($kiter862); - $xfer += $viter863->write($output); + $xfer += $output->writeString($kiter869); + $xfer += $viter870->write($output); } } $output->writeMapEnd(); @@ -16972,15 +17036,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size864 = 0; - $_etype867 = 0; - $xfer += $input->readListBegin($_etype867, $_size864); - for ($_i868 = 0; $_i868 < $_size864; ++$_i868) + $_size871 = 0; + $_etype874 = 0; + $xfer += $input->readListBegin($_etype874, $_size871); + for ($_i875 = 0; $_i875 < $_size871; ++$_i875) { - $elem869 = null; - $elem869 = new \metastore\FieldSchema(); - $xfer += $elem869->read($input); - $this->success []= $elem869; + $elem876 = null; + $elem876 = new \metastore\FieldSchema(); + $xfer += 
$elem876->read($input); + $this->success []= $elem876; } $xfer += $input->readListEnd(); } else { @@ -17032,9 +17096,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter870) + foreach ($this->success as $iter877) { - $xfer += $iter870->write($output); + $xfer += $iter877->write($output); } } $output->writeListEnd(); @@ -17276,15 +17340,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size871 = 0; - $_etype874 = 0; - $xfer += $input->readListBegin($_etype874, $_size871); - for ($_i875 = 0; $_i875 < $_size871; ++$_i875) + $_size878 = 0; + $_etype881 = 0; + $xfer += $input->readListBegin($_etype881, $_size878); + for ($_i882 = 0; $_i882 < $_size878; ++$_i882) { - $elem876 = null; - $elem876 = new \metastore\FieldSchema(); - $xfer += $elem876->read($input); - $this->success []= $elem876; + $elem883 = null; + $elem883 = new \metastore\FieldSchema(); + $xfer += $elem883->read($input); + $this->success []= $elem883; } $xfer += $input->readListEnd(); } else { @@ -17336,9 +17400,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter877) + foreach ($this->success as $iter884) { - $xfer += $iter877->write($output); + $xfer += $iter884->write($output); } } $output->writeListEnd(); @@ -17552,15 +17616,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size878 = 0; - $_etype881 = 0; - $xfer += $input->readListBegin($_etype881, $_size878); - for ($_i882 = 0; $_i882 < $_size878; ++$_i882) + $_size885 = 0; + $_etype888 = 0; + $xfer += $input->readListBegin($_etype888, $_size885); + for ($_i889 = 0; $_i889 < $_size885; ++$_i889) { - $elem883 = null; - $elem883 = new \metastore\FieldSchema(); - $xfer += $elem883->read($input); - $this->success []= $elem883; + $elem890 = null; + $elem890 = new \metastore\FieldSchema(); + $xfer += $elem890->read($input); + $this->success []= $elem890; } $xfer += $input->readListEnd(); } else { @@ -17612,9 +17676,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter884) + foreach ($this->success as $iter891) { - $xfer += $iter884->write($output); + $xfer += $iter891->write($output); } } $output->writeListEnd(); @@ -17856,15 +17920,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size885 = 0; - $_etype888 = 0; - $xfer += $input->readListBegin($_etype888, $_size885); - for ($_i889 = 0; $_i889 < $_size885; ++$_i889) + $_size892 = 0; + $_etype895 = 0; + $xfer += $input->readListBegin($_etype895, $_size892); + for ($_i896 = 0; $_i896 < $_size892; ++$_i896) { - $elem890 = null; - $elem890 = new \metastore\FieldSchema(); - $xfer += $elem890->read($input); - $this->success []= $elem890; + $elem897 = null; + $elem897 = new \metastore\FieldSchema(); + $xfer += $elem897->read($input); + $this->success []= $elem897; } $xfer += $input->readListEnd(); } else { @@ -17916,9 +17980,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter891) + foreach ($this->success as $iter898) { - $xfer += 
$iter891->write($output); + $xfer += $iter898->write($output); } } $output->writeListEnd(); @@ -18590,15 +18654,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size892 = 0; - $_etype895 = 0; - $xfer += $input->readListBegin($_etype895, $_size892); - for ($_i896 = 0; $_i896 < $_size892; ++$_i896) + $_size899 = 0; + $_etype902 = 0; + $xfer += $input->readListBegin($_etype902, $_size899); + for ($_i903 = 0; $_i903 < $_size899; ++$_i903) { - $elem897 = null; - $elem897 = new \metastore\SQLPrimaryKey(); - $xfer += $elem897->read($input); - $this->primaryKeys []= $elem897; + $elem904 = null; + $elem904 = new \metastore\SQLPrimaryKey(); + $xfer += $elem904->read($input); + $this->primaryKeys []= $elem904; } $xfer += $input->readListEnd(); } else { @@ -18608,15 +18672,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size898 = 0; - $_etype901 = 0; - $xfer += $input->readListBegin($_etype901, $_size898); - for ($_i902 = 0; $_i902 < $_size898; ++$_i902) + $_size905 = 0; + $_etype908 = 0; + $xfer += $input->readListBegin($_etype908, $_size905); + for ($_i909 = 0; $_i909 < $_size905; ++$_i909) { - $elem903 = null; - $elem903 = new \metastore\SQLForeignKey(); - $xfer += $elem903->read($input); - $this->foreignKeys []= $elem903; + $elem910 = null; + $elem910 = new \metastore\SQLForeignKey(); + $xfer += $elem910->read($input); + $this->foreignKeys []= $elem910; } $xfer += $input->readListEnd(); } else { @@ -18626,15 +18690,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size904 = 0; - $_etype907 = 0; - $xfer += $input->readListBegin($_etype907, $_size904); - for ($_i908 = 0; $_i908 < $_size904; ++$_i908) + $_size911 = 0; + $_etype914 = 0; + $xfer += $input->readListBegin($_etype914, $_size911); + for ($_i915 = 0; $_i915 < $_size911; ++$_i915) { - $elem909 = null; - $elem909 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem909->read($input); - $this->uniqueConstraints []= $elem909; + $elem916 = null; + $elem916 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem916->read($input); + $this->uniqueConstraints []= $elem916; } $xfer += $input->readListEnd(); } else { @@ -18644,15 +18708,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size910 = 0; - $_etype913 = 0; - $xfer += $input->readListBegin($_etype913, $_size910); - for ($_i914 = 0; $_i914 < $_size910; ++$_i914) + $_size917 = 0; + $_etype920 = 0; + $xfer += $input->readListBegin($_etype920, $_size917); + for ($_i921 = 0; $_i921 < $_size917; ++$_i921) { - $elem915 = null; - $elem915 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem915->read($input); - $this->notNullConstraints []= $elem915; + $elem922 = null; + $elem922 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem922->read($input); + $this->notNullConstraints []= $elem922; } $xfer += $input->readListEnd(); } else { @@ -18662,15 +18726,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size916 = 0; - $_etype919 = 0; - $xfer += $input->readListBegin($_etype919, $_size916); - for ($_i920 = 0; $_i920 < $_size916; ++$_i920) + $_size923 = 0; + $_etype926 = 0; + $xfer += $input->readListBegin($_etype926, $_size923); + 
for ($_i927 = 0; $_i927 < $_size923; ++$_i927) { - $elem921 = null; - $elem921 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem921->read($input); - $this->defaultConstraints []= $elem921; + $elem928 = null; + $elem928 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem928->read($input); + $this->defaultConstraints []= $elem928; } $xfer += $input->readListEnd(); } else { @@ -18680,15 +18744,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 7: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size922 = 0; - $_etype925 = 0; - $xfer += $input->readListBegin($_etype925, $_size922); - for ($_i926 = 0; $_i926 < $_size922; ++$_i926) + $_size929 = 0; + $_etype932 = 0; + $xfer += $input->readListBegin($_etype932, $_size929); + for ($_i933 = 0; $_i933 < $_size929; ++$_i933) { - $elem927 = null; - $elem927 = new \metastore\SQLCheckConstraint(); - $xfer += $elem927->read($input); - $this->checkConstraints []= $elem927; + $elem934 = null; + $elem934 = new \metastore\SQLCheckConstraint(); + $xfer += $elem934->read($input); + $this->checkConstraints []= $elem934; } $xfer += $input->readListEnd(); } else { @@ -18724,9 +18788,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter928) + foreach ($this->primaryKeys as $iter935) { - $xfer += $iter928->write($output); + $xfer += $iter935->write($output); } } $output->writeListEnd(); @@ -18741,9 +18805,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter929) + foreach ($this->foreignKeys as $iter936) { - $xfer += $iter929->write($output); + $xfer += $iter936->write($output); } } $output->writeListEnd(); @@ -18758,9 +18822,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter930) + foreach ($this->uniqueConstraints as $iter937) { - $xfer += $iter930->write($output); + $xfer += $iter937->write($output); } } $output->writeListEnd(); @@ -18775,9 +18839,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter931) + foreach ($this->notNullConstraints as $iter938) { - $xfer += $iter931->write($output); + $xfer += $iter938->write($output); } } $output->writeListEnd(); @@ -18792,9 +18856,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter932) + foreach ($this->defaultConstraints as $iter939) { - $xfer += $iter932->write($output); + $xfer += $iter939->write($output); } } $output->writeListEnd(); @@ -18809,9 +18873,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter933) + foreach ($this->checkConstraints as $iter940) { - $xfer += $iter933->write($output); + $xfer += $iter940->write($output); } } $output->writeListEnd(); @@ -20811,14 +20875,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size934 = 0; - $_etype937 = 0; - $xfer += $input->readListBegin($_etype937, 
$_size934); - for ($_i938 = 0; $_i938 < $_size934; ++$_i938) + $_size941 = 0; + $_etype944 = 0; + $xfer += $input->readListBegin($_etype944, $_size941); + for ($_i945 = 0; $_i945 < $_size941; ++$_i945) { - $elem939 = null; - $xfer += $input->readString($elem939); - $this->partNames []= $elem939; + $elem946 = null; + $xfer += $input->readString($elem946); + $this->partNames []= $elem946; } $xfer += $input->readListEnd(); } else { @@ -20856,9 +20920,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter940) + foreach ($this->partNames as $iter947) { - $xfer += $output->writeString($iter940); + $xfer += $output->writeString($iter947); } } $output->writeListEnd(); @@ -21109,14 +21173,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size941 = 0; - $_etype944 = 0; - $xfer += $input->readListBegin($_etype944, $_size941); - for ($_i945 = 0; $_i945 < $_size941; ++$_i945) + $_size948 = 0; + $_etype951 = 0; + $xfer += $input->readListBegin($_etype951, $_size948); + for ($_i952 = 0; $_i952 < $_size948; ++$_i952) { - $elem946 = null; - $xfer += $input->readString($elem946); - $this->success []= $elem946; + $elem953 = null; + $xfer += $input->readString($elem953); + $this->success []= $elem953; } $xfer += $input->readListEnd(); } else { @@ -21152,9 +21216,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter947) + foreach ($this->success as $iter954) { - $xfer += $output->writeString($iter947); + $xfer += $output->writeString($iter954); } } $output->writeListEnd(); @@ -21356,14 +21420,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size948 = 0; - $_etype951 = 0; - $xfer += $input->readListBegin($_etype951, $_size948); - for ($_i952 = 0; $_i952 < $_size948; ++$_i952) + $_size955 = 0; + $_etype958 = 0; + $xfer += $input->readListBegin($_etype958, $_size955); + for ($_i959 = 0; $_i959 < $_size955; ++$_i959) { - $elem953 = null; - $xfer += $input->readString($elem953); - $this->success []= $elem953; + $elem960 = null; + $xfer += $input->readString($elem960); + $this->success []= $elem960; } $xfer += $input->readListEnd(); } else { @@ -21399,9 +21463,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter954) + foreach ($this->success as $iter961) { - $xfer += $output->writeString($iter954); + $xfer += $output->writeString($iter961); } } $output->writeListEnd(); @@ -21557,14 +21621,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size955 = 0; - $_etype958 = 0; - $xfer += $input->readListBegin($_etype958, $_size955); - for ($_i959 = 0; $_i959 < $_size955; ++$_i959) + $_size962 = 0; + $_etype965 = 0; + $xfer += $input->readListBegin($_etype965, $_size962); + for ($_i966 = 0; $_i966 < $_size962; ++$_i966) { - $elem960 = null; - $xfer += $input->readString($elem960); - $this->success []= $elem960; + $elem967 = null; + $xfer += $input->readString($elem967); + $this->success []= $elem967; } $xfer += $input->readListEnd(); } else { @@ -21600,9 +21664,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, 
count($this->success)); { - foreach ($this->success as $iter961) + foreach ($this->success as $iter968) { - $xfer += $output->writeString($iter961); + $xfer += $output->writeString($iter968); } } $output->writeListEnd(); @@ -21707,14 +21771,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size962 = 0; - $_etype965 = 0; - $xfer += $input->readListBegin($_etype965, $_size962); - for ($_i966 = 0; $_i966 < $_size962; ++$_i966) + $_size969 = 0; + $_etype972 = 0; + $xfer += $input->readListBegin($_etype972, $_size969); + for ($_i973 = 0; $_i973 < $_size969; ++$_i973) { - $elem967 = null; - $xfer += $input->readString($elem967); - $this->tbl_types []= $elem967; + $elem974 = null; + $xfer += $input->readString($elem974); + $this->tbl_types []= $elem974; } $xfer += $input->readListEnd(); } else { @@ -21752,9 +21816,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter968) + foreach ($this->tbl_types as $iter975) { - $xfer += $output->writeString($iter968); + $xfer += $output->writeString($iter975); } } $output->writeListEnd(); @@ -21831,15 +21895,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size969 = 0; - $_etype972 = 0; - $xfer += $input->readListBegin($_etype972, $_size969); - for ($_i973 = 0; $_i973 < $_size969; ++$_i973) + $_size976 = 0; + $_etype979 = 0; + $xfer += $input->readListBegin($_etype979, $_size976); + for ($_i980 = 0; $_i980 < $_size976; ++$_i980) { - $elem974 = null; - $elem974 = new \metastore\TableMeta(); - $xfer += $elem974->read($input); - $this->success []= $elem974; + $elem981 = null; + $elem981 = new \metastore\TableMeta(); + $xfer += $elem981->read($input); + $this->success []= $elem981; } $xfer += $input->readListEnd(); } else { @@ -21875,9 +21939,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter975) + foreach ($this->success as $iter982) { - $xfer += $iter975->write($output); + $xfer += $iter982->write($output); } } $output->writeListEnd(); @@ -22033,14 +22097,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size976 = 0; - $_etype979 = 0; - $xfer += $input->readListBegin($_etype979, $_size976); - for ($_i980 = 0; $_i980 < $_size976; ++$_i980) + $_size983 = 0; + $_etype986 = 0; + $xfer += $input->readListBegin($_etype986, $_size983); + for ($_i987 = 0; $_i987 < $_size983; ++$_i987) { - $elem981 = null; - $xfer += $input->readString($elem981); - $this->success []= $elem981; + $elem988 = null; + $xfer += $input->readString($elem988); + $this->success []= $elem988; } $xfer += $input->readListEnd(); } else { @@ -22076,9 +22140,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter982) + foreach ($this->success as $iter989) { - $xfer += $output->writeString($iter982); + $xfer += $output->writeString($iter989); } } $output->writeListEnd(); @@ -22393,14 +22457,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size983 = 0; - $_etype986 = 0; - $xfer += $input->readListBegin($_etype986, $_size983); - for ($_i987 = 0; $_i987 < $_size983; ++$_i987) + $_size990 = 0; + 
$_etype993 = 0; + $xfer += $input->readListBegin($_etype993, $_size990); + for ($_i994 = 0; $_i994 < $_size990; ++$_i994) { - $elem988 = null; - $xfer += $input->readString($elem988); - $this->tbl_names []= $elem988; + $elem995 = null; + $xfer += $input->readString($elem995); + $this->tbl_names []= $elem995; } $xfer += $input->readListEnd(); } else { @@ -22433,9 +22497,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter989) + foreach ($this->tbl_names as $iter996) { - $xfer += $output->writeString($iter989); + $xfer += $output->writeString($iter996); } } $output->writeListEnd(); @@ -22500,15 +22564,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size990 = 0; - $_etype993 = 0; - $xfer += $input->readListBegin($_etype993, $_size990); - for ($_i994 = 0; $_i994 < $_size990; ++$_i994) + $_size997 = 0; + $_etype1000 = 0; + $xfer += $input->readListBegin($_etype1000, $_size997); + for ($_i1001 = 0; $_i1001 < $_size997; ++$_i1001) { - $elem995 = null; - $elem995 = new \metastore\Table(); - $xfer += $elem995->read($input); - $this->success []= $elem995; + $elem1002 = null; + $elem1002 = new \metastore\Table(); + $xfer += $elem1002->read($input); + $this->success []= $elem1002; } $xfer += $input->readListEnd(); } else { @@ -22536,9 +22600,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter996) + foreach ($this->success as $iter1003) { - $xfer += $iter996->write($output); + $xfer += $iter1003->write($output); } } $output->writeListEnd(); @@ -23065,14 +23129,14 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size997 = 0; - $_etype1000 = 0; - $xfer += $input->readListBegin($_etype1000, $_size997); - for ($_i1001 = 0; $_i1001 < $_size997; ++$_i1001) + $_size1004 = 0; + $_etype1007 = 0; + $xfer += $input->readListBegin($_etype1007, $_size1004); + for ($_i1008 = 0; $_i1008 < $_size1004; ++$_i1008) { - $elem1002 = null; - $xfer += $input->readString($elem1002); - $this->tbl_names []= $elem1002; + $elem1009 = null; + $xfer += $input->readString($elem1009); + $this->tbl_names []= $elem1009; } $xfer += $input->readListEnd(); } else { @@ -23105,9 +23169,9 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter1003) + foreach ($this->tbl_names as $iter1010) { - $xfer += $output->writeString($iter1003); + $xfer += $output->writeString($iter1010); } } $output->writeListEnd(); @@ -23212,18 +23276,18 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1004 = 0; - $_ktype1005 = 0; - $_vtype1006 = 0; - $xfer += $input->readMapBegin($_ktype1005, $_vtype1006, $_size1004); - for ($_i1008 = 0; $_i1008 < $_size1004; ++$_i1008) + $_size1011 = 0; + $_ktype1012 = 0; + $_vtype1013 = 0; + $xfer += $input->readMapBegin($_ktype1012, $_vtype1013, $_size1011); + for ($_i1015 = 0; $_i1015 < $_size1011; ++$_i1015) { - $key1009 = ''; - $val1010 = new \metastore\Materialization(); - $xfer += $input->readString($key1009); - $val1010 = new \metastore\Materialization(); - $xfer += $val1010->read($input); - 
$this->success[$key1009] = $val1010; + $key1016 = ''; + $val1017 = new \metastore\Materialization(); + $xfer += $input->readString($key1016); + $val1017 = new \metastore\Materialization(); + $xfer += $val1017->read($input); + $this->success[$key1016] = $val1017; } $xfer += $input->readMapEnd(); } else { @@ -23275,10 +23339,10 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter1011 => $viter1012) + foreach ($this->success as $kiter1018 => $viter1019) { - $xfer += $output->writeString($kiter1011); - $xfer += $viter1012->write($output); + $xfer += $output->writeString($kiter1018); + $xfer += $viter1019->write($output); } } $output->writeMapEnd(); @@ -23790,14 +23854,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1013 = 0; - $_etype1016 = 0; - $xfer += $input->readListBegin($_etype1016, $_size1013); - for ($_i1017 = 0; $_i1017 < $_size1013; ++$_i1017) + $_size1020 = 0; + $_etype1023 = 0; + $xfer += $input->readListBegin($_etype1023, $_size1020); + for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024) { - $elem1018 = null; - $xfer += $input->readString($elem1018); - $this->success []= $elem1018; + $elem1025 = null; + $xfer += $input->readString($elem1025); + $this->success []= $elem1025; } $xfer += $input->readListEnd(); } else { @@ -23849,9 +23913,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1019) + foreach ($this->success as $iter1026) { - $xfer += $output->writeString($iter1019); + $xfer += $output->writeString($iter1026); } } $output->writeListEnd(); @@ -25164,266 +25228,13 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1020 = 0; - $_etype1023 = 0; - $xfer += $input->readListBegin($_etype1023, $_size1020); - for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024) - { - $elem1025 = null; - $elem1025 = new \metastore\Partition(); - $xfer += $elem1025->read($input); - $this->new_parts []= $elem1025; - } - $xfer += $input->readListEnd(); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_args'); - if ($this->new_parts !== null) { - if (!is_array($this->new_parts)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('new_parts', TType::LST, 1); - { - $output->writeListBegin(TType::STRUCT, count($this->new_parts)); - { - foreach ($this->new_parts as $iter1026) - { - $xfer += $iter1026->write($output); - } - } - $output->writeListEnd(); - } - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_add_partitions_result { - static $_TSPEC; - - /** - * @var int - */ - public $success = null; - /** - * @var \metastore\InvalidObjectException - */ - public $o1 = null; - /** - * @var \metastore\AlreadyExistsException - */ - public $o2 = null; - /** - * @var \metastore\MetaException - */ - public $o3 = null; - - public 
function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::I32, - ), - 1 => array( - 'var' => 'o1', - 'type' => TType::STRUCT, - 'class' => '\metastore\InvalidObjectException', - ), - 2 => array( - 'var' => 'o2', - 'type' => TType::STRUCT, - 'class' => '\metastore\AlreadyExistsException', - ), - 3 => array( - 'var' => 'o3', - 'type' => TType::STRUCT, - 'class' => '\metastore\MetaException', - ), - ); - } - if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; - } - if (isset($vals['o2'])) { - $this->o2 = $vals['o2']; - } - if (isset($vals['o3'])) { - $this->o3 = $vals['o3']; - } - } - } - - public function getName() { - return 'ThriftHiveMetastore_add_partitions_result'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 0: - if ($ftype == TType::I32) { - $xfer += $input->readI32($this->success); - } else { - $xfer += $input->skip($ftype); - } - break; - case 1: - if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\InvalidObjectException(); - $xfer += $this->o1->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - case 2: - if ($ftype == TType::STRUCT) { - $this->o2 = new \metastore\AlreadyExistsException(); - $xfer += $this->o2->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - case 3: - if ($ftype == TType::STRUCT) { - $this->o3 = new \metastore\MetaException(); - $xfer += $this->o3->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_result'); - if ($this->success !== null) { - $xfer += $output->writeFieldBegin('success', TType::I32, 0); - $xfer += $output->writeI32($this->success); - $xfer += $output->writeFieldEnd(); - } - if ($this->o1 !== null) { - $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); - $xfer += $this->o1->write($output); - $xfer += $output->writeFieldEnd(); - } - if ($this->o2 !== null) { - $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); - $xfer += $this->o2->write($output); - $xfer += $output->writeFieldEnd(); - } - if ($this->o3 !== null) { - $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); - $xfer += $this->o3->write($output); - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_add_partitions_pspec_args { - static $_TSPEC; - - /** - * @var \metastore\PartitionSpec[] - */ - public $new_parts = null; - - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 1 => array( - 'var' => 'new_parts', - 'type' => TType::LST, - 'etype' => TType::STRUCT, - 'elem' => array( - 'type' => TType::STRUCT, - 'class' => '\metastore\PartitionSpec', - ), - ), - ); - } - if (is_array($vals)) { - if (isset($vals['new_parts'])) { - $this->new_parts = $vals['new_parts']; - } - } - } - - public function getName() { - return 
'ThriftHiveMetastore_add_partitions_pspec_args'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 1: - if ($ftype == TType::LST) { - $this->new_parts = array(); $_size1027 = 0; $_etype1030 = 0; $xfer += $input->readListBegin($_etype1030, $_size1027); for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031) { $elem1032 = null; - $elem1032 = new \metastore\PartitionSpec(); + $elem1032 = new \metastore\Partition(); $xfer += $elem1032->read($input); $this->new_parts []= $elem1032; } @@ -25444,7 +25255,7 @@ class ThriftHiveMetastore_add_partitions_pspec_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_pspec_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_args'); if ($this->new_parts !== null) { if (!is_array($this->new_parts)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -25469,6 +25280,259 @@ class ThriftHiveMetastore_add_partitions_pspec_args { } +class ThriftHiveMetastore_add_partitions_result { + static $_TSPEC; + + /** + * @var int + */ + public $success = null; + /** + * @var \metastore\InvalidObjectException + */ + public $o1 = null; + /** + * @var \metastore\AlreadyExistsException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::I32, + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\AlreadyExistsException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_add_partitions_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\InvalidObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\AlreadyExistsException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += 
$input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_result'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', TType::I32, 0); + $xfer += $output->writeI32($this->success); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_add_partitions_pspec_args { + static $_TSPEC; + + /** + * @var \metastore\PartitionSpec[] + */ + public $new_parts = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'new_parts', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\PartitionSpec', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['new_parts'])) { + $this->new_parts = $vals['new_parts']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_add_partitions_pspec_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->new_parts = array(); + $_size1034 = 0; + $_etype1037 = 0; + $xfer += $input->readListBegin($_etype1037, $_size1034); + for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038) + { + $elem1039 = null; + $elem1039 = new \metastore\PartitionSpec(); + $xfer += $elem1039->read($input); + $this->new_parts []= $elem1039; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_pspec_args'); + if ($this->new_parts !== null) { + if (!is_array($this->new_parts)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('new_parts', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->new_parts)); + { + foreach ($this->new_parts as $iter1040) + { + $xfer += $iter1040->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_add_partitions_pspec_result { static $_TSPEC; @@ -25705,14 +25769,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1034 = 0; - $_etype1037 = 0; - $xfer += $input->readListBegin($_etype1037, $_size1034); - for ($_i1038 = 0; $_i1038 
< $_size1034; ++$_i1038) + $_size1041 = 0; + $_etype1044 = 0; + $xfer += $input->readListBegin($_etype1044, $_size1041); + for ($_i1045 = 0; $_i1045 < $_size1041; ++$_i1045) { - $elem1039 = null; - $xfer += $input->readString($elem1039); - $this->part_vals []= $elem1039; + $elem1046 = null; + $xfer += $input->readString($elem1046); + $this->part_vals []= $elem1046; } $xfer += $input->readListEnd(); } else { @@ -25750,9 +25814,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1040) + foreach ($this->part_vals as $iter1047) { - $xfer += $output->writeString($iter1040); + $xfer += $output->writeString($iter1047); } } $output->writeListEnd(); @@ -26254,14 +26318,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1041 = 0; - $_etype1044 = 0; - $xfer += $input->readListBegin($_etype1044, $_size1041); - for ($_i1045 = 0; $_i1045 < $_size1041; ++$_i1045) + $_size1048 = 0; + $_etype1051 = 0; + $xfer += $input->readListBegin($_etype1051, $_size1048); + for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052) { - $elem1046 = null; - $xfer += $input->readString($elem1046); - $this->part_vals []= $elem1046; + $elem1053 = null; + $xfer += $input->readString($elem1053); + $this->part_vals []= $elem1053; } $xfer += $input->readListEnd(); } else { @@ -26307,9 +26371,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1047) + foreach ($this->part_vals as $iter1054) { - $xfer += $output->writeString($iter1047); + $xfer += $output->writeString($iter1054); } } $output->writeListEnd(); @@ -27163,14 +27227,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1048 = 0; - $_etype1051 = 0; - $xfer += $input->readListBegin($_etype1051, $_size1048); - for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052) + $_size1055 = 0; + $_etype1058 = 0; + $xfer += $input->readListBegin($_etype1058, $_size1055); + for ($_i1059 = 0; $_i1059 < $_size1055; ++$_i1059) { - $elem1053 = null; - $xfer += $input->readString($elem1053); - $this->part_vals []= $elem1053; + $elem1060 = null; + $xfer += $input->readString($elem1060); + $this->part_vals []= $elem1060; } $xfer += $input->readListEnd(); } else { @@ -27215,9 +27279,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1054) + foreach ($this->part_vals as $iter1061) { - $xfer += $output->writeString($iter1054); + $xfer += $output->writeString($iter1061); } } $output->writeListEnd(); @@ -27470,14 +27534,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1055 = 0; - $_etype1058 = 0; - $xfer += $input->readListBegin($_etype1058, $_size1055); - for ($_i1059 = 0; $_i1059 < $_size1055; ++$_i1059) + $_size1062 = 0; + $_etype1065 = 0; + $xfer += $input->readListBegin($_etype1065, $_size1062); + for ($_i1066 = 0; $_i1066 < $_size1062; ++$_i1066) { - $elem1060 = null; - $xfer += $input->readString($elem1060); - $this->part_vals []= $elem1060; + $elem1067 = null; + $xfer += $input->readString($elem1067); + $this->part_vals []= $elem1067; } $xfer += $input->readListEnd(); } else { @@ -27530,9 
+27594,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1061) + foreach ($this->part_vals as $iter1068) { - $xfer += $output->writeString($iter1061); + $xfer += $output->writeString($iter1068); } } $output->writeListEnd(); @@ -28546,14 +28610,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1062 = 0; - $_etype1065 = 0; - $xfer += $input->readListBegin($_etype1065, $_size1062); - for ($_i1066 = 0; $_i1066 < $_size1062; ++$_i1066) + $_size1069 = 0; + $_etype1072 = 0; + $xfer += $input->readListBegin($_etype1072, $_size1069); + for ($_i1073 = 0; $_i1073 < $_size1069; ++$_i1073) { - $elem1067 = null; - $xfer += $input->readString($elem1067); - $this->part_vals []= $elem1067; + $elem1074 = null; + $xfer += $input->readString($elem1074); + $this->part_vals []= $elem1074; } $xfer += $input->readListEnd(); } else { @@ -28591,9 +28655,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1068) + foreach ($this->part_vals as $iter1075) { - $xfer += $output->writeString($iter1068); + $xfer += $output->writeString($iter1075); } } $output->writeListEnd(); @@ -28835,17 +28899,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1069 = 0; - $_ktype1070 = 0; - $_vtype1071 = 0; - $xfer += $input->readMapBegin($_ktype1070, $_vtype1071, $_size1069); - for ($_i1073 = 0; $_i1073 < $_size1069; ++$_i1073) + $_size1076 = 0; + $_ktype1077 = 0; + $_vtype1078 = 0; + $xfer += $input->readMapBegin($_ktype1077, $_vtype1078, $_size1076); + for ($_i1080 = 0; $_i1080 < $_size1076; ++$_i1080) { - $key1074 = ''; - $val1075 = ''; - $xfer += $input->readString($key1074); - $xfer += $input->readString($val1075); - $this->partitionSpecs[$key1074] = $val1075; + $key1081 = ''; + $val1082 = ''; + $xfer += $input->readString($key1081); + $xfer += $input->readString($val1082); + $this->partitionSpecs[$key1081] = $val1082; } $xfer += $input->readMapEnd(); } else { @@ -28901,10 +28965,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1076 => $viter1077) + foreach ($this->partitionSpecs as $kiter1083 => $viter1084) { - $xfer += $output->writeString($kiter1076); - $xfer += $output->writeString($viter1077); + $xfer += $output->writeString($kiter1083); + $xfer += $output->writeString($viter1084); } } $output->writeMapEnd(); @@ -29216,17 +29280,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1078 = 0; - $_ktype1079 = 0; - $_vtype1080 = 0; - $xfer += $input->readMapBegin($_ktype1079, $_vtype1080, $_size1078); - for ($_i1082 = 0; $_i1082 < $_size1078; ++$_i1082) + $_size1085 = 0; + $_ktype1086 = 0; + $_vtype1087 = 0; + $xfer += $input->readMapBegin($_ktype1086, $_vtype1087, $_size1085); + for ($_i1089 = 0; $_i1089 < $_size1085; ++$_i1089) { - $key1083 = ''; - $val1084 = ''; - $xfer += $input->readString($key1083); - $xfer += $input->readString($val1084); - $this->partitionSpecs[$key1083] = $val1084; + $key1090 = ''; + $val1091 = ''; + $xfer += $input->readString($key1090); + $xfer += $input->readString($val1091); + 
$this->partitionSpecs[$key1090] = $val1091; } $xfer += $input->readMapEnd(); } else { @@ -29282,10 +29346,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1085 => $viter1086) + foreach ($this->partitionSpecs as $kiter1092 => $viter1093) { - $xfer += $output->writeString($kiter1085); - $xfer += $output->writeString($viter1086); + $xfer += $output->writeString($kiter1092); + $xfer += $output->writeString($viter1093); } } $output->writeMapEnd(); @@ -29418,15 +29482,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1087 = 0; - $_etype1090 = 0; - $xfer += $input->readListBegin($_etype1090, $_size1087); - for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091) + $_size1094 = 0; + $_etype1097 = 0; + $xfer += $input->readListBegin($_etype1097, $_size1094); + for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) { - $elem1092 = null; - $elem1092 = new \metastore\Partition(); - $xfer += $elem1092->read($input); - $this->success []= $elem1092; + $elem1099 = null; + $elem1099 = new \metastore\Partition(); + $xfer += $elem1099->read($input); + $this->success []= $elem1099; } $xfer += $input->readListEnd(); } else { @@ -29486,9 +29550,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1093) + foreach ($this->success as $iter1100) { - $xfer += $iter1093->write($output); + $xfer += $iter1100->write($output); } } $output->writeListEnd(); @@ -29634,14 +29698,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1094 = 0; - $_etype1097 = 0; - $xfer += $input->readListBegin($_etype1097, $_size1094); - for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) + $_size1101 = 0; + $_etype1104 = 0; + $xfer += $input->readListBegin($_etype1104, $_size1101); + for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105) { - $elem1099 = null; - $xfer += $input->readString($elem1099); - $this->part_vals []= $elem1099; + $elem1106 = null; + $xfer += $input->readString($elem1106); + $this->part_vals []= $elem1106; } $xfer += $input->readListEnd(); } else { @@ -29658,14 +29722,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1100 = 0; - $_etype1103 = 0; - $xfer += $input->readListBegin($_etype1103, $_size1100); - for ($_i1104 = 0; $_i1104 < $_size1100; ++$_i1104) + $_size1107 = 0; + $_etype1110 = 0; + $xfer += $input->readListBegin($_etype1110, $_size1107); + for ($_i1111 = 0; $_i1111 < $_size1107; ++$_i1111) { - $elem1105 = null; - $xfer += $input->readString($elem1105); - $this->group_names []= $elem1105; + $elem1112 = null; + $xfer += $input->readString($elem1112); + $this->group_names []= $elem1112; } $xfer += $input->readListEnd(); } else { @@ -29703,9 +29767,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1106) + foreach ($this->part_vals as $iter1113) { - $xfer += $output->writeString($iter1106); + $xfer += $output->writeString($iter1113); } } $output->writeListEnd(); @@ -29725,9 +29789,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach 
($this->group_names as $iter1107) + foreach ($this->group_names as $iter1114) { - $xfer += $output->writeString($iter1107); + $xfer += $output->writeString($iter1114); } } $output->writeListEnd(); @@ -30318,15 +30382,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1108 = 0; - $_etype1111 = 0; - $xfer += $input->readListBegin($_etype1111, $_size1108); - for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112) + $_size1115 = 0; + $_etype1118 = 0; + $xfer += $input->readListBegin($_etype1118, $_size1115); + for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) { - $elem1113 = null; - $elem1113 = new \metastore\Partition(); - $xfer += $elem1113->read($input); - $this->success []= $elem1113; + $elem1120 = null; + $elem1120 = new \metastore\Partition(); + $xfer += $elem1120->read($input); + $this->success []= $elem1120; } $xfer += $input->readListEnd(); } else { @@ -30370,9 +30434,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1114) + foreach ($this->success as $iter1121) { - $xfer += $iter1114->write($output); + $xfer += $iter1121->write($output); } } $output->writeListEnd(); @@ -30518,14 +30582,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1115 = 0; - $_etype1118 = 0; - $xfer += $input->readListBegin($_etype1118, $_size1115); - for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) + $_size1122 = 0; + $_etype1125 = 0; + $xfer += $input->readListBegin($_etype1125, $_size1122); + for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) { - $elem1120 = null; - $xfer += $input->readString($elem1120); - $this->group_names []= $elem1120; + $elem1127 = null; + $xfer += $input->readString($elem1127); + $this->group_names []= $elem1127; } $xfer += $input->readListEnd(); } else { @@ -30573,9 +30637,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1121) + foreach ($this->group_names as $iter1128) { - $xfer += $output->writeString($iter1121); + $xfer += $output->writeString($iter1128); } } $output->writeListEnd(); @@ -30664,15 +30728,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1122 = 0; - $_etype1125 = 0; - $xfer += $input->readListBegin($_etype1125, $_size1122); - for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) + $_size1129 = 0; + $_etype1132 = 0; + $xfer += $input->readListBegin($_etype1132, $_size1129); + for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) { - $elem1127 = null; - $elem1127 = new \metastore\Partition(); - $xfer += $elem1127->read($input); - $this->success []= $elem1127; + $elem1134 = null; + $elem1134 = new \metastore\Partition(); + $xfer += $elem1134->read($input); + $this->success []= $elem1134; } $xfer += $input->readListEnd(); } else { @@ -30716,9 +30780,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1128) + foreach ($this->success as $iter1135) { - $xfer += $iter1128->write($output); + $xfer += $iter1135->write($output); } } $output->writeListEnd(); @@ -30938,15 +31002,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - 
$_size1129 = 0; - $_etype1132 = 0; - $xfer += $input->readListBegin($_etype1132, $_size1129); - for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) + $_size1136 = 0; + $_etype1139 = 0; + $xfer += $input->readListBegin($_etype1139, $_size1136); + for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) { - $elem1134 = null; - $elem1134 = new \metastore\PartitionSpec(); - $xfer += $elem1134->read($input); - $this->success []= $elem1134; + $elem1141 = null; + $elem1141 = new \metastore\PartitionSpec(); + $xfer += $elem1141->read($input); + $this->success []= $elem1141; } $xfer += $input->readListEnd(); } else { @@ -30990,9 +31054,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1135) + foreach ($this->success as $iter1142) { - $xfer += $iter1135->write($output); + $xfer += $iter1142->write($output); } } $output->writeListEnd(); @@ -31211,14 +31275,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1136 = 0; - $_etype1139 = 0; - $xfer += $input->readListBegin($_etype1139, $_size1136); - for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) + $_size1143 = 0; + $_etype1146 = 0; + $xfer += $input->readListBegin($_etype1146, $_size1143); + for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) { - $elem1141 = null; - $xfer += $input->readString($elem1141); - $this->success []= $elem1141; + $elem1148 = null; + $xfer += $input->readString($elem1148); + $this->success []= $elem1148; } $xfer += $input->readListEnd(); } else { @@ -31262,9 +31326,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1142) + foreach ($this->success as $iter1149) { - $xfer += $output->writeString($iter1142); + $xfer += $output->writeString($iter1149); } } $output->writeListEnd(); @@ -31595,14 +31659,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1143 = 0; - $_etype1146 = 0; - $xfer += $input->readListBegin($_etype1146, $_size1143); - for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) + $_size1150 = 0; + $_etype1153 = 0; + $xfer += $input->readListBegin($_etype1153, $_size1150); + for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) { - $elem1148 = null; - $xfer += $input->readString($elem1148); - $this->part_vals []= $elem1148; + $elem1155 = null; + $xfer += $input->readString($elem1155); + $this->part_vals []= $elem1155; } $xfer += $input->readListEnd(); } else { @@ -31647,9 +31711,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1149) + foreach ($this->part_vals as $iter1156) { - $xfer += $output->writeString($iter1149); + $xfer += $output->writeString($iter1156); } } $output->writeListEnd(); @@ -31743,15 +31807,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1150 = 0; - $_etype1153 = 0; - $xfer += $input->readListBegin($_etype1153, $_size1150); - for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) + $_size1157 = 0; + $_etype1160 = 0; + $xfer += $input->readListBegin($_etype1160, $_size1157); + for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) { - $elem1155 = null; - $elem1155 = new \metastore\Partition(); - $xfer += $elem1155->read($input); - $this->success []= 
$elem1155; + $elem1162 = null; + $elem1162 = new \metastore\Partition(); + $xfer += $elem1162->read($input); + $this->success []= $elem1162; } $xfer += $input->readListEnd(); } else { @@ -31795,9 +31859,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1156) + foreach ($this->success as $iter1163) { - $xfer += $iter1156->write($output); + $xfer += $iter1163->write($output); } } $output->writeListEnd(); @@ -31944,14 +32008,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1157 = 0; - $_etype1160 = 0; - $xfer += $input->readListBegin($_etype1160, $_size1157); - for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) + $_size1164 = 0; + $_etype1167 = 0; + $xfer += $input->readListBegin($_etype1167, $_size1164); + for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) { - $elem1162 = null; - $xfer += $input->readString($elem1162); - $this->part_vals []= $elem1162; + $elem1169 = null; + $xfer += $input->readString($elem1169); + $this->part_vals []= $elem1169; } $xfer += $input->readListEnd(); } else { @@ -31975,14 +32039,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1163 = 0; - $_etype1166 = 0; - $xfer += $input->readListBegin($_etype1166, $_size1163); - for ($_i1167 = 0; $_i1167 < $_size1163; ++$_i1167) + $_size1170 = 0; + $_etype1173 = 0; + $xfer += $input->readListBegin($_etype1173, $_size1170); + for ($_i1174 = 0; $_i1174 < $_size1170; ++$_i1174) { - $elem1168 = null; - $xfer += $input->readString($elem1168); - $this->group_names []= $elem1168; + $elem1175 = null; + $xfer += $input->readString($elem1175); + $this->group_names []= $elem1175; } $xfer += $input->readListEnd(); } else { @@ -32020,9 +32084,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1169) + foreach ($this->part_vals as $iter1176) { - $xfer += $output->writeString($iter1169); + $xfer += $output->writeString($iter1176); } } $output->writeListEnd(); @@ -32047,9 +32111,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1170) + foreach ($this->group_names as $iter1177) { - $xfer += $output->writeString($iter1170); + $xfer += $output->writeString($iter1177); } } $output->writeListEnd(); @@ -32138,15 +32202,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1171 = 0; - $_etype1174 = 0; - $xfer += $input->readListBegin($_etype1174, $_size1171); - for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) + $_size1178 = 0; + $_etype1181 = 0; + $xfer += $input->readListBegin($_etype1181, $_size1178); + for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) { - $elem1176 = null; - $elem1176 = new \metastore\Partition(); - $xfer += $elem1176->read($input); - $this->success []= $elem1176; + $elem1183 = null; + $elem1183 = new \metastore\Partition(); + $xfer += $elem1183->read($input); + $this->success []= $elem1183; } $xfer += $input->readListEnd(); } else { @@ -32190,9 +32254,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as 
$iter1177) + foreach ($this->success as $iter1184) { - $xfer += $iter1177->write($output); + $xfer += $iter1184->write($output); } } $output->writeListEnd(); @@ -32313,14 +32377,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1178 = 0; - $_etype1181 = 0; - $xfer += $input->readListBegin($_etype1181, $_size1178); - for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) + $_size1185 = 0; + $_etype1188 = 0; + $xfer += $input->readListBegin($_etype1188, $_size1185); + for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) { - $elem1183 = null; - $xfer += $input->readString($elem1183); - $this->part_vals []= $elem1183; + $elem1190 = null; + $xfer += $input->readString($elem1190); + $this->part_vals []= $elem1190; } $xfer += $input->readListEnd(); } else { @@ -32365,9 +32429,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1184) + foreach ($this->part_vals as $iter1191) { - $xfer += $output->writeString($iter1184); + $xfer += $output->writeString($iter1191); } } $output->writeListEnd(); @@ -32460,14 +32524,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1185 = 0; - $_etype1188 = 0; - $xfer += $input->readListBegin($_etype1188, $_size1185); - for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) + $_size1192 = 0; + $_etype1195 = 0; + $xfer += $input->readListBegin($_etype1195, $_size1192); + for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) { - $elem1190 = null; - $xfer += $input->readString($elem1190); - $this->success []= $elem1190; + $elem1197 = null; + $xfer += $input->readString($elem1197); + $this->success []= $elem1197; } $xfer += $input->readListEnd(); } else { @@ -32511,9 +32575,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1191) + foreach ($this->success as $iter1198) { - $xfer += $output->writeString($iter1191); + $xfer += $output->writeString($iter1198); } } $output->writeListEnd(); @@ -32756,15 +32820,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1192 = 0; - $_etype1195 = 0; - $xfer += $input->readListBegin($_etype1195, $_size1192); - for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) + $_size1199 = 0; + $_etype1202 = 0; + $xfer += $input->readListBegin($_etype1202, $_size1199); + for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203) { - $elem1197 = null; - $elem1197 = new \metastore\Partition(); - $xfer += $elem1197->read($input); - $this->success []= $elem1197; + $elem1204 = null; + $elem1204 = new \metastore\Partition(); + $xfer += $elem1204->read($input); + $this->success []= $elem1204; } $xfer += $input->readListEnd(); } else { @@ -32808,9 +32872,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1198) + foreach ($this->success as $iter1205) { - $xfer += $iter1198->write($output); + $xfer += $iter1205->write($output); } } $output->writeListEnd(); @@ -33053,15 +33117,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1199 = 0; - $_etype1202 = 0; - $xfer += $input->readListBegin($_etype1202, $_size1199); - for 
($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203) + $_size1206 = 0; + $_etype1209 = 0; + $xfer += $input->readListBegin($_etype1209, $_size1206); + for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210) { - $elem1204 = null; - $elem1204 = new \metastore\PartitionSpec(); - $xfer += $elem1204->read($input); - $this->success []= $elem1204; + $elem1211 = null; + $elem1211 = new \metastore\PartitionSpec(); + $xfer += $elem1211->read($input); + $this->success []= $elem1211; } $xfer += $input->readListEnd(); } else { @@ -33105,9 +33169,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1205) + foreach ($this->success as $iter1212) { - $xfer += $iter1205->write($output); + $xfer += $iter1212->write($output); } } $output->writeListEnd(); @@ -33673,14 +33737,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1206 = 0; - $_etype1209 = 0; - $xfer += $input->readListBegin($_etype1209, $_size1206); - for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210) + $_size1213 = 0; + $_etype1216 = 0; + $xfer += $input->readListBegin($_etype1216, $_size1213); + for ($_i1217 = 0; $_i1217 < $_size1213; ++$_i1217) { - $elem1211 = null; - $xfer += $input->readString($elem1211); - $this->names []= $elem1211; + $elem1218 = null; + $xfer += $input->readString($elem1218); + $this->names []= $elem1218; } $xfer += $input->readListEnd(); } else { @@ -33718,9 +33782,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1212) + foreach ($this->names as $iter1219) { - $xfer += $output->writeString($iter1212); + $xfer += $output->writeString($iter1219); } } $output->writeListEnd(); @@ -33809,15 +33873,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1213 = 0; - $_etype1216 = 0; - $xfer += $input->readListBegin($_etype1216, $_size1213); - for ($_i1217 = 0; $_i1217 < $_size1213; ++$_i1217) + $_size1220 = 0; + $_etype1223 = 0; + $xfer += $input->readListBegin($_etype1223, $_size1220); + for ($_i1224 = 0; $_i1224 < $_size1220; ++$_i1224) { - $elem1218 = null; - $elem1218 = new \metastore\Partition(); - $xfer += $elem1218->read($input); - $this->success []= $elem1218; + $elem1225 = null; + $elem1225 = new \metastore\Partition(); + $xfer += $elem1225->read($input); + $this->success []= $elem1225; } $xfer += $input->readListEnd(); } else { @@ -33861,9 +33925,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1219) + foreach ($this->success as $iter1226) { - $xfer += $iter1219->write($output); + $xfer += $iter1226->write($output); } } $output->writeListEnd(); @@ -34202,15 +34266,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1220 = 0; - $_etype1223 = 0; - $xfer += $input->readListBegin($_etype1223, $_size1220); - for ($_i1224 = 0; $_i1224 < $_size1220; ++$_i1224) + $_size1227 = 0; + $_etype1230 = 0; + $xfer += $input->readListBegin($_etype1230, $_size1227); + for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231) { - $elem1225 = null; - $elem1225 = new \metastore\Partition(); - $xfer += $elem1225->read($input); - $this->new_parts []= $elem1225; + $elem1232 = null; + $elem1232 = new 
\metastore\Partition(); + $xfer += $elem1232->read($input); + $this->new_parts []= $elem1232; } $xfer += $input->readListEnd(); } else { @@ -34248,9 +34312,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1226) + foreach ($this->new_parts as $iter1233) { - $xfer += $iter1226->write($output); + $xfer += $iter1233->write($output); } } $output->writeListEnd(); @@ -34465,15 +34529,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1227 = 0; - $_etype1230 = 0; - $xfer += $input->readListBegin($_etype1230, $_size1227); - for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231) + $_size1234 = 0; + $_etype1237 = 0; + $xfer += $input->readListBegin($_etype1237, $_size1234); + for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238) { - $elem1232 = null; - $elem1232 = new \metastore\Partition(); - $xfer += $elem1232->read($input); - $this->new_parts []= $elem1232; + $elem1239 = null; + $elem1239 = new \metastore\Partition(); + $xfer += $elem1239->read($input); + $this->new_parts []= $elem1239; } $xfer += $input->readListEnd(); } else { @@ -34519,9 +34583,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1233) + foreach ($this->new_parts as $iter1240) { - $xfer += $iter1233->write($output); + $xfer += $iter1240->write($output); } } $output->writeListEnd(); @@ -34645,6 +34709,216 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result { } +class ThriftHiveMetastore_alter_partitions_with_environment_context_req_args { + static $_TSPEC; + + /** + * @var \metastore\AlterPartitionsRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\AlterPartitionsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_partitions_with_environment_context_req_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\AlterPartitionsRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_with_environment_context_req_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class 
ThriftHiveMetastore_alter_partitions_with_environment_context_req_result { + static $_TSPEC; + + /** + * @var \metastore\AlterPartitionsResponse + */ + public $success = null; + /** + * @var \metastore\InvalidOperationException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\AlterPartitionsResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidOperationException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_partitions_with_environment_context_req_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\AlterPartitionsResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\InvalidOperationException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_with_environment_context_req_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_alter_partition_with_environment_context_args { static $_TSPEC; @@ -34999,14 +35273,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1234 = 0; - $_etype1237 = 0; - $xfer += $input->readListBegin($_etype1237, $_size1234); - for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238) + $_size1241 = 0; + $_etype1244 = 0; + $xfer += $input->readListBegin($_etype1244, $_size1241); + for ($_i1245 = 0; $_i1245 < 
$_size1241; ++$_i1245) { - $elem1239 = null; - $xfer += $input->readString($elem1239); - $this->part_vals []= $elem1239; + $elem1246 = null; + $xfer += $input->readString($elem1246); + $this->part_vals []= $elem1246; } $xfer += $input->readListEnd(); } else { @@ -35052,9 +35326,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1240) + foreach ($this->part_vals as $iter1247) { - $xfer += $output->writeString($iter1240); + $xfer += $output->writeString($iter1247); } } $output->writeListEnd(); @@ -35239,14 +35513,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1241 = 0; - $_etype1244 = 0; - $xfer += $input->readListBegin($_etype1244, $_size1241); - for ($_i1245 = 0; $_i1245 < $_size1241; ++$_i1245) + $_size1248 = 0; + $_etype1251 = 0; + $xfer += $input->readListBegin($_etype1251, $_size1248); + for ($_i1252 = 0; $_i1252 < $_size1248; ++$_i1252) { - $elem1246 = null; - $xfer += $input->readString($elem1246); - $this->part_vals []= $elem1246; + $elem1253 = null; + $xfer += $input->readString($elem1253); + $this->part_vals []= $elem1253; } $xfer += $input->readListEnd(); } else { @@ -35281,9 +35555,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1247) + foreach ($this->part_vals as $iter1254) { - $xfer += $output->writeString($iter1247); + $xfer += $output->writeString($iter1254); } } $output->writeListEnd(); @@ -35737,14 +36011,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1248 = 0; - $_etype1251 = 0; - $xfer += $input->readListBegin($_etype1251, $_size1248); - for ($_i1252 = 0; $_i1252 < $_size1248; ++$_i1252) + $_size1255 = 0; + $_etype1258 = 0; + $xfer += $input->readListBegin($_etype1258, $_size1255); + for ($_i1259 = 0; $_i1259 < $_size1255; ++$_i1259) { - $elem1253 = null; - $xfer += $input->readString($elem1253); - $this->success []= $elem1253; + $elem1260 = null; + $xfer += $input->readString($elem1260); + $this->success []= $elem1260; } $xfer += $input->readListEnd(); } else { @@ -35780,9 +36054,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1254) + foreach ($this->success as $iter1261) { - $xfer += $output->writeString($iter1254); + $xfer += $output->writeString($iter1261); } } $output->writeListEnd(); @@ -35942,17 +36216,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1255 = 0; - $_ktype1256 = 0; - $_vtype1257 = 0; - $xfer += $input->readMapBegin($_ktype1256, $_vtype1257, $_size1255); - for ($_i1259 = 0; $_i1259 < $_size1255; ++$_i1259) + $_size1262 = 0; + $_ktype1263 = 0; + $_vtype1264 = 0; + $xfer += $input->readMapBegin($_ktype1263, $_vtype1264, $_size1262); + for ($_i1266 = 0; $_i1266 < $_size1262; ++$_i1266) { - $key1260 = ''; - $val1261 = ''; - $xfer += $input->readString($key1260); - $xfer += $input->readString($val1261); - $this->success[$key1260] = $val1261; + $key1267 = ''; + $val1268 = ''; + $xfer += $input->readString($key1267); + $xfer += $input->readString($val1268); + $this->success[$key1267] = $val1268; } $xfer += $input->readMapEnd(); } else { @@ 
-35988,10 +36262,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1262 => $viter1263) + foreach ($this->success as $kiter1269 => $viter1270) { - $xfer += $output->writeString($kiter1262); - $xfer += $output->writeString($viter1263); + $xfer += $output->writeString($kiter1269); + $xfer += $output->writeString($viter1270); } } $output->writeMapEnd(); @@ -36111,17 +36385,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1264 = 0; - $_ktype1265 = 0; - $_vtype1266 = 0; - $xfer += $input->readMapBegin($_ktype1265, $_vtype1266, $_size1264); - for ($_i1268 = 0; $_i1268 < $_size1264; ++$_i1268) + $_size1271 = 0; + $_ktype1272 = 0; + $_vtype1273 = 0; + $xfer += $input->readMapBegin($_ktype1272, $_vtype1273, $_size1271); + for ($_i1275 = 0; $_i1275 < $_size1271; ++$_i1275) { - $key1269 = ''; - $val1270 = ''; - $xfer += $input->readString($key1269); - $xfer += $input->readString($val1270); - $this->part_vals[$key1269] = $val1270; + $key1276 = ''; + $val1277 = ''; + $xfer += $input->readString($key1276); + $xfer += $input->readString($val1277); + $this->part_vals[$key1276] = $val1277; } $xfer += $input->readMapEnd(); } else { @@ -36166,10 +36440,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1271 => $viter1272) + foreach ($this->part_vals as $kiter1278 => $viter1279) { - $xfer += $output->writeString($kiter1271); - $xfer += $output->writeString($viter1272); + $xfer += $output->writeString($kiter1278); + $xfer += $output->writeString($viter1279); } } $output->writeMapEnd(); @@ -36491,17 +36765,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1273 = 0; - $_ktype1274 = 0; - $_vtype1275 = 0; - $xfer += $input->readMapBegin($_ktype1274, $_vtype1275, $_size1273); - for ($_i1277 = 0; $_i1277 < $_size1273; ++$_i1277) + $_size1280 = 0; + $_ktype1281 = 0; + $_vtype1282 = 0; + $xfer += $input->readMapBegin($_ktype1281, $_vtype1282, $_size1280); + for ($_i1284 = 0; $_i1284 < $_size1280; ++$_i1284) { - $key1278 = ''; - $val1279 = ''; - $xfer += $input->readString($key1278); - $xfer += $input->readString($val1279); - $this->part_vals[$key1278] = $val1279; + $key1285 = ''; + $val1286 = ''; + $xfer += $input->readString($key1285); + $xfer += $input->readString($val1286); + $this->part_vals[$key1285] = $val1286; } $xfer += $input->readMapEnd(); } else { @@ -36546,10 +36820,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1280 => $viter1281) + foreach ($this->part_vals as $kiter1287 => $viter1288) { - $xfer += $output->writeString($kiter1280); - $xfer += $output->writeString($viter1281); + $xfer += $output->writeString($kiter1287); + $xfer += $output->writeString($viter1288); } } $output->writeMapEnd(); @@ -41508,14 +41782,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1282 = 0; - $_etype1285 = 0; - $xfer += $input->readListBegin($_etype1285, $_size1282); - for ($_i1286 = 0; $_i1286 < $_size1282; ++$_i1286) + $_size1289 = 0; + $_etype1292 = 0; + $xfer += 
$input->readListBegin($_etype1292, $_size1289); + for ($_i1293 = 0; $_i1293 < $_size1289; ++$_i1293) { - $elem1287 = null; - $xfer += $input->readString($elem1287); - $this->success []= $elem1287; + $elem1294 = null; + $xfer += $input->readString($elem1294); + $this->success []= $elem1294; } $xfer += $input->readListEnd(); } else { @@ -41551,9 +41825,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1288) + foreach ($this->success as $iter1295) { - $xfer += $output->writeString($iter1288); + $xfer += $output->writeString($iter1295); } } $output->writeListEnd(); @@ -42422,14 +42696,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1289 = 0; - $_etype1292 = 0; - $xfer += $input->readListBegin($_etype1292, $_size1289); - for ($_i1293 = 0; $_i1293 < $_size1289; ++$_i1293) + $_size1296 = 0; + $_etype1299 = 0; + $xfer += $input->readListBegin($_etype1299, $_size1296); + for ($_i1300 = 0; $_i1300 < $_size1296; ++$_i1300) { - $elem1294 = null; - $xfer += $input->readString($elem1294); - $this->success []= $elem1294; + $elem1301 = null; + $xfer += $input->readString($elem1301); + $this->success []= $elem1301; } $xfer += $input->readListEnd(); } else { @@ -42465,9 +42739,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1295) + foreach ($this->success as $iter1302) { - $xfer += $output->writeString($iter1295); + $xfer += $output->writeString($iter1302); } } $output->writeListEnd(); @@ -43158,15 +43432,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1296 = 0; - $_etype1299 = 0; - $xfer += $input->readListBegin($_etype1299, $_size1296); - for ($_i1300 = 0; $_i1300 < $_size1296; ++$_i1300) + $_size1303 = 0; + $_etype1306 = 0; + $xfer += $input->readListBegin($_etype1306, $_size1303); + for ($_i1307 = 0; $_i1307 < $_size1303; ++$_i1307) { - $elem1301 = null; - $elem1301 = new \metastore\Role(); - $xfer += $elem1301->read($input); - $this->success []= $elem1301; + $elem1308 = null; + $elem1308 = new \metastore\Role(); + $xfer += $elem1308->read($input); + $this->success []= $elem1308; } $xfer += $input->readListEnd(); } else { @@ -43202,9 +43476,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1302) + foreach ($this->success as $iter1309) { - $xfer += $iter1302->write($output); + $xfer += $iter1309->write($output); } } $output->writeListEnd(); @@ -43866,14 +44140,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1303 = 0; - $_etype1306 = 0; - $xfer += $input->readListBegin($_etype1306, $_size1303); - for ($_i1307 = 0; $_i1307 < $_size1303; ++$_i1307) + $_size1310 = 0; + $_etype1313 = 0; + $xfer += $input->readListBegin($_etype1313, $_size1310); + for ($_i1314 = 0; $_i1314 < $_size1310; ++$_i1314) { - $elem1308 = null; - $xfer += $input->readString($elem1308); - $this->group_names []= $elem1308; + $elem1315 = null; + $xfer += $input->readString($elem1315); + $this->group_names []= $elem1315; } $xfer += $input->readListEnd(); } else { @@ -43914,9 +44188,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, 
count($this->group_names)); { - foreach ($this->group_names as $iter1309) + foreach ($this->group_names as $iter1316) { - $xfer += $output->writeString($iter1309); + $xfer += $output->writeString($iter1316); } } $output->writeListEnd(); @@ -44224,15 +44498,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1310 = 0; - $_etype1313 = 0; - $xfer += $input->readListBegin($_etype1313, $_size1310); - for ($_i1314 = 0; $_i1314 < $_size1310; ++$_i1314) + $_size1317 = 0; + $_etype1320 = 0; + $xfer += $input->readListBegin($_etype1320, $_size1317); + for ($_i1321 = 0; $_i1321 < $_size1317; ++$_i1321) { - $elem1315 = null; - $elem1315 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1315->read($input); - $this->success []= $elem1315; + $elem1322 = null; + $elem1322 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1322->read($input); + $this->success []= $elem1322; } $xfer += $input->readListEnd(); } else { @@ -44268,9 +44542,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1316) + foreach ($this->success as $iter1323) { - $xfer += $iter1316->write($output); + $xfer += $iter1323->write($output); } } $output->writeListEnd(); @@ -45138,14 +45412,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1317 = 0; - $_etype1320 = 0; - $xfer += $input->readListBegin($_etype1320, $_size1317); - for ($_i1321 = 0; $_i1321 < $_size1317; ++$_i1321) + $_size1324 = 0; + $_etype1327 = 0; + $xfer += $input->readListBegin($_etype1327, $_size1324); + for ($_i1328 = 0; $_i1328 < $_size1324; ++$_i1328) { - $elem1322 = null; - $xfer += $input->readString($elem1322); - $this->group_names []= $elem1322; + $elem1329 = null; + $xfer += $input->readString($elem1329); + $this->group_names []= $elem1329; } $xfer += $input->readListEnd(); } else { @@ -45178,9 +45452,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1323) + foreach ($this->group_names as $iter1330) { - $xfer += $output->writeString($iter1323); + $xfer += $output->writeString($iter1330); } } $output->writeListEnd(); @@ -45256,14 +45530,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1324 = 0; - $_etype1327 = 0; - $xfer += $input->readListBegin($_etype1327, $_size1324); - for ($_i1328 = 0; $_i1328 < $_size1324; ++$_i1328) + $_size1331 = 0; + $_etype1334 = 0; + $xfer += $input->readListBegin($_etype1334, $_size1331); + for ($_i1335 = 0; $_i1335 < $_size1331; ++$_i1335) { - $elem1329 = null; - $xfer += $input->readString($elem1329); - $this->success []= $elem1329; + $elem1336 = null; + $xfer += $input->readString($elem1336); + $this->success []= $elem1336; } $xfer += $input->readListEnd(); } else { @@ -45299,9 +45573,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1330) + foreach ($this->success as $iter1337) { - $xfer += $output->writeString($iter1330); + $xfer += $output->writeString($iter1337); } } $output->writeListEnd(); @@ -46418,14 +46692,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1331 = 0; - $_etype1334 = 0; - $xfer += 
$input->readListBegin($_etype1334, $_size1331); - for ($_i1335 = 0; $_i1335 < $_size1331; ++$_i1335) + $_size1338 = 0; + $_etype1341 = 0; + $xfer += $input->readListBegin($_etype1341, $_size1338); + for ($_i1342 = 0; $_i1342 < $_size1338; ++$_i1342) { - $elem1336 = null; - $xfer += $input->readString($elem1336); - $this->success []= $elem1336; + $elem1343 = null; + $xfer += $input->readString($elem1343); + $this->success []= $elem1343; } $xfer += $input->readListEnd(); } else { @@ -46453,9 +46727,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1337) + foreach ($this->success as $iter1344) { - $xfer += $output->writeString($iter1337); + $xfer += $output->writeString($iter1344); } } $output->writeListEnd(); @@ -47094,14 +47368,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1338 = 0; - $_etype1341 = 0; - $xfer += $input->readListBegin($_etype1341, $_size1338); - for ($_i1342 = 0; $_i1342 < $_size1338; ++$_i1342) + $_size1345 = 0; + $_etype1348 = 0; + $xfer += $input->readListBegin($_etype1348, $_size1345); + for ($_i1349 = 0; $_i1349 < $_size1345; ++$_i1349) { - $elem1343 = null; - $xfer += $input->readString($elem1343); - $this->success []= $elem1343; + $elem1350 = null; + $xfer += $input->readString($elem1350); + $this->success []= $elem1350; } $xfer += $input->readListEnd(); } else { @@ -47129,9 +47403,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1344) + foreach ($this->success as $iter1351) { - $xfer += $output->writeString($iter1344); + $xfer += $output->writeString($iter1351); } } $output->writeListEnd(); @@ -57960,15 +58234,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1345 = 0; - $_etype1348 = 0; - $xfer += $input->readListBegin($_etype1348, $_size1345); - for ($_i1349 = 0; $_i1349 < $_size1345; ++$_i1349) + $_size1352 = 0; + $_etype1355 = 0; + $xfer += $input->readListBegin($_etype1355, $_size1352); + for ($_i1356 = 0; $_i1356 < $_size1352; ++$_i1356) { - $elem1350 = null; - $elem1350 = new \metastore\SchemaVersion(); - $xfer += $elem1350->read($input); - $this->success []= $elem1350; + $elem1357 = null; + $elem1357 = new \metastore\SchemaVersion(); + $xfer += $elem1357->read($input); + $this->success []= $elem1357; } $xfer += $input->readListEnd(); } else { @@ -58012,9 +58286,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1351) + foreach ($this->success as $iter1358) { - $xfer += $iter1351->write($output); + $xfer += $iter1358->write($output); } } $output->writeListEnd(); @@ -59883,15 +60157,15 @@ class ThriftHiveMetastore_get_runtime_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1352 = 0; - $_etype1355 = 0; - $xfer += $input->readListBegin($_etype1355, $_size1352); - for ($_i1356 = 0; $_i1356 < $_size1352; ++$_i1356) + $_size1359 = 0; + $_etype1362 = 0; + $xfer += $input->readListBegin($_etype1362, $_size1359); + for ($_i1363 = 0; $_i1363 < $_size1359; ++$_i1363) { - $elem1357 = null; - $elem1357 = new \metastore\RuntimeStat(); - $xfer += $elem1357->read($input); - $this->success []= $elem1357; + $elem1364 = null; + $elem1364 = new 
\metastore\RuntimeStat(); + $xfer += $elem1364->read($input); + $this->success []= $elem1364; } $xfer += $input->readListEnd(); } else { @@ -59927,9 +60201,9 @@ class ThriftHiveMetastore_get_runtime_stats_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1358) + foreach ($this->success as $iter1365) { - $xfer += $iter1358->write($output); + $xfer += $iter1365->write($output); } } $output->writeListEnd(); diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index 93b5780cb2..b6b3bfaa13 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -6517,6 +6517,14 @@ class Table { * @var int */ public $ownerType = 1; + /** + * @var int + */ + public $writeId = -1; + /** + * @var bool + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -6609,6 +6617,14 @@ class Table { 'var' => 'ownerType', 'type' => TType::I32, ), + 19 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 20 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { @@ -6666,6 +6682,12 @@ class Table { if (isset($vals['ownerType'])) { $this->ownerType = $vals['ownerType']; } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -6841,6 +6863,20 @@ class Table { $xfer += $input->skip($ftype); } break; + case 19: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 20: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -6978,6 +7014,16 @@ class Table { $xfer += $output->writeI32($this->ownerType); $xfer += $output->writeFieldEnd(); } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 19); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 20); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -7024,6 +7070,14 @@ class Partition { * @var string */ public $catName = null; + /** + * @var int + */ + public $writeId = -1; + /** + * @var bool + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -7078,6 +7132,14 @@ class Partition { 'var' => 'catName', 'type' => TType::STRING, ), + 10 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 11 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { @@ -7108,6 +7170,12 @@ class Partition { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -7218,6 +7286,20 @@ class Partition { $xfer += $input->skip($ftype); } break; + case 10: + if ($ftype == TType::I64) { + $xfer += 
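Note: the Types.php hunk above adds writeId (i64 field 19, default -1) and isStatsCompliant (bool field 20) to Table. A minimal sketch of the same fields as seen from the regenerated Python bindings, assuming the gen-py ttypes were regenerated with the same fields (only the gen-py service file appears later in this diff):

from hive_metastore.ttypes import Table

# writeId defaults to -1 until the metastore stamps a real write ID;
# isStatsCompliant stays None and is only populated on snapshot-validated reads.
tbl = Table(dbName='demo', tableName='t_acid')
print(tbl.writeId)           # -1
print(tbl.isStatsCompliant)  # None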
$input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 11: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -7307,6 +7389,16 @@ class Partition { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 10); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 11); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -7830,6 +7922,14 @@ class PartitionSpec { * @var string */ public $catName = null; + /** + * @var int + */ + public $writeId = -1; + /** + * @var bool + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -7860,6 +7960,14 @@ class PartitionSpec { 'var' => 'catName', 'type' => TType::STRING, ), + 7 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 8 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { @@ -7881,6 +7989,12 @@ class PartitionSpec { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -7947,6 +8061,20 @@ class PartitionSpec { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 8: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -7996,6 +8124,16 @@ class PartitionSpec { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 7); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 8); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -9894,6 +10032,18 @@ class ColumnStatistics { * @var \metastore\ColumnStatisticsObj[] */ public $statsObj = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; + /** + * @var bool + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -9912,6 +10062,18 @@ class ColumnStatistics { 'class' => '\metastore\ColumnStatisticsObj', ), ), + 3 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { @@ -9921,6 +10083,15 @@ class ColumnStatistics { if (isset($vals['statsObj'])) { $this->statsObj = 
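Note: Partition (fields 10/11) and PartitionSpec (fields 7/8) pick up the same writeId/isStatsCompliant pair, and ColumnStatistics gains txnId (3), validWriteIdList (4) and isStatsCompliant (5), so a stats object can be tagged with the snapshot that produced it. A minimal sketch under the same gen-py assumption; the snapshot string is illustrative, not a real write-ID list:

from hive_metastore.ttypes import ColumnStatistics, ColumnStatisticsDesc

stats = ColumnStatistics(
    statsDesc=ColumnStatisticsDesc(isTblLevel=True, dbName='demo', tableName='t_acid'),
    statsObj=[],                 # ColumnStatisticsObj entries omitted for brevity
    txnId=42,                    # transaction that computed the stats (new field 3)
    validWriteIdList='demo.t_acid:5:9223372036854775807::',  # hypothetical snapshot (new field 4)
)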
$vals['statsObj']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -9969,6 +10140,27 @@ class ColumnStatistics { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -10007,6 +10199,21 @@ class ColumnStatistics { } $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 3); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 5); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -10025,6 +10232,10 @@ class AggrStats { * @var int */ public $partsFound = null; + /** + * @var bool + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10042,6 +10253,10 @@ class AggrStats { 'var' => 'partsFound', 'type' => TType::I64, ), + 3 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { @@ -10051,6 +10266,9 @@ class AggrStats { if (isset($vals['partsFound'])) { $this->partsFound = $vals['partsFound']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -10098,6 +10316,13 @@ class AggrStats { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -10133,6 +10358,11 @@ class AggrStats { $xfer += $output->writeI64($this->partsFound); $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 3); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -10151,6 +10381,18 @@ class SetPartitionsStatsRequest { * @var bool */ public $needMerge = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var int + */ + public $writeId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10168,6 +10410,18 @@ class SetPartitionsStatsRequest { 'var' => 'needMerge', 'type' => TType::BOOL, ), + 3 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 4 => array( + 'var' => 'writeId', + 'type' => TType::I64, 
+ ), + 5 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -10177,6 +10431,15 @@ class SetPartitionsStatsRequest { if (isset($vals['needMerge'])) { $this->needMerge = $vals['needMerge']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -10224,6 +10487,27 @@ class SetPartitionsStatsRequest { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -10259,6 +10543,21 @@ class SetPartitionsStatsRequest { $xfer += $output->writeBool($this->needMerge); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 3); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 4); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13010,6 +13309,10 @@ class TableStatsResult { * @var \metastore\ColumnStatisticsObj[] */ public $tableStats = null; + /** + * @var bool + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13023,12 +13326,19 @@ class TableStatsResult { 'class' => '\metastore\ColumnStatisticsObj', ), ), + 2 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { if (isset($vals['tableStats'])) { $this->tableStats = $vals['tableStats']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -13069,6 +13379,13 @@ class TableStatsResult { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13099,6 +13416,11 @@ class TableStatsResult { } $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 2); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13113,6 +13435,10 @@ class PartitionsStatsResult { * @var array */ public $partStats = null; + /** + * @var bool + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13134,12 +13460,19 @@ class PartitionsStatsResult { ), ), ), + 2 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, 
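Note: SetPartitionsStatsRequest now carries txnId (3), writeId (4) and validWriteIdList (5), and TableStatsResult/PartitionsStatsResult each gain isStatsCompliant, so a stats write can be attributed to a specific transaction and write ID. A sketch of the write side, reusing the ColumnStatistics built above; client is a ThriftHiveMetastore.Client (connection setup appears in the sketch near the end of this section):

from hive_metastore.ttypes import SetPartitionsStatsRequest

req = SetPartitionsStatsRequest(
    colStats=[stats],            # field 1: list of ColumnStatistics
    needMerge=False,             # field 2
    txnId=42,                    # new field 3
    writeId=7,                   # new field 4: write ID that owns these stats
    validWriteIdList='demo.t_acid:5:9223372036854775807::',  # new field 5, illustrative
)
client.set_aggr_stats_for(req)   # pre-existing metastore call that accepts this request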
+ ), ); } if (is_array($vals)) { if (isset($vals['partStats'])) { $this->partStats = $vals['partStats']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -13193,6 +13526,13 @@ class PartitionsStatsResult { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13233,7 +13573,12 @@ class PartitionsStatsResult { } $xfer += $output->writeFieldEnd(); } - $xfer += $output->writeFieldStop(); + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 2); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; } @@ -13259,6 +13604,14 @@ class TableStatsRequest { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13283,6 +13636,14 @@ class TableStatsRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 6 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13298,6 +13659,12 @@ class TableStatsRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -13358,6 +13725,20 @@ class TableStatsRequest { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13403,6 +13784,16 @@ class TableStatsRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 5); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13433,6 +13824,14 @@ class PartitionsStatsRequest { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13465,6 +13864,14 @@ class PartitionsStatsRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 6 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 7 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13483,6 +13890,12 @@ class PartitionsStatsRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + 
} + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -13560,6 +13973,20 @@ class PartitionsStatsRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13622,6 +14049,16 @@ class PartitionsStatsRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 6); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13636,6 +14073,10 @@ class AddPartitionsResult { * @var \metastore\Partition[] */ public $partitions = null; + /** + * @var bool + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13649,12 +14090,19 @@ class AddPartitionsResult { 'class' => '\metastore\Partition', ), ), + 2 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { if (isset($vals['partitions'])) { $this->partitions = $vals['partitions']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -13695,6 +14143,13 @@ class AddPartitionsResult { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13725,6 +14180,11 @@ class AddPartitionsResult { } $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 2); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13759,6 +14219,14 @@ class AddPartitionsRequest { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13792,6 +14260,14 @@ class AddPartitionsRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 7 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 8 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13813,6 +14289,12 @@ class AddPartitionsRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -13888,6 +14370,20 @@ class AddPartitionsRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 8: + 
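Note: on the read side, TableStatsRequest (txnId at 5, validWriteIdList at 6) and PartitionsStatsRequest (txnId at 6, validWriteIdList at 7) let the caller pass its snapshot, and the result's isStatsCompliant flag tells it whether the stored stats are valid under that snapshot. A sketch under the same gen-py assumption, with illustrative values:

from hive_metastore.ttypes import TableStatsRequest

req = TableStatsRequest(dbName='demo', tblName='t_acid',
                        colNames=['id', 'name'],
                        txnId=43,
                        validWriteIdList='demo.t_acid:7:9223372036854775807::')
res = client.get_table_statistics_req(req)
if res.isStatsCompliant is False:
    pass   # stats predate the reader's snapshot; caller should fall back to estimates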
if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13943,6 +14439,16 @@ class AddPartitionsRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 7); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -24180,6 +24686,14 @@ class GetTableRequest { * @var string */ public $catName = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -24201,6 +24715,14 @@ class GetTableRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 6 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -24216,6 +24738,12 @@ class GetTableRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -24267,6 +24795,20 @@ class GetTableRequest { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -24303,6 +24845,16 @@ class GetTableRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 5); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -24317,6 +24869,10 @@ class GetTableResult { * @var \metastore\Table */ public $table = null; + /** + * @var bool + */ + public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -24326,12 +24882,19 @@ class GetTableResult { 'type' => TType::STRUCT, 'class' => '\metastore\Table', ), + 2 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { if (isset($vals['table'])) { $this->table = $vals['table']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } } } @@ -24362,6 +24925,13 @@ class GetTableResult { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += 
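Note: GetTableRequest gains the same txnId (5) and validWriteIdList (6) fields, and GetTableResult gains isStatsCompliant, which makes table fetches snapshot-consistent as well. A sketch under the same assumptions:

from hive_metastore.ttypes import GetTableRequest

req = GetTableRequest(dbName='demo', tblName='t_acid',
                      txnId=43,
                      validWriteIdList='demo.t_acid:7:9223372036854775807::')
res = client.get_table_req(req)
print(res.table.writeId)     # write ID stamped on the returned Table
print(res.isStatsCompliant)  # whether its stats are valid under the snapshot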
$input->skip($ftype); break; @@ -24383,6 +24953,11 @@ class GetTableResult { $xfer += $this->table->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 2); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -30890,6 +31465,302 @@ class GetRuntimeStatsRequest { } +class AlterPartitionsRequest { + static $_TSPEC; + + /** + * @var string + */ + public $dbName = null; + /** + * @var string + */ + public $tableName = null; + /** + * @var \metastore\Partition[] + */ + public $partitions = null; + /** + * @var \metastore\EnvironmentContext + */ + public $environmentContext = null; + /** + * @var int + */ + public $txnId = -1; + /** + * @var int + */ + public $writeId = -1; + /** + * @var string + */ + public $validWriteIdList = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'partitions', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\Partition', + ), + ), + 4 => array( + 'var' => 'environmentContext', + 'type' => TType::STRUCT, + 'class' => '\metastore\EnvironmentContext', + ), + 5 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 6 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 7 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + if (isset($vals['partitions'])) { + $this->partitions = $vals['partitions']; + } + if (isset($vals['environmentContext'])) { + $this->environmentContext = $vals['environmentContext']; + } + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } + } + } + + public function getName() { + return 'AlterPartitionsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::LST) { + $this->partitions = array(); + $_size841 = 0; + $_etype844 = 0; + $xfer += $input->readListBegin($_etype844, $_size841); + for ($_i845 = 0; $_i845 < $_size841; ++$_i845) + { + $elem846 = null; + $elem846 = new \metastore\Partition(); + $xfer += $elem846->read($input); + $this->partitions []= $elem846; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->environmentContext = new 
\metastore\EnvironmentContext(); + $xfer += $this->environmentContext->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AlterPartitionsRequest'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->partitions !== null) { + if (!is_array($this->partitions)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('partitions', TType::LST, 3); + { + $output->writeListBegin(TType::STRUCT, count($this->partitions)); + { + foreach ($this->partitions as $iter847) + { + $xfer += $iter847->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->environmentContext !== null) { + if (!is_object($this->environmentContext)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('environmentContext', TType::STRUCT, 4); + $xfer += $this->environmentContext->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 5); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 6); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class AlterPartitionsResponse { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'AlterPartitionsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AlterPartitionsResponse'); + $xfer += $output->writeFieldStop(); + 
$xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class MetaException extends TException { static $_TSPEC; diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index add9197c15..b115f4c235 100755 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -108,6 +108,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' void alter_partition(string db_name, string tbl_name, Partition new_part)') print(' void alter_partitions(string db_name, string tbl_name, new_parts)') print(' void alter_partitions_with_environment_context(string db_name, string tbl_name, new_parts, EnvironmentContext environment_context)') + print(' AlterPartitionsResponse alter_partitions_with_environment_context_req(AlterPartitionsRequest req)') print(' void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)') print(' void rename_partition(string db_name, string tbl_name, part_vals, Partition new_part)') print(' bool partition_name_has_valid_characters( part_vals, bool throw_exception)') @@ -805,6 +806,12 @@ elif cmd == 'alter_partitions_with_environment_context': sys.exit(1) pp.pprint(client.alter_partitions_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) +elif cmd == 'alter_partitions_with_environment_context_req': + if len(args) != 1: + print('alter_partitions_with_environment_context_req requires 1 args') + sys.exit(1) + pp.pprint(client.alter_partitions_with_environment_context_req(eval(args[0]),)) + elif cmd == 'alter_partition_with_environment_context': if len(args) != 4: print('alter_partition_with_environment_context requires 4 args') diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index a5bcc10d5f..0af7238422 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -727,6 +727,13 @@ def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts """ pass + def alter_partitions_with_environment_context_req(self, req): + """ + Parameters: + - req + """ + pass + def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context): """ Parameters: @@ -4780,6 +4787,41 @@ def recv_alter_partitions_with_environment_context(self): raise result.o2 return + def alter_partitions_with_environment_context_req(self, req): + """ + Parameters: + - req + """ + self.send_alter_partitions_with_environment_context_req(req) + return self.recv_alter_partitions_with_environment_context_req() + + def send_alter_partitions_with_environment_context_req(self, req): + self._oprot.writeMessageBegin('alter_partitions_with_environment_context_req', TMessageType.CALL, self._seqid) + args = alter_partitions_with_environment_context_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_partitions_with_environment_context_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + 
iprot.readMessageEnd() + raise x + result = alter_partitions_with_environment_context_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context_req failed: unknown result") + def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context): """ Parameters: @@ -9127,6 +9169,7 @@ def __init__(self, handler): self._processMap["alter_partition"] = Processor.process_alter_partition self._processMap["alter_partitions"] = Processor.process_alter_partitions self._processMap["alter_partitions_with_environment_context"] = Processor.process_alter_partitions_with_environment_context + self._processMap["alter_partitions_with_environment_context_req"] = Processor.process_alter_partitions_with_environment_context_req self._processMap["alter_partition_with_environment_context"] = Processor.process_alter_partition_with_environment_context self._processMap["rename_partition"] = Processor.process_rename_partition self._processMap["partition_name_has_valid_characters"] = Processor.process_partition_name_has_valid_characters @@ -11424,6 +11467,31 @@ def process_alter_partitions_with_environment_context(self, seqid, iprot, oprot) oprot.writeMessageEnd() oprot.trans.flush() + def process_alter_partitions_with_environment_context_req(self, seqid, iprot, oprot): + args = alter_partitions_with_environment_context_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_partitions_with_environment_context_req_result() + try: + result.success = self._handler.alter_partitions_with_environment_context_req(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except InvalidOperationException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("alter_partitions_with_environment_context_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_alter_partition_with_environment_context(self, seqid, iprot, oprot): args = alter_partition_with_environment_context_args() args.read(iprot) @@ -16045,10 +16113,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype840, _size837) = iprot.readListBegin() - for _i841 in xrange(_size837): - _elem842 = iprot.readString() - self.success.append(_elem842) + (_etype847, _size844) = iprot.readListBegin() + for _i848 in xrange(_size844): + _elem849 = iprot.readString() + self.success.append(_elem849) iprot.readListEnd() else: iprot.skip(ftype) @@ -16071,8 +16139,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter843 in self.success: - oprot.writeString(iter843) + for iter850 in self.success: + oprot.writeString(iter850) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16177,10 +16245,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype847, _size844) = iprot.readListBegin() 
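Note: the Python hunks above wire the new request-style call end to end: the ThriftHiveMetastore-remote helper, the Iface stub, the client send/recv pair, and the processor entry that maps o1/o2 back to InvalidOperationException and MetaException. A minimal round-trip sketch; host, port and field values are placeholders:

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore import ThriftHiveMetastore
from hive_metastore.ttypes import (AlterPartitionsRequest, Partition,
                                   InvalidOperationException, MetaException)

transport = TTransport.TBufferedTransport(TSocket.TSocket('metastore-host', 9083))
client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
transport.open()

part = Partition(values=['2019-01-01'], dbName='demo', tableName='t_acid')
req = AlterPartitionsRequest(dbName='demo', tableName='t_acid',
                             partitions=[part],
                             txnId=42, writeId=7,
                             validWriteIdList='demo.t_acid:5:9223372036854775807::')
try:
    client.alter_partitions_with_environment_context_req(req)  # returns AlterPartitionsResponse
except (InvalidOperationException, MetaException):
    raise   # surfaced from the result struct's o1/o2 fields

transport.close()

The same call is also reachable from the ThriftHiveMetastore-remote helper added above, which eval()s its single command-line argument into an AlterPartitionsRequest.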
- for _i848 in xrange(_size844): - _elem849 = iprot.readString() - self.success.append(_elem849) + (_etype854, _size851) = iprot.readListBegin() + for _i855 in xrange(_size851): + _elem856 = iprot.readString() + self.success.append(_elem856) iprot.readListEnd() else: iprot.skip(ftype) @@ -16203,8 +16271,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter850 in self.success: - oprot.writeString(iter850) + for iter857 in self.success: + oprot.writeString(iter857) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16974,12 +17042,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype852, _vtype853, _size851 ) = iprot.readMapBegin() - for _i855 in xrange(_size851): - _key856 = iprot.readString() - _val857 = Type() - _val857.read(iprot) - self.success[_key856] = _val857 + (_ktype859, _vtype860, _size858 ) = iprot.readMapBegin() + for _i862 in xrange(_size858): + _key863 = iprot.readString() + _val864 = Type() + _val864.read(iprot) + self.success[_key863] = _val864 iprot.readMapEnd() else: iprot.skip(ftype) @@ -17002,9 +17070,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter858,viter859 in self.success.items(): - oprot.writeString(kiter858) - viter859.write(oprot) + for kiter865,viter866 in self.success.items(): + oprot.writeString(kiter865) + viter866.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -17147,11 +17215,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype863, _size860) = iprot.readListBegin() - for _i864 in xrange(_size860): - _elem865 = FieldSchema() - _elem865.read(iprot) - self.success.append(_elem865) + (_etype870, _size867) = iprot.readListBegin() + for _i871 in xrange(_size867): + _elem872 = FieldSchema() + _elem872.read(iprot) + self.success.append(_elem872) iprot.readListEnd() else: iprot.skip(ftype) @@ -17186,8 +17254,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter866 in self.success: - iter866.write(oprot) + for iter873 in self.success: + iter873.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17354,11 +17422,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype870, _size867) = iprot.readListBegin() - for _i871 in xrange(_size867): - _elem872 = FieldSchema() - _elem872.read(iprot) - self.success.append(_elem872) + (_etype877, _size874) = iprot.readListBegin() + for _i878 in xrange(_size874): + _elem879 = FieldSchema() + _elem879.read(iprot) + self.success.append(_elem879) iprot.readListEnd() else: iprot.skip(ftype) @@ -17393,8 +17461,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter873 in self.success: - iter873.write(oprot) + for iter880 in self.success: + iter880.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17547,11 +17615,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype877, _size874) = iprot.readListBegin() - for _i878 in xrange(_size874): - _elem879 = FieldSchema() - _elem879.read(iprot) - 
self.success.append(_elem879) + (_etype884, _size881) = iprot.readListBegin() + for _i885 in xrange(_size881): + _elem886 = FieldSchema() + _elem886.read(iprot) + self.success.append(_elem886) iprot.readListEnd() else: iprot.skip(ftype) @@ -17586,8 +17654,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter880 in self.success: - iter880.write(oprot) + for iter887 in self.success: + iter887.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17754,11 +17822,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype884, _size881) = iprot.readListBegin() - for _i885 in xrange(_size881): - _elem886 = FieldSchema() - _elem886.read(iprot) - self.success.append(_elem886) + (_etype891, _size888) = iprot.readListBegin() + for _i892 in xrange(_size888): + _elem893 = FieldSchema() + _elem893.read(iprot) + self.success.append(_elem893) iprot.readListEnd() else: iprot.skip(ftype) @@ -17793,8 +17861,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter887 in self.success: - iter887.write(oprot) + for iter894 in self.success: + iter894.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18247,66 +18315,66 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype891, _size888) = iprot.readListBegin() - for _i892 in xrange(_size888): - _elem893 = SQLPrimaryKey() - _elem893.read(iprot) - self.primaryKeys.append(_elem893) + (_etype898, _size895) = iprot.readListBegin() + for _i899 in xrange(_size895): + _elem900 = SQLPrimaryKey() + _elem900.read(iprot) + self.primaryKeys.append(_elem900) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype897, _size894) = iprot.readListBegin() - for _i898 in xrange(_size894): - _elem899 = SQLForeignKey() - _elem899.read(iprot) - self.foreignKeys.append(_elem899) + (_etype904, _size901) = iprot.readListBegin() + for _i905 in xrange(_size901): + _elem906 = SQLForeignKey() + _elem906.read(iprot) + self.foreignKeys.append(_elem906) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype903, _size900) = iprot.readListBegin() - for _i904 in xrange(_size900): - _elem905 = SQLUniqueConstraint() - _elem905.read(iprot) - self.uniqueConstraints.append(_elem905) + (_etype910, _size907) = iprot.readListBegin() + for _i911 in xrange(_size907): + _elem912 = SQLUniqueConstraint() + _elem912.read(iprot) + self.uniqueConstraints.append(_elem912) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype909, _size906) = iprot.readListBegin() - for _i910 in xrange(_size906): - _elem911 = SQLNotNullConstraint() - _elem911.read(iprot) - self.notNullConstraints.append(_elem911) + (_etype916, _size913) = iprot.readListBegin() + for _i917 in xrange(_size913): + _elem918 = SQLNotNullConstraint() + _elem918.read(iprot) + self.notNullConstraints.append(_elem918) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype915, _size912) = iprot.readListBegin() - for _i916 in xrange(_size912): - _elem917 = SQLDefaultConstraint() - _elem917.read(iprot) - 
self.defaultConstraints.append(_elem917) + (_etype922, _size919) = iprot.readListBegin() + for _i923 in xrange(_size919): + _elem924 = SQLDefaultConstraint() + _elem924.read(iprot) + self.defaultConstraints.append(_elem924) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.checkConstraints = [] - (_etype921, _size918) = iprot.readListBegin() - for _i922 in xrange(_size918): - _elem923 = SQLCheckConstraint() - _elem923.read(iprot) - self.checkConstraints.append(_elem923) + (_etype928, _size925) = iprot.readListBegin() + for _i929 in xrange(_size925): + _elem930 = SQLCheckConstraint() + _elem930.read(iprot) + self.checkConstraints.append(_elem930) iprot.readListEnd() else: iprot.skip(ftype) @@ -18327,43 +18395,43 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter924 in self.primaryKeys: - iter924.write(oprot) + for iter931 in self.primaryKeys: + iter931.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter925 in self.foreignKeys: - iter925.write(oprot) + for iter932 in self.foreignKeys: + iter932.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter926 in self.uniqueConstraints: - iter926.write(oprot) + for iter933 in self.uniqueConstraints: + iter933.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter927 in self.notNullConstraints: - iter927.write(oprot) + for iter934 in self.notNullConstraints: + iter934.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter928 in self.defaultConstraints: - iter928.write(oprot) + for iter935 in self.defaultConstraints: + iter935.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter929 in self.checkConstraints: - iter929.write(oprot) + for iter936 in self.checkConstraints: + iter936.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19923,10 +19991,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype933, _size930) = iprot.readListBegin() - for _i934 in xrange(_size930): - _elem935 = iprot.readString() - self.partNames.append(_elem935) + (_etype940, _size937) = iprot.readListBegin() + for _i941 in xrange(_size937): + _elem942 = iprot.readString() + self.partNames.append(_elem942) iprot.readListEnd() else: iprot.skip(ftype) @@ -19951,8 +20019,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter936 in self.partNames: - oprot.writeString(iter936) + for iter943 in self.partNames: + oprot.writeString(iter943) oprot.writeListEnd() oprot.writeFieldEnd() 
oprot.writeFieldStop() @@ -20152,10 +20220,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype940, _size937) = iprot.readListBegin() - for _i941 in xrange(_size937): - _elem942 = iprot.readString() - self.success.append(_elem942) + (_etype947, _size944) = iprot.readListBegin() + for _i948 in xrange(_size944): + _elem949 = iprot.readString() + self.success.append(_elem949) iprot.readListEnd() else: iprot.skip(ftype) @@ -20178,8 +20246,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter943 in self.success: - oprot.writeString(iter943) + for iter950 in self.success: + oprot.writeString(iter950) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20329,10 +20397,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype947, _size944) = iprot.readListBegin() - for _i948 in xrange(_size944): - _elem949 = iprot.readString() - self.success.append(_elem949) + (_etype954, _size951) = iprot.readListBegin() + for _i955 in xrange(_size951): + _elem956 = iprot.readString() + self.success.append(_elem956) iprot.readListEnd() else: iprot.skip(ftype) @@ -20355,8 +20423,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter950 in self.success: - oprot.writeString(iter950) + for iter957 in self.success: + oprot.writeString(iter957) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20480,10 +20548,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype954, _size951) = iprot.readListBegin() - for _i955 in xrange(_size951): - _elem956 = iprot.readString() - self.success.append(_elem956) + (_etype961, _size958) = iprot.readListBegin() + for _i962 in xrange(_size958): + _elem963 = iprot.readString() + self.success.append(_elem963) iprot.readListEnd() else: iprot.skip(ftype) @@ -20506,8 +20574,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter957 in self.success: - oprot.writeString(iter957) + for iter964 in self.success: + oprot.writeString(iter964) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20580,10 +20648,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype961, _size958) = iprot.readListBegin() - for _i962 in xrange(_size958): - _elem963 = iprot.readString() - self.tbl_types.append(_elem963) + (_etype968, _size965) = iprot.readListBegin() + for _i969 in xrange(_size965): + _elem970 = iprot.readString() + self.tbl_types.append(_elem970) iprot.readListEnd() else: iprot.skip(ftype) @@ -20608,8 +20676,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter964 in self.tbl_types: - oprot.writeString(iter964) + for iter971 in self.tbl_types: + oprot.writeString(iter971) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20665,11 +20733,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype968, _size965) = iprot.readListBegin() - for _i969 in xrange(_size965): - _elem970 = TableMeta() - _elem970.read(iprot) - self.success.append(_elem970) + (_etype975, _size972) = 
iprot.readListBegin() + for _i976 in xrange(_size972): + _elem977 = TableMeta() + _elem977.read(iprot) + self.success.append(_elem977) iprot.readListEnd() else: iprot.skip(ftype) @@ -20692,8 +20760,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter971 in self.success: - iter971.write(oprot) + for iter978 in self.success: + iter978.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20817,10 +20885,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype975, _size972) = iprot.readListBegin() - for _i976 in xrange(_size972): - _elem977 = iprot.readString() - self.success.append(_elem977) + (_etype982, _size979) = iprot.readListBegin() + for _i983 in xrange(_size979): + _elem984 = iprot.readString() + self.success.append(_elem984) iprot.readListEnd() else: iprot.skip(ftype) @@ -20843,8 +20911,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter978 in self.success: - oprot.writeString(iter978) + for iter985 in self.success: + oprot.writeString(iter985) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21080,10 +21148,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype982, _size979) = iprot.readListBegin() - for _i983 in xrange(_size979): - _elem984 = iprot.readString() - self.tbl_names.append(_elem984) + (_etype989, _size986) = iprot.readListBegin() + for _i990 in xrange(_size986): + _elem991 = iprot.readString() + self.tbl_names.append(_elem991) iprot.readListEnd() else: iprot.skip(ftype) @@ -21104,8 +21172,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter985 in self.tbl_names: - oprot.writeString(iter985) + for iter992 in self.tbl_names: + oprot.writeString(iter992) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21157,11 +21225,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype989, _size986) = iprot.readListBegin() - for _i990 in xrange(_size986): - _elem991 = Table() - _elem991.read(iprot) - self.success.append(_elem991) + (_etype996, _size993) = iprot.readListBegin() + for _i997 in xrange(_size993): + _elem998 = Table() + _elem998.read(iprot) + self.success.append(_elem998) iprot.readListEnd() else: iprot.skip(ftype) @@ -21178,8 +21246,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter992 in self.success: - iter992.write(oprot) + for iter999 in self.success: + iter999.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21571,10 +21639,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype996, _size993) = iprot.readListBegin() - for _i997 in xrange(_size993): - _elem998 = iprot.readString() - self.tbl_names.append(_elem998) + (_etype1003, _size1000) = iprot.readListBegin() + for _i1004 in xrange(_size1000): + _elem1005 = iprot.readString() + self.tbl_names.append(_elem1005) iprot.readListEnd() else: iprot.skip(ftype) @@ -21595,8 +21663,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) 
oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter999 in self.tbl_names: - oprot.writeString(iter999) + for iter1006 in self.tbl_names: + oprot.writeString(iter1006) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21657,12 +21725,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1001, _vtype1002, _size1000 ) = iprot.readMapBegin() - for _i1004 in xrange(_size1000): - _key1005 = iprot.readString() - _val1006 = Materialization() - _val1006.read(iprot) - self.success[_key1005] = _val1006 + (_ktype1008, _vtype1009, _size1007 ) = iprot.readMapBegin() + for _i1011 in xrange(_size1007): + _key1012 = iprot.readString() + _val1013 = Materialization() + _val1013.read(iprot) + self.success[_key1012] = _val1013 iprot.readMapEnd() else: iprot.skip(ftype) @@ -21697,9 +21765,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter1007,viter1008 in self.success.items(): - oprot.writeString(kiter1007) - viter1008.write(oprot) + for kiter1014,viter1015 in self.success.items(): + oprot.writeString(kiter1014) + viter1015.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22064,10 +22132,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1012, _size1009) = iprot.readListBegin() - for _i1013 in xrange(_size1009): - _elem1014 = iprot.readString() - self.success.append(_elem1014) + (_etype1019, _size1016) = iprot.readListBegin() + for _i1020 in xrange(_size1016): + _elem1021 = iprot.readString() + self.success.append(_elem1021) iprot.readListEnd() else: iprot.skip(ftype) @@ -22102,8 +22170,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1015 in self.success: - oprot.writeString(iter1015) + for iter1022 in self.success: + oprot.writeString(iter1022) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23073,11 +23141,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1019, _size1016) = iprot.readListBegin() - for _i1020 in xrange(_size1016): - _elem1021 = Partition() - _elem1021.read(iprot) - self.new_parts.append(_elem1021) + (_etype1026, _size1023) = iprot.readListBegin() + for _i1027 in xrange(_size1023): + _elem1028 = Partition() + _elem1028.read(iprot) + self.new_parts.append(_elem1028) iprot.readListEnd() else: iprot.skip(ftype) @@ -23094,8 +23162,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1022 in self.new_parts: - iter1022.write(oprot) + for iter1029 in self.new_parts: + iter1029.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23253,11 +23321,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1026, _size1023) = iprot.readListBegin() - for _i1027 in xrange(_size1023): - _elem1028 = PartitionSpec() - _elem1028.read(iprot) - self.new_parts.append(_elem1028) + (_etype1033, _size1030) = iprot.readListBegin() + for _i1034 in xrange(_size1030): + _elem1035 = PartitionSpec() + _elem1035.read(iprot) + self.new_parts.append(_elem1035) iprot.readListEnd() else: iprot.skip(ftype) @@ -23274,8 +23342,8 @@ def write(self, oprot): if self.new_parts is not 
None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1029 in self.new_parts: - iter1029.write(oprot) + for iter1036 in self.new_parts: + iter1036.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23449,10 +23517,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1033, _size1030) = iprot.readListBegin() - for _i1034 in xrange(_size1030): - _elem1035 = iprot.readString() - self.part_vals.append(_elem1035) + (_etype1040, _size1037) = iprot.readListBegin() + for _i1041 in xrange(_size1037): + _elem1042 = iprot.readString() + self.part_vals.append(_elem1042) iprot.readListEnd() else: iprot.skip(ftype) @@ -23477,8 +23545,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1036 in self.part_vals: - oprot.writeString(iter1036) + for iter1043 in self.part_vals: + oprot.writeString(iter1043) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23831,10 +23899,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1040, _size1037) = iprot.readListBegin() - for _i1041 in xrange(_size1037): - _elem1042 = iprot.readString() - self.part_vals.append(_elem1042) + (_etype1047, _size1044) = iprot.readListBegin() + for _i1048 in xrange(_size1044): + _elem1049 = iprot.readString() + self.part_vals.append(_elem1049) iprot.readListEnd() else: iprot.skip(ftype) @@ -23865,8 +23933,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1043 in self.part_vals: - oprot.writeString(iter1043) + for iter1050 in self.part_vals: + oprot.writeString(iter1050) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -24461,10 +24529,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1047, _size1044) = iprot.readListBegin() - for _i1048 in xrange(_size1044): - _elem1049 = iprot.readString() - self.part_vals.append(_elem1049) + (_etype1054, _size1051) = iprot.readListBegin() + for _i1055 in xrange(_size1051): + _elem1056 = iprot.readString() + self.part_vals.append(_elem1056) iprot.readListEnd() else: iprot.skip(ftype) @@ -24494,8 +24562,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1050 in self.part_vals: - oprot.writeString(iter1050) + for iter1057 in self.part_vals: + oprot.writeString(iter1057) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -24668,10 +24736,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1054, _size1051) = iprot.readListBegin() - for _i1055 in xrange(_size1051): - _elem1056 = iprot.readString() - self.part_vals.append(_elem1056) + (_etype1061, _size1058) = iprot.readListBegin() + for _i1062 in xrange(_size1058): + _elem1063 = iprot.readString() + self.part_vals.append(_elem1063) iprot.readListEnd() else: iprot.skip(ftype) @@ -24707,8 +24775,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1057 in self.part_vals: - oprot.writeString(iter1057) + for iter1064 
in self.part_vals: + oprot.writeString(iter1064) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -25445,10 +25513,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1061, _size1058) = iprot.readListBegin() - for _i1062 in xrange(_size1058): - _elem1063 = iprot.readString() - self.part_vals.append(_elem1063) + (_etype1068, _size1065) = iprot.readListBegin() + for _i1069 in xrange(_size1065): + _elem1070 = iprot.readString() + self.part_vals.append(_elem1070) iprot.readListEnd() else: iprot.skip(ftype) @@ -25473,8 +25541,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1064 in self.part_vals: - oprot.writeString(iter1064) + for iter1071 in self.part_vals: + oprot.writeString(iter1071) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25633,11 +25701,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1066, _vtype1067, _size1065 ) = iprot.readMapBegin() - for _i1069 in xrange(_size1065): - _key1070 = iprot.readString() - _val1071 = iprot.readString() - self.partitionSpecs[_key1070] = _val1071 + (_ktype1073, _vtype1074, _size1072 ) = iprot.readMapBegin() + for _i1076 in xrange(_size1072): + _key1077 = iprot.readString() + _val1078 = iprot.readString() + self.partitionSpecs[_key1077] = _val1078 iprot.readMapEnd() else: iprot.skip(ftype) @@ -25674,9 +25742,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1072,viter1073 in self.partitionSpecs.items(): - oprot.writeString(kiter1072) - oprot.writeString(viter1073) + for kiter1079,viter1080 in self.partitionSpecs.items(): + oprot.writeString(kiter1079) + oprot.writeString(viter1080) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -25881,11 +25949,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1075, _vtype1076, _size1074 ) = iprot.readMapBegin() - for _i1078 in xrange(_size1074): - _key1079 = iprot.readString() - _val1080 = iprot.readString() - self.partitionSpecs[_key1079] = _val1080 + (_ktype1082, _vtype1083, _size1081 ) = iprot.readMapBegin() + for _i1085 in xrange(_size1081): + _key1086 = iprot.readString() + _val1087 = iprot.readString() + self.partitionSpecs[_key1086] = _val1087 iprot.readMapEnd() else: iprot.skip(ftype) @@ -25922,9 +25990,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1081,viter1082 in self.partitionSpecs.items(): - oprot.writeString(kiter1081) - oprot.writeString(viter1082) + for kiter1088,viter1089 in self.partitionSpecs.items(): + oprot.writeString(kiter1088) + oprot.writeString(viter1089) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -26007,11 +26075,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1086, _size1083) = iprot.readListBegin() - for _i1087 in xrange(_size1083): - _elem1088 = Partition() - _elem1088.read(iprot) - self.success.append(_elem1088) + (_etype1093, _size1090) = iprot.readListBegin() + for _i1094 in xrange(_size1090): + _elem1095 = Partition() + _elem1095.read(iprot) + 
self.success.append(_elem1095) iprot.readListEnd() else: iprot.skip(ftype) @@ -26052,8 +26120,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1089 in self.success: - iter1089.write(oprot) + for iter1096 in self.success: + iter1096.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26147,10 +26215,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1093, _size1090) = iprot.readListBegin() - for _i1094 in xrange(_size1090): - _elem1095 = iprot.readString() - self.part_vals.append(_elem1095) + (_etype1100, _size1097) = iprot.readListBegin() + for _i1101 in xrange(_size1097): + _elem1102 = iprot.readString() + self.part_vals.append(_elem1102) iprot.readListEnd() else: iprot.skip(ftype) @@ -26162,10 +26230,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1099, _size1096) = iprot.readListBegin() - for _i1100 in xrange(_size1096): - _elem1101 = iprot.readString() - self.group_names.append(_elem1101) + (_etype1106, _size1103) = iprot.readListBegin() + for _i1107 in xrange(_size1103): + _elem1108 = iprot.readString() + self.group_names.append(_elem1108) iprot.readListEnd() else: iprot.skip(ftype) @@ -26190,8 +26258,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1102 in self.part_vals: - oprot.writeString(iter1102) + for iter1109 in self.part_vals: + oprot.writeString(iter1109) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -26201,8 +26269,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1103 in self.group_names: - oprot.writeString(iter1103) + for iter1110 in self.group_names: + oprot.writeString(iter1110) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26631,11 +26699,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1107, _size1104) = iprot.readListBegin() - for _i1108 in xrange(_size1104): - _elem1109 = Partition() - _elem1109.read(iprot) - self.success.append(_elem1109) + (_etype1114, _size1111) = iprot.readListBegin() + for _i1115 in xrange(_size1111): + _elem1116 = Partition() + _elem1116.read(iprot) + self.success.append(_elem1116) iprot.readListEnd() else: iprot.skip(ftype) @@ -26664,8 +26732,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1110 in self.success: - iter1110.write(oprot) + for iter1117 in self.success: + iter1117.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26759,10 +26827,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1114, _size1111) = iprot.readListBegin() - for _i1115 in xrange(_size1111): - _elem1116 = iprot.readString() - self.group_names.append(_elem1116) + (_etype1121, _size1118) = iprot.readListBegin() + for _i1122 in xrange(_size1118): + _elem1123 = iprot.readString() + self.group_names.append(_elem1123) iprot.readListEnd() else: iprot.skip(ftype) @@ -26795,8 +26863,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', 
TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1117 in self.group_names: - oprot.writeString(iter1117) + for iter1124 in self.group_names: + oprot.writeString(iter1124) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26857,11 +26925,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1121, _size1118) = iprot.readListBegin() - for _i1122 in xrange(_size1118): - _elem1123 = Partition() - _elem1123.read(iprot) - self.success.append(_elem1123) + (_etype1128, _size1125) = iprot.readListBegin() + for _i1129 in xrange(_size1125): + _elem1130 = Partition() + _elem1130.read(iprot) + self.success.append(_elem1130) iprot.readListEnd() else: iprot.skip(ftype) @@ -26890,8 +26958,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1124 in self.success: - iter1124.write(oprot) + for iter1131 in self.success: + iter1131.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27049,11 +27117,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1128, _size1125) = iprot.readListBegin() - for _i1129 in xrange(_size1125): - _elem1130 = PartitionSpec() - _elem1130.read(iprot) - self.success.append(_elem1130) + (_etype1135, _size1132) = iprot.readListBegin() + for _i1136 in xrange(_size1132): + _elem1137 = PartitionSpec() + _elem1137.read(iprot) + self.success.append(_elem1137) iprot.readListEnd() else: iprot.skip(ftype) @@ -27082,8 +27150,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1131 in self.success: - iter1131.write(oprot) + for iter1138 in self.success: + iter1138.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27241,10 +27309,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1135, _size1132) = iprot.readListBegin() - for _i1136 in xrange(_size1132): - _elem1137 = iprot.readString() - self.success.append(_elem1137) + (_etype1142, _size1139) = iprot.readListBegin() + for _i1143 in xrange(_size1139): + _elem1144 = iprot.readString() + self.success.append(_elem1144) iprot.readListEnd() else: iprot.skip(ftype) @@ -27273,8 +27341,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1138 in self.success: - oprot.writeString(iter1138) + for iter1145 in self.success: + oprot.writeString(iter1145) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27514,10 +27582,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1142, _size1139) = iprot.readListBegin() - for _i1143 in xrange(_size1139): - _elem1144 = iprot.readString() - self.part_vals.append(_elem1144) + (_etype1149, _size1146) = iprot.readListBegin() + for _i1150 in xrange(_size1146): + _elem1151 = iprot.readString() + self.part_vals.append(_elem1151) iprot.readListEnd() else: iprot.skip(ftype) @@ -27547,8 +27615,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1145 in self.part_vals: - oprot.writeString(iter1145) + for iter1152 in self.part_vals: + oprot.writeString(iter1152) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -27612,11 +27680,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1149, _size1146) = iprot.readListBegin() - for _i1150 in xrange(_size1146): - _elem1151 = Partition() - _elem1151.read(iprot) - self.success.append(_elem1151) + (_etype1156, _size1153) = iprot.readListBegin() + for _i1157 in xrange(_size1153): + _elem1158 = Partition() + _elem1158.read(iprot) + self.success.append(_elem1158) iprot.readListEnd() else: iprot.skip(ftype) @@ -27645,8 +27713,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1152 in self.success: - iter1152.write(oprot) + for iter1159 in self.success: + iter1159.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27733,10 +27801,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1156, _size1153) = iprot.readListBegin() - for _i1157 in xrange(_size1153): - _elem1158 = iprot.readString() - self.part_vals.append(_elem1158) + (_etype1163, _size1160) = iprot.readListBegin() + for _i1164 in xrange(_size1160): + _elem1165 = iprot.readString() + self.part_vals.append(_elem1165) iprot.readListEnd() else: iprot.skip(ftype) @@ -27753,10 +27821,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1162, _size1159) = iprot.readListBegin() - for _i1163 in xrange(_size1159): - _elem1164 = iprot.readString() - self.group_names.append(_elem1164) + (_etype1169, _size1166) = iprot.readListBegin() + for _i1170 in xrange(_size1166): + _elem1171 = iprot.readString() + self.group_names.append(_elem1171) iprot.readListEnd() else: iprot.skip(ftype) @@ -27781,8 +27849,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1165 in self.part_vals: - oprot.writeString(iter1165) + for iter1172 in self.part_vals: + oprot.writeString(iter1172) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -27796,8 +27864,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1166 in self.group_names: - oprot.writeString(iter1166) + for iter1173 in self.group_names: + oprot.writeString(iter1173) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27859,11 +27927,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1170, _size1167) = iprot.readListBegin() - for _i1171 in xrange(_size1167): - _elem1172 = Partition() - _elem1172.read(iprot) - self.success.append(_elem1172) + (_etype1177, _size1174) = iprot.readListBegin() + for _i1178 in xrange(_size1174): + _elem1179 = Partition() + _elem1179.read(iprot) + self.success.append(_elem1179) iprot.readListEnd() else: iprot.skip(ftype) @@ -27892,8 +27960,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1173 in self.success: - iter1173.write(oprot) + for iter1180 in self.success: + iter1180.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27974,10 +28042,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: 
self.part_vals = [] - (_etype1177, _size1174) = iprot.readListBegin() - for _i1178 in xrange(_size1174): - _elem1179 = iprot.readString() - self.part_vals.append(_elem1179) + (_etype1184, _size1181) = iprot.readListBegin() + for _i1185 in xrange(_size1181): + _elem1186 = iprot.readString() + self.part_vals.append(_elem1186) iprot.readListEnd() else: iprot.skip(ftype) @@ -28007,8 +28075,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1180 in self.part_vals: - oprot.writeString(iter1180) + for iter1187 in self.part_vals: + oprot.writeString(iter1187) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -28072,10 +28140,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1184, _size1181) = iprot.readListBegin() - for _i1185 in xrange(_size1181): - _elem1186 = iprot.readString() - self.success.append(_elem1186) + (_etype1191, _size1188) = iprot.readListBegin() + for _i1192 in xrange(_size1188): + _elem1193 = iprot.readString() + self.success.append(_elem1193) iprot.readListEnd() else: iprot.skip(ftype) @@ -28104,8 +28172,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1187 in self.success: - oprot.writeString(iter1187) + for iter1194 in self.success: + oprot.writeString(iter1194) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28276,11 +28344,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1191, _size1188) = iprot.readListBegin() - for _i1192 in xrange(_size1188): - _elem1193 = Partition() - _elem1193.read(iprot) - self.success.append(_elem1193) + (_etype1198, _size1195) = iprot.readListBegin() + for _i1199 in xrange(_size1195): + _elem1200 = Partition() + _elem1200.read(iprot) + self.success.append(_elem1200) iprot.readListEnd() else: iprot.skip(ftype) @@ -28309,8 +28377,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1194 in self.success: - iter1194.write(oprot) + for iter1201 in self.success: + iter1201.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28481,11 +28549,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1198, _size1195) = iprot.readListBegin() - for _i1199 in xrange(_size1195): - _elem1200 = PartitionSpec() - _elem1200.read(iprot) - self.success.append(_elem1200) + (_etype1205, _size1202) = iprot.readListBegin() + for _i1206 in xrange(_size1202): + _elem1207 = PartitionSpec() + _elem1207.read(iprot) + self.success.append(_elem1207) iprot.readListEnd() else: iprot.skip(ftype) @@ -28514,8 +28582,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1201 in self.success: - iter1201.write(oprot) + for iter1208 in self.success: + iter1208.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28935,10 +29003,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1205, _size1202) = iprot.readListBegin() - for _i1206 in xrange(_size1202): - _elem1207 = iprot.readString() - self.names.append(_elem1207) + (_etype1212, _size1209) = 
iprot.readListBegin()
+          for _i1213 in xrange(_size1209):
+            _elem1214 = iprot.readString()
+            self.names.append(_elem1214)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28963,8 +29031,8 @@ def write(self, oprot):
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter1208 in self.names:
-        oprot.writeString(iter1208)
+      for iter1215 in self.names:
+        oprot.writeString(iter1215)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -29023,11 +29091,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1212, _size1209) = iprot.readListBegin()
-          for _i1213 in xrange(_size1209):
-            _elem1214 = Partition()
-            _elem1214.read(iprot)
-            self.success.append(_elem1214)
+          (_etype1219, _size1216) = iprot.readListBegin()
+          for _i1220 in xrange(_size1216):
+            _elem1221 = Partition()
+            _elem1221.read(iprot)
+            self.success.append(_elem1221)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29056,8 +29124,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1215 in self.success:
-        iter1215.write(oprot)
+      for iter1222 in self.success:
+        iter1222.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -29307,11 +29375,11 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1219, _size1216) = iprot.readListBegin()
-          for _i1220 in xrange(_size1216):
-            _elem1221 = Partition()
-            _elem1221.read(iprot)
-            self.new_parts.append(_elem1221)
+          (_etype1226, _size1223) = iprot.readListBegin()
+          for _i1227 in xrange(_size1223):
+            _elem1228 = Partition()
+            _elem1228.read(iprot)
+            self.new_parts.append(_elem1228)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29336,8 +29404,8 @@ def write(self, oprot):
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1222 in self.new_parts:
-        iter1222.write(oprot)
+      for iter1229 in self.new_parts:
+        iter1229.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -29490,11 +29558,11 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1226, _size1223) = iprot.readListBegin()
-          for _i1227 in xrange(_size1223):
-            _elem1228 = Partition()
-            _elem1228.read(iprot)
-            self.new_parts.append(_elem1228)
+          (_etype1233, _size1230) = iprot.readListBegin()
+          for _i1234 in xrange(_size1230):
+            _elem1235 = Partition()
+            _elem1235.read(iprot)
+            self.new_parts.append(_elem1235)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29525,8 +29593,8 @@ def write(self, oprot):
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1229 in self.new_parts:
-        iter1229.write(oprot)
+      for iter1236 in self.new_parts:
+        iter1236.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -29639,6 +29707,165 @@ def __eq__(self, other):
   def __ne__(self, other):
     return not (self == other)
 
+class alter_partitions_with_environment_context_req_args:
+  """
+  Attributes:
+   - req
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRUCT, 'req', (AlterPartitionsRequest, AlterPartitionsRequest.thrift_spec), None, ), # 1
+  )
+
+  def __init__(self, req=None,):
+    self.req = req
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRUCT:
+          self.req = AlterPartitionsRequest()
+          self.req.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('alter_partitions_with_environment_context_req_args')
+    if self.req is not None:
+      oprot.writeFieldBegin('req', TType.STRUCT, 1)
+      self.req.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.req)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class alter_partitions_with_environment_context_req_result:
+  """
+  Attributes:
+   - success
+   - o1
+   - o2
+  """
+
+  thrift_spec = (
+    (0, TType.STRUCT, 'success', (AlterPartitionsResponse, AlterPartitionsResponse.thrift_spec), None, ), # 0
+    (1, TType.STRUCT, 'o1', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 1
+    (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2
+  )
+
+  def __init__(self, success=None, o1=None, o2=None,):
+    self.success = success
+    self.o1 = o1
+    self.o2 = o2
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.STRUCT:
+          self.success = AlterPartitionsResponse()
+          self.success.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = InvalidOperationException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.o2 = MetaException()
+          self.o2.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('alter_partitions_with_environment_context_req_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.STRUCT, 0)
+      self.success.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o1 is not None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o2 is not None:
+      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+      self.o2.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    value = (value * 31) ^ hash(self.o1)
+    value = (value * 31) ^ hash(self.o2)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class alter_partition_with_environment_context_args:
   """
   Attributes:
@@ -29870,10 +30097,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1233, _size1230) = iprot.readListBegin()
-          for _i1234 in xrange(_size1230):
-            _elem1235 = iprot.readString()
-            self.part_vals.append(_elem1235)
+          (_etype1240, _size1237) = iprot.readListBegin()
+          for _i1241 in xrange(_size1237):
+            _elem1242 = iprot.readString()
+            self.part_vals.append(_elem1242)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29904,8 +30131,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1236 in self.part_vals:
-        oprot.writeString(iter1236)
+      for iter1243 in self.part_vals:
+        oprot.writeString(iter1243)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.new_part is not None:
@@ -30047,10 +30274,10 @@ def read(self, iprot):
       if fid == 1:
        if ftype == TType.LIST:
          self.part_vals = []
-          (_etype1240, _size1237) = iprot.readListBegin()
-          for _i1241 in xrange(_size1237):
-            _elem1242 = iprot.readString()
-            self.part_vals.append(_elem1242)
+          (_etype1247, _size1244) = iprot.readListBegin()
+          for _i1248 in xrange(_size1244):
+            _elem1249 = iprot.readString()
+            self.part_vals.append(_elem1249)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30072,8 +30299,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1243 in self.part_vals:
-        oprot.writeString(iter1243)
+      for iter1250 in self.part_vals:
+        oprot.writeString(iter1250)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.throw_exception is not None:
@@ -30431,10 +30658,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
          self.success = []
-          (_etype1247, _size1244) = iprot.readListBegin()
-          for _i1248 in xrange(_size1244):
-            _elem1249 = iprot.readString()
-            self.success.append(_elem1249)
+          (_etype1254, _size1251) = iprot.readListBegin()
+          for _i1255 in xrange(_size1251):
+            _elem1256 = iprot.readString()
+            self.success.append(_elem1256)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30457,8 +30684,8 @@ def write(self, oprot):
     if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1250 in self.success:
-        oprot.writeString(iter1250)
+      for iter1257 in self.success:
+        oprot.writeString(iter1257)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30582,11 +30809,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype1252, _vtype1253, _size1251 ) = iprot.readMapBegin()
-          for _i1255 in xrange(_size1251):
-            _key1256 = iprot.readString()
-
_val1257 = iprot.readString() - self.success[_key1256] = _val1257 + (_ktype1259, _vtype1260, _size1258 ) = iprot.readMapBegin() + for _i1262 in xrange(_size1258): + _key1263 = iprot.readString() + _val1264 = iprot.readString() + self.success[_key1263] = _val1264 iprot.readMapEnd() else: iprot.skip(ftype) @@ -30609,9 +30836,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1258,viter1259 in self.success.items(): - oprot.writeString(kiter1258) - oprot.writeString(viter1259) + for kiter1265,viter1266 in self.success.items(): + oprot.writeString(kiter1265) + oprot.writeString(viter1266) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30687,11 +30914,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1261, _vtype1262, _size1260 ) = iprot.readMapBegin() - for _i1264 in xrange(_size1260): - _key1265 = iprot.readString() - _val1266 = iprot.readString() - self.part_vals[_key1265] = _val1266 + (_ktype1268, _vtype1269, _size1267 ) = iprot.readMapBegin() + for _i1271 in xrange(_size1267): + _key1272 = iprot.readString() + _val1273 = iprot.readString() + self.part_vals[_key1272] = _val1273 iprot.readMapEnd() else: iprot.skip(ftype) @@ -30721,9 +30948,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1267,viter1268 in self.part_vals.items(): - oprot.writeString(kiter1267) - oprot.writeString(viter1268) + for kiter1274,viter1275 in self.part_vals.items(): + oprot.writeString(kiter1274) + oprot.writeString(viter1275) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -30937,11 +31164,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1270, _vtype1271, _size1269 ) = iprot.readMapBegin() - for _i1273 in xrange(_size1269): - _key1274 = iprot.readString() - _val1275 = iprot.readString() - self.part_vals[_key1274] = _val1275 + (_ktype1277, _vtype1278, _size1276 ) = iprot.readMapBegin() + for _i1280 in xrange(_size1276): + _key1281 = iprot.readString() + _val1282 = iprot.readString() + self.part_vals[_key1281] = _val1282 iprot.readMapEnd() else: iprot.skip(ftype) @@ -30971,9 +31198,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1276,viter1277 in self.part_vals.items(): - oprot.writeString(kiter1276) - oprot.writeString(viter1277) + for kiter1283,viter1284 in self.part_vals.items(): + oprot.writeString(kiter1283) + oprot.writeString(viter1284) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -34625,10 +34852,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1281, _size1278) = iprot.readListBegin() - for _i1282 in xrange(_size1278): - _elem1283 = iprot.readString() - self.success.append(_elem1283) + (_etype1288, _size1285) = iprot.readListBegin() + for _i1289 in xrange(_size1285): + _elem1290 = iprot.readString() + self.success.append(_elem1290) iprot.readListEnd() else: iprot.skip(ftype) @@ -34651,8 +34878,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1284 in self.success: - 
oprot.writeString(iter1284) + for iter1291 in self.success: + oprot.writeString(iter1291) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35340,10 +35567,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1288, _size1285) = iprot.readListBegin() - for _i1289 in xrange(_size1285): - _elem1290 = iprot.readString() - self.success.append(_elem1290) + (_etype1295, _size1292) = iprot.readListBegin() + for _i1296 in xrange(_size1292): + _elem1297 = iprot.readString() + self.success.append(_elem1297) iprot.readListEnd() else: iprot.skip(ftype) @@ -35366,8 +35593,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1291 in self.success: - oprot.writeString(iter1291) + for iter1298 in self.success: + oprot.writeString(iter1298) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35881,11 +36108,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1295, _size1292) = iprot.readListBegin() - for _i1296 in xrange(_size1292): - _elem1297 = Role() - _elem1297.read(iprot) - self.success.append(_elem1297) + (_etype1302, _size1299) = iprot.readListBegin() + for _i1303 in xrange(_size1299): + _elem1304 = Role() + _elem1304.read(iprot) + self.success.append(_elem1304) iprot.readListEnd() else: iprot.skip(ftype) @@ -35908,8 +36135,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1298 in self.success: - iter1298.write(oprot) + for iter1305 in self.success: + iter1305.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36418,10 +36645,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1302, _size1299) = iprot.readListBegin() - for _i1303 in xrange(_size1299): - _elem1304 = iprot.readString() - self.group_names.append(_elem1304) + (_etype1309, _size1306) = iprot.readListBegin() + for _i1310 in xrange(_size1306): + _elem1311 = iprot.readString() + self.group_names.append(_elem1311) iprot.readListEnd() else: iprot.skip(ftype) @@ -36446,8 +36673,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1305 in self.group_names: - oprot.writeString(iter1305) + for iter1312 in self.group_names: + oprot.writeString(iter1312) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -36674,11 +36901,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1309, _size1306) = iprot.readListBegin() - for _i1310 in xrange(_size1306): - _elem1311 = HiveObjectPrivilege() - _elem1311.read(iprot) - self.success.append(_elem1311) + (_etype1316, _size1313) = iprot.readListBegin() + for _i1317 in xrange(_size1313): + _elem1318 = HiveObjectPrivilege() + _elem1318.read(iprot) + self.success.append(_elem1318) iprot.readListEnd() else: iprot.skip(ftype) @@ -36701,8 +36928,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1312 in self.success: - iter1312.write(oprot) + for iter1319 in self.success: + iter1319.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -37372,10 +37599,10 @@ def 
read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1316, _size1313) = iprot.readListBegin() - for _i1317 in xrange(_size1313): - _elem1318 = iprot.readString() - self.group_names.append(_elem1318) + (_etype1323, _size1320) = iprot.readListBegin() + for _i1324 in xrange(_size1320): + _elem1325 = iprot.readString() + self.group_names.append(_elem1325) iprot.readListEnd() else: iprot.skip(ftype) @@ -37396,8 +37623,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1319 in self.group_names: - oprot.writeString(iter1319) + for iter1326 in self.group_names: + oprot.writeString(iter1326) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -37452,10 +37679,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1323, _size1320) = iprot.readListBegin() - for _i1324 in xrange(_size1320): - _elem1325 = iprot.readString() - self.success.append(_elem1325) + (_etype1330, _size1327) = iprot.readListBegin() + for _i1331 in xrange(_size1327): + _elem1332 = iprot.readString() + self.success.append(_elem1332) iprot.readListEnd() else: iprot.skip(ftype) @@ -37478,8 +37705,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1326 in self.success: - oprot.writeString(iter1326) + for iter1333 in self.success: + oprot.writeString(iter1333) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38411,10 +38638,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1330, _size1327) = iprot.readListBegin() - for _i1331 in xrange(_size1327): - _elem1332 = iprot.readString() - self.success.append(_elem1332) + (_etype1337, _size1334) = iprot.readListBegin() + for _i1338 in xrange(_size1334): + _elem1339 = iprot.readString() + self.success.append(_elem1339) iprot.readListEnd() else: iprot.skip(ftype) @@ -38431,8 +38658,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1333 in self.success: - oprot.writeString(iter1333) + for iter1340 in self.success: + oprot.writeString(iter1340) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -38959,10 +39186,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1337, _size1334) = iprot.readListBegin() - for _i1338 in xrange(_size1334): - _elem1339 = iprot.readString() - self.success.append(_elem1339) + (_etype1344, _size1341) = iprot.readListBegin() + for _i1345 in xrange(_size1341): + _elem1346 = iprot.readString() + self.success.append(_elem1346) iprot.readListEnd() else: iprot.skip(ftype) @@ -38979,8 +39206,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1340 in self.success: - oprot.writeString(iter1340) + for iter1347 in self.success: + oprot.writeString(iter1347) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -47387,11 +47614,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1344, _size1341) = iprot.readListBegin() - for _i1345 in xrange(_size1341): - _elem1346 = SchemaVersion() - _elem1346.read(iprot) - 
self.success.append(_elem1346) + (_etype1351, _size1348) = iprot.readListBegin() + for _i1352 in xrange(_size1348): + _elem1353 = SchemaVersion() + _elem1353.read(iprot) + self.success.append(_elem1353) iprot.readListEnd() else: iprot.skip(ftype) @@ -47420,8 +47647,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1347 in self.success: - iter1347.write(oprot) + for iter1354 in self.success: + iter1354.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -48896,11 +49123,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1351, _size1348) = iprot.readListBegin() - for _i1352 in xrange(_size1348): - _elem1353 = RuntimeStat() - _elem1353.read(iprot) - self.success.append(_elem1353) + (_etype1358, _size1355) = iprot.readListBegin() + for _i1359 in xrange(_size1355): + _elem1360 = RuntimeStat() + _elem1360.read(iprot) + self.success.append(_elem1360) iprot.readListEnd() else: iprot.skip(ftype) @@ -48923,8 +49150,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1354 in self.success: - iter1354.write(oprot) + for iter1361 in self.success: + iter1361.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 031e72b3c9..df3c586d4a 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -4550,6 +4550,8 @@ class Table: - creationMetadata - catName - ownerType + - writeId + - isStatsCompliant """ thrift_spec = ( @@ -4572,9 +4574,11 @@ class Table: (16, TType.STRUCT, 'creationMetadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 16 (17, TType.STRING, 'catName', None, None, ), # 17 (18, TType.I32, 'ownerType', None, 1, ), # 18 + (19, TType.I64, 'writeId', None, -1, ), # 19 + (20, TType.BOOL, 'isStatsCompliant', None, None, ), # 20 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4],): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4], writeId=thrift_spec[19][4], isStatsCompliant=None,): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -4593,6 +4597,8 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.creationMetadata = creationMetadata self.catName = catName self.ownerType = ownerType + self.writeId = writeId + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not 
None: @@ -4708,6 +4714,16 @@ def read(self, iprot): self.ownerType = iprot.readI32() else: iprot.skip(ftype) + elif fid == 19: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 20: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4797,6 +4813,14 @@ def write(self, oprot): oprot.writeFieldBegin('ownerType', TType.I32, 18) oprot.writeI32(self.ownerType) oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 19) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 20) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -4824,6 +4848,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.creationMetadata) value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.ownerType) + value = (value * 31) ^ hash(self.writeId) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -4849,6 +4875,8 @@ class Partition: - parameters - privileges - catName + - writeId + - isStatsCompliant """ thrift_spec = ( @@ -4862,9 +4890,11 @@ class Partition: (7, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 7 (8, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 8 (9, TType.STRING, 'catName', None, None, ), # 9 + (10, TType.I64, 'writeId', None, -1, ), # 10 + (11, TType.BOOL, 'isStatsCompliant', None, None, ), # 11 ) - def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None,): + def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None, writeId=thrift_spec[10][4], isStatsCompliant=None,): self.values = values self.dbName = dbName self.tableName = tableName @@ -4874,6 +4904,8 @@ def __init__(self, values=None, dbName=None, tableName=None, createTime=None, la self.parameters = parameters self.privileges = privileges self.catName = catName + self.writeId = writeId + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -4942,6 +4974,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 10: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4995,6 +5037,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 9) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 10) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 11) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -5013,6 +5063,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.parameters) value = 
(value * 31) ^ hash(self.privileges) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.writeId) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -5346,6 +5398,8 @@ class PartitionSpec: - sharedSDPartitionSpec - partitionList - catName + - writeId + - isStatsCompliant """ thrift_spec = ( @@ -5356,15 +5410,19 @@ class PartitionSpec: (4, TType.STRUCT, 'sharedSDPartitionSpec', (PartitionSpecWithSharedSD, PartitionSpecWithSharedSD.thrift_spec), None, ), # 4 (5, TType.STRUCT, 'partitionList', (PartitionListComposingSpec, PartitionListComposingSpec.thrift_spec), None, ), # 5 (6, TType.STRING, 'catName', None, None, ), # 6 + (7, TType.I64, 'writeId', None, -1, ), # 7 + (8, TType.BOOL, 'isStatsCompliant', None, None, ), # 8 ) - def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None,): + def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None, writeId=thrift_spec[7][4], isStatsCompliant=None,): self.dbName = dbName self.tableName = tableName self.rootPath = rootPath self.sharedSDPartitionSpec = sharedSDPartitionSpec self.partitionList = partitionList self.catName = catName + self.writeId = writeId + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -5407,6 +5465,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -5441,6 +5509,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 6) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 7) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 8) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -5456,6 +5532,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.sharedSDPartitionSpec) value = (value * 31) ^ hash(self.partitionList) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.writeId) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -6841,17 +6919,26 @@ class ColumnStatistics: Attributes: - statsDesc - statsObj + - txnId + - validWriteIdList + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'statsDesc', (ColumnStatisticsDesc, ColumnStatisticsDesc.thrift_spec), None, ), # 1 (2, TType.LIST, 'statsObj', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 2 + (3, TType.I64, 'txnId', None, -1, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 + (5, TType.BOOL, 'isStatsCompliant', None, None, ), # 5 ) - def __init__(self, statsDesc=None, statsObj=None,): + def __init__(self, statsDesc=None, statsObj=None, txnId=thrift_spec[3][4], validWriteIdList=None, isStatsCompliant=None,): self.statsDesc = statsDesc self.statsObj = statsObj + self.txnId = txnId + 
self.validWriteIdList = validWriteIdList + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -6879,6 +6966,21 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -6900,6 +7002,18 @@ def write(self, oprot): iter243.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 3) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 5) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -6915,6 +7029,9 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.statsDesc) value = (value * 31) ^ hash(self.statsObj) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -6933,17 +7050,20 @@ class AggrStats: Attributes: - colStats - partsFound + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1 (2, TType.I64, 'partsFound', None, None, ), # 2 + (3, TType.BOOL, 'isStatsCompliant', None, None, ), # 3 ) - def __init__(self, colStats=None, partsFound=None,): + def __init__(self, colStats=None, partsFound=None, isStatsCompliant=None,): self.colStats = colStats self.partsFound = partsFound + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -6970,6 +7090,11 @@ def read(self, iprot): self.partsFound = iprot.readI64() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -6991,6 +7116,10 @@ def write(self, oprot): oprot.writeFieldBegin('partsFound', TType.I64, 2) oprot.writeI64(self.partsFound) oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 3) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7006,6 +7135,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.colStats) value = (value * 31) ^ hash(self.partsFound) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -7024,17 +7154,26 @@ class SetPartitionsStatsRequest: Attributes: - colStats - needMerge + - txnId + - writeId + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.LIST, 'colStats', 
(TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1 (2, TType.BOOL, 'needMerge', None, None, ), # 2 + (3, TType.I64, 'txnId', None, -1, ), # 3 + (4, TType.I64, 'writeId', None, -1, ), # 4 + (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 ) - def __init__(self, colStats=None, needMerge=None,): + def __init__(self, colStats=None, needMerge=None, txnId=thrift_spec[3][4], writeId=thrift_spec[4][4], validWriteIdList=None,): self.colStats = colStats self.needMerge = needMerge + self.txnId = txnId + self.writeId = writeId + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7061,6 +7200,21 @@ def read(self, iprot): self.needMerge = iprot.readBool() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -7082,6 +7236,18 @@ def write(self, oprot): oprot.writeFieldBegin('needMerge', TType.BOOL, 2) oprot.writeBool(self.needMerge) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 3) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 4) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7095,6 +7261,9 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.colStats) value = (value * 31) ^ hash(self.needMerge) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.writeId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -9133,15 +9302,18 @@ class TableStatsResult: """ Attributes: - tableStats + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.LIST, 'tableStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1 + (2, TType.BOOL, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, tableStats=None,): + def __init__(self, tableStats=None, isStatsCompliant=None,): self.tableStats = tableStats + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9163,6 +9335,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9180,6 +9357,10 @@ def write(self, oprot): iter380.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 2) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9192,6 +9373,7 @@ def validate(self): def __hash__(self): value = 17 value = 
(value * 31) ^ hash(self.tableStats) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -9209,15 +9391,18 @@ class PartitionsStatsResult: """ Attributes: - partStats + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.MAP, 'partStats', (TType.STRING,None,TType.LIST,(TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec))), None, ), # 1 + (2, TType.BOOL, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, partStats=None,): + def __init__(self, partStats=None, isStatsCompliant=None,): self.partStats = partStats + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9245,6 +9430,11 @@ def read(self, iprot): iprot.readMapEnd() else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9266,6 +9456,10 @@ def write(self, oprot): oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 2) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9278,6 +9472,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.partStats) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -9298,6 +9493,8 @@ class TableStatsRequest: - tblName - colNames - catName + - txnId + - validWriteIdList """ thrift_spec = ( @@ -9306,13 +9503,17 @@ class TableStatsRequest: (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3 (4, TType.STRING, 'catName', None, None, ), # 4 + (5, TType.I64, 'txnId', None, -1, ), # 5 + (6, TType.STRING, 'validWriteIdList', None, None, ), # 6 ) - def __init__(self, dbName=None, tblName=None, colNames=None, catName=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames self.catName = catName + self.txnId = txnId + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9348,6 +9549,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9377,6 +9588,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 4) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 5) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9396,6 +9615,8 @@ def __hash__(self): value = (value * 31) ^ 
hash(self.tblName) value = (value * 31) ^ hash(self.colNames) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -9417,6 +9638,8 @@ class PartitionsStatsRequest: - colNames - partNames - catName + - txnId + - validWriteIdList """ thrift_spec = ( @@ -9426,14 +9649,18 @@ class PartitionsStatsRequest: (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3 (4, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 4 (5, TType.STRING, 'catName', None, None, ), # 5 + (6, TType.I64, 'txnId', None, -1, ), # 6 + (7, TType.STRING, 'validWriteIdList', None, None, ), # 7 ) - def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, txnId=thrift_spec[6][4], validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames self.partNames = partNames self.catName = catName + self.txnId = txnId + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9479,6 +9706,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9515,6 +9752,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 5) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 6) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9537,6 +9782,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.colNames) value = (value * 31) ^ hash(self.partNames) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -9554,15 +9801,18 @@ class AddPartitionsResult: """ Attributes: - partitions + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1 + (2, TType.BOOL, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, partitions=None,): + def __init__(self, partitions=None, isStatsCompliant=None,): self.partitions = partitions + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9584,6 +9834,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9601,6 +9856,10 @@ def write(self, oprot): iter424.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.isStatsCompliant is not 
None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 2) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9611,6 +9870,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.partitions) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -9633,6 +9893,8 @@ class AddPartitionsRequest: - ifNotExists - needResult - catName + - txnId + - validWriteIdList """ thrift_spec = ( @@ -9643,15 +9905,19 @@ class AddPartitionsRequest: (4, TType.BOOL, 'ifNotExists', None, None, ), # 4 (5, TType.BOOL, 'needResult', None, True, ), # 5 (6, TType.STRING, 'catName', None, None, ), # 6 + (7, TType.I64, 'txnId', None, -1, ), # 7 + (8, TType.STRING, 'validWriteIdList', None, None, ), # 8 ) - def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None,): + def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None, txnId=thrift_spec[7][4], validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.parts = parts self.ifNotExists = ifNotExists self.needResult = needResult self.catName = catName + self.txnId = txnId + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9698,6 +9964,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9735,6 +10011,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 6) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 7) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9758,6 +10042,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.ifNotExists) value = (value * 31) ^ hash(self.needResult) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -16998,6 +17284,8 @@ class GetTableRequest: - tblName - capabilities - catName + - txnId + - validWriteIdList """ thrift_spec = ( @@ -17006,13 +17294,17 @@ class GetTableRequest: (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3 (4, TType.STRING, 'catName', None, None, ), # 4 + (5, TType.I64, 'txnId', None, -1, ), # 5 + (6, TType.STRING, 'validWriteIdList', None, None, ), # 6 ) - def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None,): + def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.capabilities = capabilities self.catName = catName + self.txnId = txnId + self.validWriteIdList = 
validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -17044,6 +17336,16 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -17070,6 +17372,14 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 4) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 5) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -17087,6 +17397,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.capabilities) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -17104,15 +17416,18 @@ class GetTableResult: """ Attributes: - table + - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'table', (Table, Table.thrift_spec), None, ), # 1 + (2, TType.BOOL, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, table=None,): + def __init__(self, table=None, isStatsCompliant=None,): self.table = table + self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -17129,6 +17444,11 @@ def read(self, iprot): self.table.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -17143,6 +17463,10 @@ def write(self, oprot): oprot.writeFieldBegin('table', TType.STRUCT, 1) self.table.write(oprot) oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 2) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -17155,6 +17479,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) + value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -22055,6 +22380,213 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class AlterPartitionsRequest: + """ + Attributes: + - dbName + - tableName + - partitions + - environmentContext + - txnId + - writeId + - validWriteIdList + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tableName', None, None, ), # 2 + (3, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3 + (4, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4 + (5, TType.I64, 'txnId', None, -1, ), # 5 + (6, TType.I64, 'writeId', None, -1, ), # 6 + (7, TType.STRING, 'validWriteIdList', None, 
None, ), # 7 + ) + + def __init__(self, dbName=None, tableName=None, partitions=None, environmentContext=None, txnId=thrift_spec[5][4], writeId=thrift_spec[6][4], validWriteIdList=None,): + self.dbName = dbName + self.tableName = tableName + self.partitions = partitions + self.environmentContext = environmentContext + self.txnId = txnId + self.writeId = writeId + self.validWriteIdList = validWriteIdList + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.partitions = [] + (_etype840, _size837) = iprot.readListBegin() + for _i841 in xrange(_size837): + _elem842 = Partition() + _elem842.read(iprot) + self.partitions.append(_elem842) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.environmentContext = EnvironmentContext() + self.environmentContext.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('AlterPartitionsRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 2) + oprot.writeString(self.tableName) + oprot.writeFieldEnd() + if self.partitions is not None: + oprot.writeFieldBegin('partitions', TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter843 in self.partitions: + iter843.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.environmentContext is not None: + oprot.writeFieldBegin('environmentContext', TType.STRUCT, 4) + self.environmentContext.write(oprot) + oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 5) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 6) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tableName is None: + raise 
TProtocol.TProtocolException(message='Required field tableName is unset!') + if self.partitions is None: + raise TProtocol.TProtocolException(message='Required field partitions is unset!') + if self.environmentContext is None: + raise TProtocol.TProtocolException(message='Required field environmentContext is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tableName) + value = (value * 31) ^ hash(self.partitions) + value = (value * 31) ^ hash(self.environmentContext) + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.writeId) + value = (value * 31) ^ hash(self.validWriteIdList) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class AlterPartitionsResponse: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('AlterPartitionsResponse') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class MetaException(TException): """ Attributes: diff --git standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 0348ff263e..86b469cfbd 100644 --- standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -1062,6 +1062,8 @@ class Table CREATIONMETADATA = 16 CATNAME = 17 OWNERTYPE = 18 + WRITEID = 19 + ISSTATSCOMPLIANT = 20 FIELDS = { TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, @@ -1081,7 +1083,9 @@ class Table REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true}, CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true}, CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, - OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default => 1, :optional => true, :enum_class => ::PrincipalType} + OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default => 1, :optional => true, :enum_class => 
::PrincipalType}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} } def struct_fields; FIELDS; end @@ -1106,6 +1110,8 @@ class Partition PARAMETERS = 7 PRIVILEGES = 8 CATNAME = 9 + WRITEID = 10 + ISSTATSCOMPLIANT = 11 FIELDS = { VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}}, @@ -1116,7 +1122,9 @@ class Partition SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor}, PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} } def struct_fields; FIELDS; end @@ -1195,6 +1203,8 @@ class PartitionSpec SHAREDSDPARTITIONSPEC = 4 PARTITIONLIST = 5 CATNAME = 6 + WRITEID = 7 + ISSTATSCOMPLIANT = 8 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -1202,7 +1212,9 @@ class PartitionSpec ROOTPATH => {:type => ::Thrift::Types::STRING, :name => 'rootPath'}, SHAREDSDPARTITIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'sharedSDPartitionSpec', :class => ::PartitionSpecWithSharedSD, :optional => true}, PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} } def struct_fields; FIELDS; end @@ -1547,10 +1559,16 @@ class ColumnStatistics include ::Thrift::Struct, ::Thrift::Struct_Union STATSDESC = 1 STATSOBJ = 2 + TXNID = 3 + VALIDWRITEIDLIST = 4 + ISSTATSCOMPLIANT = 5 FIELDS = { STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc', :class => ::ColumnStatisticsDesc}, - STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}} + STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}, + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} } def struct_fields; FIELDS; end @@ -1567,10 +1585,12 @@ class AggrStats include ::Thrift::Struct, ::Thrift::Struct_Union COLSTATS = 1 PARTSFOUND = 2 + ISSTATSCOMPLIANT = 3 FIELDS = { COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => 
::ColumnStatisticsObj}}, - PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'} + PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} } def struct_fields; FIELDS; end @@ -1587,10 +1607,16 @@ class SetPartitionsStatsRequest include ::Thrift::Struct, ::Thrift::Struct_Union COLSTATS = 1 NEEDMERGE = 2 + TXNID = 3 + WRITEID = 4 + VALIDWRITEIDLIST = 5 FIELDS = { COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}}, - NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true} + NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true}, + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -2055,9 +2081,11 @@ end class TableStatsResult include ::Thrift::Struct, ::Thrift::Struct_Union TABLESTATS = 1 + ISSTATSCOMPLIANT = 2 FIELDS = { - TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}} + TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} } def struct_fields; FIELDS; end @@ -2072,9 +2100,11 @@ end class PartitionsStatsResult include ::Thrift::Struct, ::Thrift::Struct_Union PARTSTATS = 1 + ISSTATSCOMPLIANT = 2 FIELDS = { - PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}} + PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} } def struct_fields; FIELDS; end @@ -2092,12 +2122,16 @@ class TableStatsRequest TBLNAME = 2 COLNAMES = 3 CATNAME = 4 + TXNID = 5 + VALIDWRITEIDLIST = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -2118,13 +2152,17 @@ class PartitionsStatsRequest COLNAMES = 3 PARTNAMES = 4 CATNAME = 5 + TXNID = 6 + VALIDWRITEIDLIST = 7 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name 
=> 'tblName'}, COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}}, PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -2142,9 +2180,11 @@ end class AddPartitionsResult include ::Thrift::Struct, ::Thrift::Struct_Union PARTITIONS = 1 + ISSTATSCOMPLIANT = 2 FIELDS = { - PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true} + PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} } def struct_fields; FIELDS; end @@ -2163,6 +2203,8 @@ class AddPartitionsRequest IFNOTEXISTS = 4 NEEDRESULT = 5 CATNAME = 6 + TXNID = 7 + VALIDWRITEIDLIST = 8 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2170,7 +2212,9 @@ class AddPartitionsRequest PARTS => {:type => ::Thrift::Types::LIST, :name => 'parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}}, IFNOTEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifNotExists'}, NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -3813,12 +3857,16 @@ class GetTableRequest TBLNAME = 2 CAPABILITIES = 3 CATNAME = 4 + TXNID = 5 + VALIDWRITEIDLIST = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -3834,9 +3882,11 @@ end class GetTableResult include ::Thrift::Struct, ::Thrift::Struct_Union TABLE = 1 + ISSTATSCOMPLIANT = 2 FIELDS = { - TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table} + TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} } def struct_fields; FIELDS; end @@ -5005,6 +5055,53 @@ class GetRuntimeStatsRequest 
::Thrift::Struct.generate_accessors self end +class AlterPartitionsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TABLENAME = 2 + PARTITIONS = 3 + ENVIRONMENTCONTEXT = 4 + TXNID = 5 + WRITEID = 6 + VALIDWRITEIDLIST = 7 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, + PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}}, + ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext}, + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partitions is unset!') unless @partitions + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field environmentContext is unset!') unless @environmentContext + end + + ::Thrift::Struct.generate_accessors self +end + +class AlterPartitionsResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + class MetaException < ::Thrift::Exception include ::Thrift::Struct, ::Thrift::Struct_Union def initialize(message=nil) diff --git standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 2bd958ed16..fdcd3dec7c 100644 --- standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -1432,6 +1432,23 @@ module ThriftHiveMetastore return end + def alter_partitions_with_environment_context_req(req) + send_alter_partitions_with_environment_context_req(req) + return recv_alter_partitions_with_environment_context_req() + end + + def send_alter_partitions_with_environment_context_req(req) + send_message('alter_partitions_with_environment_context_req', Alter_partitions_with_environment_context_req_args, :req => req) + end + + def recv_alter_partitions_with_environment_context_req() + result = receive_message(Alter_partitions_with_environment_context_req_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? 
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'alter_partitions_with_environment_context_req failed: unknown result')
+    end
+
     def alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
       send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
       recv_alter_partition_with_environment_context()
@@ -4604,6 +4621,19 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'alter_partitions_with_environment_context', seqid)
     end

+    def process_alter_partitions_with_environment_context_req(seqid, iprot, oprot)
+      args = read_args(iprot, Alter_partitions_with_environment_context_req_args)
+      result = Alter_partitions_with_environment_context_req_result.new()
+      begin
+        result.success = @handler.alter_partitions_with_environment_context_req(args.req)
+      rescue ::InvalidOperationException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'alter_partitions_with_environment_context_req', seqid)
+    end
+
     def process_alter_partition_with_environment_context(seqid, iprot, oprot)
       args = read_args(iprot, Alter_partition_with_environment_context_args)
       result = Alter_partition_with_environment_context_result.new()
@@ -9332,6 +9362,42 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end

+  class Alter_partitions_with_environment_context_req_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REQ = 1
+
+    FIELDS = {
+      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::AlterPartitionsRequest}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Alter_partitions_with_environment_context_req_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::AlterPartitionsResponse},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidOperationException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Alter_partition_with_environment_context_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index 78ea01d968..a13b40dcd4 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -149,6 +149,10 @@ public String getAggregator(Configuration conf) {
   public static final String CASCADE = "CASCADE";

+  // TODO: when alter calls are switched to req/resp models, replace these and the above with fields.
+  public static final String TXN_ID = "WRITER_TXN_ID";
+  public static final String VALID_WRITE_IDS = "WRITER_WRITE_ID";
+
   public static final String TRUE = "true";

   public static final String FALSE = "false";
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index 050dca9abf..e7cf07ff09 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -197,6 +197,7 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName
    */
   List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
     final String dbname, final String name, final List<Partition> new_parts,
-    EnvironmentContext environmentContext,IHMSHandler handler)
+    EnvironmentContext environmentContext, long txnId, String writeIdList, long writeId,
+    IHMSHandler handler)
     throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
 }
\ No newline at end of file
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 93ac74c68b..8b2a6babeb 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -141,7 +141,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
     // check if table with the new name already exists
     if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
-      if (msdb.getTable(catName, newDbName, newTblName) != null) {
+      if (msdb.getTable(catName, newDbName, newTblName, -1, null) != null) {
         throw new InvalidOperationException("new table " + newDbName
             + "." + newTblName + " already exists");
       }
@@ -150,7 +150,8 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
       msdb.openTransaction();
       // get old table
-      oldt = msdb.getTable(catName, dbname, name);
+      // Note: we don't verify stats here; it's done below in alterTableUpdateTableColumnStats.
+      oldt = msdb.getTable(catName, dbname, name, -1, null);
       if (oldt == null) {
         throw new InvalidOperationException("table " +
             TableName.getQualified(catName, dbname, name) + " doesn't exist");
@@ -275,7 +276,8 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
             columnStatsNeedUpdated.put(part, colStats);
           }
         }
-        msdb.alterTable(catName, dbname, name, newt);
+        // Do not verify stats parameters on a partitioned table.
+        msdb.alterTable(catName, dbname, name, newt, -1, null);

         // alterPartition is only for changing the partition location in the table rename
         if (dataWasMoved) {
@@ -293,7 +295,8 @@
             for (Partition part : partBatch) {
               partValues.add(part.getValues());
             }
-            msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch);
+            msdb.alterPartitions(catName, newDbName, newTblName, partValues,
+                partBatch, -1, -1, null);
           }
         }
@@ -304,7 +307,7 @@
             msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues());
           }
         } else {
-          alterTableUpdateTableColumnStats(msdb, oldt, newt);
+          alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext);
         }
       } else {
         // operations other than table rename
@@ -327,21 +330,23 @@
             ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
                 part.getValues(), oldCols, oldt, part, null);
             assert(colStats == null);
+            // Note: we don't do txn stats validation here; this can only delete stats?
             if (cascade) {
-              msdb.alterPartition(catName, dbname, name, part.getValues(), part);
+              msdb.alterPartition(catName, dbname, name, part.getValues(), part, -1, null);
             } else {
               // update changed properties (stats)
               oldPart.setParameters(part.getParameters());
-              msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart);
+              msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart, -1, null);
             }
           }
-          msdb.alterTable(catName, dbname, name, newt);
+          // Don't validate table-level stats for a partitioned table.
+          msdb.alterTable(catName, dbname, name, newt, -1, null);
         } else {
           LOG.warn("Alter table not cascaded to partitions.");
-          alterTableUpdateTableColumnStats(msdb, oldt, newt);
+          alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext);
         }
       } else {
-        alterTableUpdateTableColumnStats(msdb, oldt, newt);
+        alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext);
       }
     }
@@ -444,13 +449,20 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
       new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
           .currentTimeMillis() / 1000));
     }
+    long txnId = -1;
+    String validWriteIds = null;
+    if (environmentContext != null && environmentContext.isSetProperties()
+        && environmentContext.getProperties().containsKey(StatsSetupConst.VALID_WRITE_IDS)) {
+      txnId = Long.parseLong(environmentContext.getProperties().get(StatsSetupConst.TXN_ID));
+      validWriteIds = environmentContext.getProperties().get(StatsSetupConst.VALID_WRITE_IDS);
+    }

     //alter partition
     if (part_vals == null || part_vals.size() == 0) {
       try {
         msdb.openTransaction();
-        Table tbl = msdb.getTable(catName, dbname, name);
+        Table tbl = msdb.getTable(catName, dbname, name, -1, null);
         if (tbl == null) {
           throw new InvalidObjectException(
               "Unable to alter partition because table or database does not exist.");
@@ -471,7 +483,8 @@
           updateOrGetPartitionColumnStats(msdb, catName, dbname, name, new_part.getValues(),
               oldPart.getSd().getCols(), tbl, new_part, null);
         }
-        msdb.alterPartition(catName, dbname, name, new_part.getValues(), new_part);
+        msdb.alterPartition(
+            catName, dbname, name, new_part.getValues(), new_part, txnId, validWriteIds);
         if (transactionalListeners != null &&
!transactionalListeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, @@ -506,7 +519,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String Database db; try { msdb.openTransaction(); - Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name); + Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name, -1, null); if (tbl == null) { throw new InvalidObjectException( "Unable to alter partition because table or database does not exist."); @@ -606,7 +619,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues()); ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(), oldPart.getSd().getCols(), tbl, new_part, null); - msdb.alterPartition(catName, dbname, name, part_vals, new_part); + msdb.alterPartition(catName, dbname, name, part_vals, new_part, txnId, validWriteIds); if (cs != null) { cs.getStatsDesc().setPartName(newPartName); try { @@ -649,20 +662,23 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String return oldPart; } + @Deprecated @Override public List alterPartitions(final RawStore msdb, Warehouse wh, final String dbname, final String name, final List new_parts, EnvironmentContext environmentContext) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts, - environmentContext, null); + environmentContext, -1, null, -1, null); } @Override public List alterPartitions(final RawStore msdb, Warehouse wh, final String catName, final String dbname, final String name, final List new_parts, - EnvironmentContext environmentContext, IHMSHandler handler) + EnvironmentContext environmentContext, + long txnId, String writeIdList, long writeId, + IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { List oldParts = new ArrayList<>(); List> partValsList = new ArrayList<>(); @@ -675,7 +691,8 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String try { msdb.openTransaction(); - Table tbl = msdb.getTable(catName, dbname, name); + // Note: should we pass in write ID here? We only update stats on parts so probably not. + Table tbl = msdb.getTable(catName, dbname, name, -1, null); if (tbl == null) { throw new InvalidObjectException( "Unable to alter partitions because table or database does not exist."); @@ -710,7 +727,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String } } - msdb.alterPartitions(catName, dbname, name, partValsList, new_parts); + msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, writeId, txnId, writeIdList); Iterator oldPartsIt = oldParts.iterator(); for (Partition newPart : new_parts) { Partition oldPart; @@ -778,7 +795,8 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) { } @VisibleForTesting - void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable) + void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable, + EnvironmentContext ec) throws MetaException, InvalidObjectException { String catName = normalizeIdentifier(oldTable.isSetCatName() ? 
oldTable.getCatName() : getDefaultCatalog(conf)); @@ -786,6 +804,13 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa String tableName = normalizeIdentifier(oldTable.getTableName()); String newDbName = newTable.getDbName().toLowerCase(); String newTableName = normalizeIdentifier(newTable.getTableName()); + long txnId = -1; + String validWriteIds = null; + if (ec != null && ec.isSetProperties() && ec.getProperties().containsKey( + StatsSetupConst.VALID_WRITE_IDS)) { + txnId = Long.parseLong(ec.getProperties().get(StatsSetupConst.TXN_ID)); + validWriteIds = ec.getProperties().get(StatsSetupConst.VALID_WRITE_IDS); + } try { List oldCols = oldTable.getSd().getCols(); @@ -807,7 +832,7 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa oldColNames.add(oldCol.getName()); } - // Collect column stats which need to be rewritten and remove old stats + // Collect column stats which need to be rewritten and remove old stats. colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames); if (colStats == null) { updateColumnStats = false; @@ -842,7 +867,7 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa } // Change to new table and append stats for the new table - msdb.alterTable(catName, dbName, tableName, newTable); + msdb.alterTable(catName, dbName, tableName, newTable, txnId, validWriteIds); if (updateColumnStats && !newStatsObjs.isEmpty()) { ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); statsDesc.setDbName(newDbName); @@ -882,6 +907,7 @@ private ColumnStatistics updateOrGetPartitionColumnStats( oldColNames.add(oldCol.getName()); } List oldPartNames = Lists.newArrayList(oldPartName); + // Note: doesn't take txn stats into account. This method can only remove stats. 
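[Editor's note — illustrative sketch, not part of this patch.] The txnId/validWriteIds extraction just above is repeated almost verbatim in alterPartition and alterTableUpdateTableColumnStats; a hypothetical pair of helpers could centralize it. The logic mirrors the patch's own (guard on the VALID_WRITE_IDS key, then parse TXN_ID), with the sentinels -1 and null meaning "skip snapshot validation":

    private static long getWriterTxnId(EnvironmentContext ec) {
      if (ec != null && ec.isSetProperties()
          && ec.getProperties().containsKey(StatsSetupConst.VALID_WRITE_IDS)) {
        return Long.parseLong(ec.getProperties().get(StatsSetupConst.TXN_ID));
      }
      return -1; // sentinel: no snapshot validation
    }

    private static String getWriterWriteIds(EnvironmentContext ec) {
      return (ec != null && ec.isSetProperties())
          ? ec.getProperties().get(StatsSetupConst.VALID_WRITE_IDS) : null;
    }
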
List partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname, oldPartNames, oldColNames); assert (partsColStats.size() <= 1); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index c6c04b757a..746e6cd5de 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -708,6 +708,10 @@ public static RawStore getMSForConf(Configuration conf) throws MetaException { @Override public TxnStore getTxnHandler() { + return getMsThreadTxnHandler(conf); + } + + public static TxnStore getMsThreadTxnHandler(Configuration conf) { TxnStore txn = threadLocalTxn.get(); if (txn == null) { txn = TxnUtils.getTxnStore(conf); @@ -2431,7 +2435,7 @@ public void add_check_constraint(AddCheckConstraintRequest req) private boolean is_table_exists(RawStore ms, String catName, String dbname, String name) throws MetaException { - return (ms.getTable(catName, dbname, name) != null); + return (ms.getTable(catName, dbname, name, -1, null) != null); } private boolean drop_table_core(final RawStore ms, final String catName, final String dbname, @@ -2844,7 +2848,8 @@ private boolean isExternalTablePurge(Table table) { public Table get_table(final String dbname, final String name) throws MetaException, NoSuchObjectException { String[] parsedDbName = parseDbName(dbname, conf); - return getTableInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null); + return getTableInternal( + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, -1, null); } @Override @@ -2852,11 +2857,12 @@ public GetTableResult get_table_req(GetTableRequest req) throws MetaException, NoSuchObjectException { String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); return new GetTableResult(getTableInternal(catName, req.getDbName(), req.getTblName(), - req.getCapabilities())); + req.getCapabilities(), req.getTxnId(), req.getValidWriteIdList())); } private Table getTableInternal(String catName, String dbname, String name, - ClientCapabilities capabilities) throws MetaException, NoSuchObjectException { + ClientCapabilities capabilities, long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { if (isInTest) { assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY, "Hive tests", "get_table_req"); @@ -2866,7 +2872,7 @@ private Table getTableInternal(String catName, String dbname, String name, startTableFunction("get_table", catName, dbname, name); Exception ex = null; try { - t = get_table_core(catName, dbname, name); + t = get_table_core(catName, dbname, name, txnId, writeIdList); if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) { assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES, "insert-only tables", "get_table_req"); @@ -2901,11 +2907,25 @@ private Table getTableInternal(String catName, String dbname, String name, } @Override - public Table get_table_core(final String catName, final String dbname, final String name) + public Table get_table_core( + final String catName, + final String dbname, + final String name) + throws MetaException, NoSuchObjectException { + return get_table_core(catName, dbname, name, -1, null); + } + + @Override + public Table get_table_core( + final String catName, + final String dbname, + final String name, + final long txnId, + final String writeIdList) throws MetaException, NoSuchObjectException { Table t = null; try { - t = getMS().getTable(catName, dbname, name); + t = getMS().getTable(catName, dbname, name, txnId, writeIdList); if (t == null) { throw new NoSuchObjectException(TableName.getQualified(catName, dbname, name) + " table not found"); @@ -3087,7 +3107,7 @@ private Partition append_partition_common(RawStore ms, String catName, String db MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern); - tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName()); + tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null); if (tbl == null) { throw new InvalidObjectException( "Unable to add partition because table or database do not exist"); @@ -3281,7 +3301,7 @@ public boolean equals(Object obj) { try { ms.openTransaction(); - tbl = ms.getTable(catName, dbName, tblName); + tbl = ms.getTable(catName, dbName, tblName, -1, null); if (tbl == null) { throw new InvalidObjectException("Unable to add partitions because " + TableName.getQualified(catName, dbName, tblName) + @@ -3637,7 +3657,7 @@ private int add_partitions_pspec_core(RawStore ms, String catName, String dbName Database db = null; try { ms.openTransaction(); - tbl = ms.getTable(catName, dbName, tblName); + tbl = ms.getTable(catName, dbName, tblName, -1, null); if (tbl == null) { throw new InvalidObjectException("Unable to add partitions because " + "database or table " + dbName + "." 
+ tblName + " does not exist"); @@ -3807,7 +3827,7 @@ private Partition add_partition_core(final RawStore ms, } try { ms.openTransaction(); - tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName()); + tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null); if (tbl == null) { throw new InvalidObjectException( "Unable to add partition because table or database do not exist"); @@ -3928,14 +3948,16 @@ public Partition exchange_partition(Map partitionSpecs, ms.openTransaction(); Table destinationTable = - ms.getTable(parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName); + ms.getTable( + parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName, -1, null); if (destinationTable == null) { throw new MetaException( "The destination table " + TableName.getQualified(parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName) + " not found"); } Table sourceTable = - ms.getTable(parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName); + ms.getTable( + parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName, -1, null); if (sourceTable == null) { throw new MetaException("The source table " + TableName.getQualified(parsedSourceDbName[CAT_NAME], @@ -4112,7 +4134,7 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam try { ms.openTransaction(); part = ms.getPartition(catName, db_name, tbl_name, part_vals); - tbl = get_table_core(catName, db_name, tbl_name); + tbl = get_table_core(catName, db_name, tbl_name, -1, null); tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData); firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this)); mustPurge = isMustPurge(envContext, tbl); @@ -4840,7 +4862,7 @@ private void rename_partition(final String catName, final String db_name, final Table table = null; if (!listeners.isEmpty()) { if (table == null) { - table = getMS().getTable(catName, db_name, tbl_name); + table = getMS().getTable(catName, db_name, tbl_name, -1, null); } MetaStoreListenerNotifier.notifyEvent(listeners, @@ -4869,12 +4891,35 @@ private void rename_partition(final String catName, final String db_name, final public void alter_partitions(final String db_name, final String tbl_name, final List new_parts) throws TException { - alter_partitions_with_environment_context(db_name, tbl_name, new_parts, null); + alter_partitions_with_environment_context( + db_name, tbl_name, new_parts, null, -1, null, -1); + } + + @Override + public AlterPartitionsResponse alter_partitions_with_environment_context_req( + AlterPartitionsRequest req) + throws TException { + alter_partitions_with_environment_context( + req.getDbName(), req.getTableName(), req.getPartitions(), req.getEnvironmentContext(), + req.isSetTxnId() ? req.getTxnId() : -1, + req.isSetValidWriteIdList() ? req.getValidWriteIdList() : null, + req.isSetWriteId() ? req.getWriteId() : -1); + return new AlterPartitionsResponse(); } + // The old API we are keeping for backward compat. Not used within Hive. 
+ @Deprecated @Override public void alter_partitions_with_environment_context(final String db_name, final String tbl_name, final List new_parts, EnvironmentContext environmentContext) + throws TException { + alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environmentContext, + -1, null, -1); + } + + private void alter_partitions_with_environment_context(final String db_name, final String tbl_name, + final List new_parts, EnvironmentContext environmentContext, + long txnId, String writeIdList, long writeId) throws TException { String[] parsedDbName = parseDbName(db_name, conf); @@ -4898,7 +4943,7 @@ public void alter_partitions_with_environment_context(final String db_name, fina firePreEvent(new PreAlterPartitionEvent(parsedDbName[DB_NAME], tbl_name, null, tmpPart, this)); } oldParts = alterHandler.alterPartitions(getMS(), wh, parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, this); + parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, txnId, writeIdList, writeId, this); Iterator olditr = oldParts.iterator(); // Only fetch the table if we have a listener that needs it. Table table = null; @@ -4912,7 +4957,8 @@ public void alter_partitions_with_environment_context(final String db_name, fina } if (table == null) { - table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + table = getMS().getTable( + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, -1, null); } if (!listeners.isEmpty()) { @@ -5332,7 +5378,7 @@ public String get_config_value(String name, String defaultValue) private List getPartValsFromName(RawStore ms, String catName, String dbName, String tblName, String partName) throws MetaException, InvalidObjectException { - Table t = ms.getTable(catName, dbName, tblName); + Table t = ms.getTable(catName, dbName, tblName, -1, null); if (t == null) { throw new InvalidObjectException(dbName + "." + tblName + " table not found"); @@ -5587,7 +5633,8 @@ public ColumnStatistics get_table_column_statistics(String dbName, String tableN ColumnStatistics statsObj = null; try { statsObj = getMS().getTableColumnStatistics( - parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName)); + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName), + -1, null); if (statsObj != null) { assert statsObj.getStatsObjSize() <= 1; } @@ -5611,7 +5658,9 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) thro lowerCaseColNames.add(colName.toLowerCase()); } try { - ColumnStatistics cs = getMS().getTableColumnStatistics(catName, dbName, tblName, lowerCaseColNames); + ColumnStatistics cs = getMS().getTableColumnStatistics( + catName, dbName, tblName, lowerCaseColNames, + request.getTxnId(), request.getValidWriteIdList()); result = new TableStatsResult((cs == null || cs.getStatsObj() == null) ? Lists.newArrayList() : cs.getStatsObj()); } finally { @@ -5620,9 +5669,12 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) thro return result; } + @Deprecated @Override public ColumnStatistics get_partition_column_statistics(String dbName, String tableName, String partName, String colName) throws TException { + // Note: this method appears to be unused within Hive. + // It doesn't take txn stats into account. 
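[Editor's note — illustrative sketch, not part of this patch.] The get_table_statistics_req path above threads request.getTxnId() and request.getValidWriteIdList() through to the RawStore. A hypothetical caller of the new txn-aware path would build the request the same way HiveMetaStoreClient does further down in this patch (client, dbName, tableName, colNames, txnId and writeIds are assumed locals):

    TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames);
    rqst.setTxnId(txnId);                 // new optional Thrift field
    rqst.setValidWriteIdList(writeIds);   // new optional Thrift field
    List<ColumnStatisticsObj> stats =
        client.get_table_statistics_req(rqst).getTableStats();
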
dbName = dbName.toLowerCase(); String[] parsedDbName = parseDbName(dbName, conf); tableName = tableName.toLowerCase(); @@ -5670,7 +5722,9 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques } try { List stats = getMS().getPartitionColumnStatistics( - catName, dbName, tblName, lowerCasePartNames, lowerCaseColNames); + catName, dbName, tblName, lowerCasePartNames, lowerCaseColNames, + request.isSetTxnId() ? request.getTxnId() : -1, + request.isSetValidWriteIdList() ? request.getValidWriteIdList() : null); Map> map = new HashMap<>(); for (ColumnStatistics stat : stats) { map.put(stat.getStatsDesc().getPartName(), stat.getStatsObj()); @@ -7403,8 +7457,9 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TExce AggrStats aggrStats = null; try { - aggrStats = new AggrStats(getMS().get_aggr_stats_for(catName, dbName, tblName, - lowerCasePartNames, lowerCaseColNames)); + aggrStats = getMS().get_aggr_stats_for(catName, dbName, tblName, + lowerCasePartNames, lowerCaseColNames, request.getTxnId(), + request.getValidWriteIdList()); return aggrStats; } finally { endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName()); @@ -7438,7 +7493,10 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc } else { if (request.isSetNeedMerge() && request.isNeedMerge()) { // one single call to get all column stats - ColumnStatistics csOld = getMS().getTableColumnStatistics(catName, dbName, tableName, colNames); + ColumnStatistics csOld = + getMS().getTableColumnStatistics( + catName, dbName, tableName, colNames, + request.getTxnId(), request.getValidWriteIdList()); Table t = getTable(catName, dbName, tableName); // we first use t.getParameters() to prune the stats MetaStoreUtils.getMergableCols(firstColStats, t.getParameters()); @@ -7478,8 +7536,10 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc // a single call to get all column stats for all partitions List partitionNames = new ArrayList<>(); partitionNames.addAll(newStatsMap.keySet()); - List csOlds = getMS().getPartitionColumnStatistics(catName, dbName, - tableName, partitionNames, colNames); + List csOlds = + getMS().getPartitionColumnStatistics( + catName, dbName, tableName, partitionNames, colNames, + request.getTxnId(), request.getValidWriteIdList()); if (newStatsMap.values().size() != csOlds.size()) { // some of the partitions miss stats. 
LOG.debug("Some of the partitions miss stats."); @@ -7487,13 +7547,15 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc for (ColumnStatistics csOld : csOlds) { oldStatsMap.put(csOld.getStatsDesc().getPartName(), csOld); } + // another single call to get all the partition objects partitions = getMS().getPartitionsByNames(catName, dbName, tableName, partitionNames); for (int index = 0; index < partitionNames.size(); index++) { mapToPart.put(partitionNames.get(index), partitions.get(index)); } } - Table t = getTable(catName, dbName, tableName); + Table t = getTable(catName, dbName, tableName, + request.getTxnId(), request.getValidWriteIdList()); for (Entry entry : newStatsMap.entrySet()) { ColumnStatistics csNew = entry.getValue(); ColumnStatistics csOld = oldStatsMap.get(entry.getKey()); @@ -7520,7 +7582,13 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc private Table getTable(String catName, String dbName, String tableName) throws MetaException, InvalidObjectException { - Table t = getMS().getTable(catName, dbName, tableName); + return getTable(catName, dbName, tableName, -1, null); + } + + private Table getTable(String catName, String dbName, String tableName, + long txnId, String writeIdList) + throws MetaException, InvalidObjectException { + Table t = getMS().getTable(catName, dbName, tableName, txnId, writeIdList); if (t == null) { throw new InvalidObjectException(TableName.getQualified(catName, dbName, tableName) + " table not found"); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index bfd7141a8b..cc417eab3b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -791,6 +791,50 @@ public Partition exchange_partition(Map partitionSpecs, String s } @Override + public Map> getPartitionColumnStatistics( + String dbName, String tableName, List partNames, List colNames, + long txnId, String validWriteIdList) + throws NoSuchObjectException, MetaException, TException { + return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName, + partNames, colNames, txnId, validWriteIdList); + } + + @Override + public Map> getPartitionColumnStatistics( + String catName, String dbName, String tableName, List partNames, + List colNames, long txnId, String validWriteIdList) + throws NoSuchObjectException, MetaException, TException { + PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames, + partNames); + rqst.setCatName(catName); + rqst.setTxnId(txnId); + rqst.setValidWriteIdList(validWriteIdList); + return client.get_partitions_statistics_req(rqst).getPartStats(); + } + + @Override + public AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, + List partNames, long txnId, String writeIdList) + throws NoSuchObjectException, MetaException, TException { + return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames, + partNames, txnId, writeIdList); } + + @Override + public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List colNames, + List partNames, long txnId, String writeIdList) + throws NoSuchObjectException, MetaException, TException { + if (colNames.isEmpty() || partNames.isEmpty()) { + LOG.debug("Columns is empty or 
partition names are empty: Short-circuiting stats eval on client side."); + return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate + } + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + req.setCatName(catName); + req.setTxnId(txnId); + req.setValidWriteIdList(writeIdList); + return client.get_aggr_stats_for(req); + } + + @Override public List exchange_partitions(Map partitionSpecs, String sourceCat, String sourceDb, String sourceTable, String destCat, String destDb, String destTableName) throws TException { @@ -1584,6 +1628,14 @@ public Table getTable(String dbname, String name) throws TException { } @Override + public Table getTable(String dbname, String name, + long txnId, String validWriteIdList) + throws MetaException, TException, NoSuchObjectException { + return getTable(getDefaultCatalog(conf), dbname, name, + txnId, validWriteIdList); + } + + @Override public Table getTable(String catName, String dbName, String tableName) throws TException { GetTableRequest req = new GetTableRequest(dbName, tableName); req.setCatName(catName); @@ -1593,6 +1645,18 @@ public Table getTable(String catName, String dbName, String tableName) throws TE } @Override + public Table getTable(String catName, String dbName, String tableName, + long txnId, String validWriteIdList) throws TException { + GetTableRequest req = new GetTableRequest(dbName, tableName); + req.setCatName(catName); + req.setCapabilities(version); + req.setTxnId(txnId); + req.setValidWriteIdList(validWriteIdList); + Table t = client.get_table_req(req).getTable(); + return deepCopy(filterHook.filterTable(t)); + } + + @Override public List
getTableObjectsByName(String dbName, List tableNames) throws TException { return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames); @@ -1821,21 +1885,43 @@ public void alter_partition(String catName, String dbName, String tblName, Parti @Override public void alter_partitions(String dbName, String tblName, List newParts) throws TException { - alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null); + alter_partitions( + getDefaultCatalog(conf), dbName, tblName, newParts, new EnvironmentContext(), -1, null, -1); } @Override public void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext) throws TException { - alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext); + alter_partitions( + getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext, -1, null, -1); + } + + @Override + public void alter_partitions(String dbName, String tblName, List newParts, + EnvironmentContext environmentContext, + long txnId, String writeIdList, long writeId) + throws InvalidOperationException, MetaException, TException { + alter_partitions(getDefaultCatalog(conf), + dbName, tblName, newParts, environmentContext, txnId, writeIdList, writeId); + } @Override public void alter_partitions(String catName, String dbName, String tblName, List newParts, - EnvironmentContext environmentContext) throws TException { - client.alter_partitions_with_environment_context(prependCatalogToDbName(catName, dbName, conf), - tblName, newParts, environmentContext); + EnvironmentContext environmentContext, + long txnId, String writeIdList, long writeId) throws TException { + AlterPartitionsRequest req = new AlterPartitionsRequest(); + req.setDbName(prependCatalogToDbName(catName, dbName, conf)); + req.setTableName(tblName); + req.setPartitions(newParts); + req.setEnvironmentContext(environmentContext); + req.setTxnId(txnId); + req.setValidWriteIdList(writeIdList); + req.setWriteId(writeId); + client.alter_partitions_with_environment_context_req(req); } @Override @@ -1967,6 +2053,28 @@ public void flushCache() { } @Override + public List getTableColumnStatistics(String dbName, String tableName, + List colNames, + long txnId, + String validWriteIdList) throws TException { + return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames, + txnId, validWriteIdList); + } + + @Override + public List getTableColumnStatistics(String catName, String dbName, + String tableName, + List colNames, + long txnId, + String validWriteIdList) throws TException { + TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames); + rqst.setCatName(catName); + rqst.setTxnId(txnId); + rqst.setValidWriteIdList(validWriteIdList); + return client.get_table_statistics_req(rqst).getTableStats(); + } + + @Override public Map> getPartitionColumnStatistics( String dbName, String tableName, List partNames, List colNames) throws TException { @@ -3323,4 +3431,5 @@ public void addRuntimeStat(RuntimeStat stat) throws TException { req.setMaxCreateTime(maxCreateTime); return client.get_runtime_stats(req); } + } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java index 29c98d1fa7..3a65f77be4 100644 ---
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java @@ -90,6 +90,11 @@ Database get_database_core(final String catName, final String name) Table get_table_core(final String catName, final String dbname, final String name) throws MetaException, NoSuchObjectException; + Table get_table_core(final String catName, final String dbname, + final String name, final long txnId, + final String writeIdList) + throws MetaException, NoSuchObjectException; + /** * Get a list of all transactional listeners. * @return list of listeners. diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index b5d147bcf7..27d96e5f07 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -714,6 +714,10 @@ Database getDatabase(String catalogName, String databaseName) Table getTable(String dbName, String tableName) throws MetaException, TException, NoSuchObjectException; + Table getTable(String dbName, String tableName, + long txnId, String validWriteIdList) + throws MetaException, TException, NoSuchObjectException; + /** * Get a table object. * @param catName catalog the table is in. @@ -725,6 +729,8 @@ Table getTable(String dbName, String tableName) throws MetaException, */ Table getTable(String catName, String dbName, String tableName) throws MetaException, TException; + Table getTable(String catName, String dbName, String tableName, + long txnId, String validWriteIdList) throws TException; /** * Get tables as objects (rather than just fetching their names). This is more expensive and * should only be used if you actually need all the information about the tables. @@ -2127,6 +2133,11 @@ void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException; + void alter_partitions(String dbName, String tblName, List newParts, + EnvironmentContext environmentContext, + long txnId, String writeIdList, long writeId) + throws InvalidOperationException, MetaException, TException; + /** * updates a list of partitions * @param catName catalog name. 
@@ -2146,7 +2157,7 @@ void alter_partitions(String dbName, String tblName, List newParts, default void alter_partitions(String catName, String dbName, String tblName, List newParts) throws InvalidOperationException, MetaException, TException { - alter_partitions(catName, dbName, tblName, newParts, null); + alter_partitions(catName, dbName, tblName, newParts, new EnvironmentContext(), -1, null, -1); } /** @@ -2167,7 +2178,8 @@ default void alter_partitions(String catName, String dbName, String tblName, * if error in communicating with metastore server */ void alter_partitions(String catName, String dbName, String tblName, List newParts, - EnvironmentContext environmentContext) + EnvironmentContext environmentContext, + long txnId, String writeIdList, long writeId) throws InvalidOperationException, MetaException, TException; /** @@ -2348,6 +2360,12 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) List getTableColumnStatistics(String dbName, String tableName, List colNames) throws NoSuchObjectException, MetaException, TException; + List getTableColumnStatistics(String dbName, String tableName, + List colNames, + long txnId, + String validWriteIdList) + throws NoSuchObjectException, MetaException, TException; + /** * Get the column statistics for a set of columns in a table. This should only be used for * non-partitioned tables. For partitioned tables use @@ -2365,6 +2383,11 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) List colNames) throws NoSuchObjectException, MetaException, TException; + List getTableColumnStatistics(String catName, String dbName, String tableName, + List colNames, + long txnId, + String validWriteIdList) + throws NoSuchObjectException, MetaException, TException; /** * Get the column statistics for a set of columns in a partition. * @param dbName database name @@ -2381,6 +2404,11 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) String tableName, List partNames, List colNames) throws NoSuchObjectException, MetaException, TException; + Map> getPartitionColumnStatistics(String dbName, + String tableName, List partNames, List colNames, + long txnId, String validWriteIdList) + throws NoSuchObjectException, MetaException, TException; + /** * Get the column statistics for a set of columns in a partition. * @param catName catalog name @@ -2398,6 +2426,11 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) String catName, String dbName, String tableName, List partNames, List colNames) throws NoSuchObjectException, MetaException, TException; + Map> getPartitionColumnStatistics( + String catName, String dbName, String tableName, + List partNames, List colNames, + long txnId, String validWriteIdList) + throws NoSuchObjectException, MetaException, TException; /** * Delete partition level column statistics given dbName, tableName, partName and colName, or * all columns in a partition. @@ -3247,6 +3280,10 @@ GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, List partName) throws NoSuchObjectException, MetaException, TException; + AggrStats getAggrColStatsFor(String dbName, String tblName, + List colNames, List partName, + long txnId, String writeIdList) throws NoSuchObjectException, MetaException, TException; + /** * Get aggregated column stats for a set of partitions. 
* @param catName catalog name @@ -3263,6 +3300,10 @@ AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List colNames, List partNames) throws NoSuchObjectException, MetaException, TException; + AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, + List colNames, List partNames, + long txnId, String writeIdList) + throws NoSuchObjectException, MetaException, TException; /** * Set table or partition column statistics. * @param request request object, contains all the table, partition, and statistics information diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 8721022282..aa29dd9113 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -80,142 +80,29 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.DatabaseName; -import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.common.*; import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.FunctionType; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.ISchema; -import org.apache.hadoop.hive.metastore.api.ISchemaName; -import org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; -import 
org.apache.hadoop.hive.metastore.api.PartitionValuesRow; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; -import org.apache.hadoop.hive.metastore.api.ResourceType; -import org.apache.hadoop.hive.metastore.api.ResourceUri; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.RuntimeStat; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SchemaCompatibility; -import org.apache.hadoop.hive.metastore.api.SchemaType; -import org.apache.hadoop.hive.metastore.api.SchemaValidation; -import org.apache.hadoop.hive.metastore.api.SchemaVersion; -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; -import org.apache.hadoop.hive.metastore.api.SchemaVersionState; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.SerdeType; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMPool; -import org.apache.hadoop.hive.metastore.api.WMPoolTrigger; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; -import org.apache.hadoop.hive.metastore.api.WriteEventInfo; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider; import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; -import org.apache.hadoop.hive.metastore.model.MCatalog; -import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; -import org.apache.hadoop.hive.metastore.model.MConstraint; -import org.apache.hadoop.hive.metastore.model.MCreationMetadata; -import org.apache.hadoop.hive.metastore.model.MDBPrivilege; -import org.apache.hadoop.hive.metastore.model.MDatabase; -import org.apache.hadoop.hive.metastore.model.MDelegationToken; -import 
org.apache.hadoop.hive.metastore.model.MFieldSchema; -import org.apache.hadoop.hive.metastore.model.MFunction; -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; -import org.apache.hadoop.hive.metastore.model.MISchema; -import org.apache.hadoop.hive.metastore.model.MMasterKey; -import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties; -import org.apache.hadoop.hive.metastore.model.MNotificationLog; -import org.apache.hadoop.hive.metastore.model.MNotificationNextId; -import org.apache.hadoop.hive.metastore.model.MOrder; -import org.apache.hadoop.hive.metastore.model.MPartition; -import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics; -import org.apache.hadoop.hive.metastore.model.MPartitionEvent; -import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; -import org.apache.hadoop.hive.metastore.model.MResourceUri; -import org.apache.hadoop.hive.metastore.model.MRole; -import org.apache.hadoop.hive.metastore.model.MRoleMap; -import org.apache.hadoop.hive.metastore.model.MRuntimeStat; -import org.apache.hadoop.hive.metastore.model.MSchemaVersion; -import org.apache.hadoop.hive.metastore.model.MSerDeInfo; -import org.apache.hadoop.hive.metastore.model.MStorageDescriptor; -import org.apache.hadoop.hive.metastore.model.MStringList; -import org.apache.hadoop.hive.metastore.model.MTable; -import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics; -import org.apache.hadoop.hive.metastore.model.MTablePrivilege; -import org.apache.hadoop.hive.metastore.model.MType; -import org.apache.hadoop.hive.metastore.model.MVersionTable; -import org.apache.hadoop.hive.metastore.model.MWMMapping; +import org.apache.hadoop.hive.metastore.model.*; import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType; -import org.apache.hadoop.hive.metastore.model.MWMPool; -import org.apache.hadoop.hive.metastore.model.MWMResourcePlan; import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status; -import org.apache.hadoop.hive.metastore.model.MWMTrigger; -import org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.tools.SQLGenerator; +import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.ObjectPair; +import org.apache.hive.common.util.TxnIdUtils; import org.apache.thrift.TException; import org.datanucleus.AbstractNucleusContext; import org.datanucleus.ClassLoaderResolver; @@ -1305,10 +1192,15 @@ public boolean dropType(String typeName) { @Override public void createTable(Table tbl) throws InvalidObjectException, MetaException { boolean commited = false; + MTable mtbl = null; + try { openTransaction(); - MTable mtbl = convertToMTable(tbl); + mtbl = convertToMTable(tbl); + if (TxnUtils.isTransactionalTable(tbl)) { + mtbl.setWriteId(tbl.getWriteId()); + } pm.makePersistent(mtbl); if (tbl.getCreationMetadata() != null) { @@ -1419,6 +1311,9 @@ public boolean dropTable(String 
catName, String dbName, String tableName) TableName.getQualified(catName, dbName, tableName)); } + // TODO: this conversion appears to be unused; consider removing it. + Table table = convertToTable(tbl); + List tabConstraints = listAllTableConstraintsWithOptionalConstraintName( catName, dbName, tableName, null); if (CollectionUtils.isNotEmpty(tabConstraints)) { @@ -1517,17 +1412,51 @@ private boolean dropCreationMetadata(String catName, String dbName, String table return mConstraints; } + private static String getFullyQualifiedTableName(String dbName, String tblName) { + return ((dbName == null || dbName.isEmpty()) ? "" : "\"" + dbName + "\".") + + "\"" + tblName + "\""; + } + @Override - public Table getTable(String catName, String dbName, String tableName) throws MetaException { + public Table getTable(String catName, String dbName, String tableName) + throws MetaException { + return getTable(catName, dbName, tableName, -1, null); + } + + @Override + public Table getTable(String catName, String dbName, String tableName, + long txnId, String writeIdList) + throws MetaException { boolean commited = false; Table tbl = null; try { openTransaction(); - tbl = convertToTable(getMTable(catName, dbName, tableName)); + MTable mtable = getMTable(catName, dbName, tableName); + tbl = convertToTable(mtable); // Retrieve creation metadata if needed if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) { tbl.setCreationMetadata( - convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName))); + convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName))); + } + + // If this is a transactional, non-partitioned table, check whether the current + // table statistics in the metastore comply with the client query's snapshot isolation. + // Note: a partitioned table keeps its stats and write-id snapshot in MPartition. + if (writeIdList != null) { + if (tbl != null + && TxnUtils.isTransactionalTable(tbl) + && tbl.getPartitionKeysSize() == 0) { + if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList)) { + tbl.setIsStatsCompliant(true); + } else { + tbl.setIsStatsCompliant(false); + // Do not persist the following state, since it is query-specific (not global). + StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters."); + } + } } commited = commitTransaction(); } finally { @@ -2051,12 +1980,15 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, String ownerType = (ownerPrincipalType == null) ? PrincipalType.USER.name() : ownerPrincipalType.name(); // A new table is always created with a new column descriptor - return new MTable(normalizeIdentifier(tbl.getTableName()), mdb, + MTable mtable = new MTable(normalizeIdentifier(tbl.getTableName()), mdb, convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), ownerType, tbl .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(), tableType); + if (TxnUtils.isTransactionalTable(tbl)) { + mtable.setWriteId(tbl.getWriteId()); + } + return mtable; } private List convertToMFieldSchemas(List keys) { @@ -2333,6 +2266,7 @@ public boolean addPartitions(String catName, String dbName, String tblName, List + dbName + "."
+ tblName + ": " + part); } MPartition mpart = convertToMPart(part, table, true); + toPersist.add(mpart); int now = (int)(System.currentTimeMillis()/1000); if (tabGrants != null) { @@ -2444,7 +2378,9 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { boolean success = false; boolean commited = false; + try { + openTransaction(); String catName = part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf); MTable table = this.getMTable(catName, part.getDbName(), part.getTableName()); List tabGrants = null; @@ -2454,8 +2390,7 @@ public boolean addPartition(Partition part) throws InvalidObjectException, tabColumnGrants = this.listTableAllColumnGrants( catName, part.getDbName(), part.getTableName()); } - openTransaction(); - MPartition mpart = convertToMPart(part, true); + MPartition mpart = convertToMPart(part, table, true); pm.makePersistent(mpart); int now = (int)(System.currentTimeMillis()/1000); @@ -2497,14 +2432,38 @@ public boolean addPartition(Partition part) throws InvalidObjectException, @Override public Partition getPartition(String catName, String dbName, String tableName, List part_vals) throws NoSuchObjectException, MetaException { + return getPartition(catName, dbName, tableName, part_vals, -1, null); + } + + @Override + public Partition getPartition(String catName, String dbName, String tableName, + List part_vals, + long txnId, String writeIdList) + throws NoSuchObjectException, MetaException { openTransaction(); - Partition part = convertToPart(getMPartition(catName, dbName, tableName, part_vals)); + MTable table = this.getMTable(catName, dbName, tableName); + MPartition mpart = getMPartition(catName, dbName, tableName, part_vals); + Partition part = convertToPart(mpart); commitTransaction(); if(part == null) { throw new NoSuchObjectException("partition values=" + part_vals.toString()); } part.setValues(part_vals); + // If transactional table partition, check whether the current version partition + // statistics in the metastore comply with the client query's snapshot isolation. + if (writeIdList != null) { + if (TxnUtils.isTransactionalTable(table.getParameters())) { + if (isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList)) { + part.setIsStatsCompliant(true); + } else { + part.setIsStatsCompliant(false); + // Do not make persistent the following state since it is query specific (not global). + StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters."); + } + } + } return part; } @@ -2603,26 +2562,6 @@ private MPartition getMPartition(String catName, String dbName, String tableName * is true, then this partition's storage descriptor's column descriptor will point * to the same one as the table's storage descriptor. * @param part the partition to convert - * @param useTableCD whether to try to use the parent table's column descriptor. - * @return the model partition object, and null if the input partition is null. 
- * @throws InvalidObjectException - * @throws MetaException - */ - private MPartition convertToMPart(Partition part, boolean useTableCD) - throws InvalidObjectException, MetaException { - if (part == null) { - return null; - } - MTable mt = getMTable(part.getCatName(), part.getDbName(), part.getTableName()); - return convertToMPart(part, mt, useTableCD); - } - - /** - * Convert a Partition object into an MPartition, which is an object backed by the db - * If the Partition's set of columns is the same as the parent table's AND useTableCD - * is true, then this partition's storage descriptor's column descriptor will point - * to the same one as the table's storage descriptor. - * @param part the partition to convert * @param mt the parent table object * @param useTableCD whether to try to use the parent table's column descriptor. * @return the model partition object, and null if the input partition is null. @@ -2654,10 +2593,14 @@ private MPartition convertToMPart(Partition part, MTable mt, boolean useTableCD) msd = convertToMStorageDescriptor(part.getSd()); } - return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt + MPartition mpart = new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt .getPartitionKeys()), part.getValues()), mt, part.getValues(), part .getCreateTime(), part.getLastAccessTime(), msd, part.getParameters()); + if (TxnUtils.isTransactionalTable(mt.getParameters())) { + mpart.setWriteId(part.getWriteId()); + } + return mpart; } private Partition convertToPart(MPartition mpart) throws MetaException { @@ -3079,7 +3022,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter( TableName.getQualified(catName, dbName, tableName), filter, cols); List partitionNames = null; List partitions = null; - Table tbl = getTable(catName, dbName, tableName); + Table tbl = getTable(catName, dbName, tableName, -1, null); try { // Get partitions by name - ascending or descending partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending, @@ -3212,7 +3155,8 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn( if (applyDistinct) { partValuesSelect.append("DISTINCT "); } - List partitionKeys = getTable(catName, dbName, tableName).getPartitionKeys(); + List partitionKeys = + getTable(catName, dbName, tableName, -1, null).getPartitionKeys(); for (FieldSchema key : cols) { partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", "); } @@ -3294,7 +3238,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); - Table table = getTable(catName, dbName, tableName); + Table table = getTable(catName, dbName, tableName, -1, null); if (table == null) { throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tableName) + " table not found"); @@ -3670,7 +3614,8 @@ private void dropPartitionsNoTxn(String catName, String dbName, String tblName, protected T results = null; public GetHelper(String catalogName, String dbName, String tblName, - boolean allowSql, boolean allowJdo) throws MetaException { + boolean allowSql, boolean allowJdo) + throws MetaException { assert allowSql || allowJdo; this.allowJdo = allowJdo; this.catName = (catalogName != null) ? 
normalizeIdentifier(catalogName) : null; @@ -3888,7 +3833,7 @@ protected String describeResult() { private abstract class GetStatHelper extends GetHelper { public GetStatHelper(String catalogName, String dbName, String tblName, boolean allowSql, - boolean allowJdo) throws MetaException { + boolean allowJdo, String writeIdList) throws MetaException { super(catalogName, dbName, tblName, allowSql, allowJdo); } @@ -4146,8 +4091,8 @@ private String makeParameterDeclarationStringObj(Map params) { } @Override - public void alterTable(String catName, String dbname, String name, Table newTable) - throws InvalidObjectException, MetaException { + public void alterTable(String catName, String dbname, String name, Table newTable, + long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { boolean success = false; boolean registerCreationSignature = false; try { @@ -4188,6 +4133,18 @@ public void alterTable(String catName, String dbname, String name, Table newTabl oldt.setViewExpandedText(newt.getViewExpandedText()); oldt.setRewriteEnabled(newt.isRewriteEnabled()); + // If transactional, update the MTable's write ID + // for the current stats updater query. + if (TxnUtils.isTransactionalTable(newTable) && queryValidWriteIds != null) { + // Check for a concurrent INSERT; if one is detected, invalidate the stats flag. + if (!isCurrentStatsValidForTheQuery(oldt, queryTxnId, queryValidWriteIds)) { + StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + + dbname + "." + name + ". The new state will be made persistent."); + } + oldt.setWriteId(newTable.getWriteId()); + } + // commit the changes success = commitTransaction(); } finally { @@ -4235,13 +4192,14 @@ public void updateCreationMetadata(String catName, String dbname, String tablena * @throws MetaException */ private MColumnDescriptor alterPartitionNoTxn(String catName, String dbname, String name, - List part_vals, Partition newPart) + List part_vals, Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { catName = normalizeIdentifier(catName); name = normalizeIdentifier(name); dbname = normalizeIdentifier(dbname); + MTable table = this.getMTable(newPart.getCatName(), newPart.getDbName(), newPart.getTableName()); MPartition oldp = getMPartition(catName, dbname, name, part_vals); - MPartition newp = convertToMPart(newPart, false); + MPartition newp = convertToMPart(newPart, table, false); MColumnDescriptor oldCD = null; MStorageDescriptor oldSD = oldp.getSd(); if (oldSD != null) { @@ -4262,17 +4220,33 @@ private MColumnDescriptor alterPartitionNoTxn(String catName, String dbname, Str if (newp.getLastAccessTime() != oldp.getLastAccessTime()) { oldp.setLastAccessTime(newp.getLastAccessTime()); } + + // If transactional, update the partition's write ID + // for the current updater query. + if (queryValidWriteIds != null && TxnUtils.isTransactionalTable(table.getParameters())) { + // Check for a concurrent INSERT; if one is detected, invalidate the stats flag. + if (!isCurrentStatsValidForTheQuery(oldp, queryTxnId, queryValidWriteIds)) { + StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + + dbname + "." + name + "."
+ oldp.getPartitionName() + " will be made persistent."); + } + oldp.setWriteId(newPart.getWriteId()); + } return oldCD; } @Override public void alterPartition(String catName, String dbname, String name, List part_vals, - Partition newPart) throws InvalidObjectException, MetaException { + Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { boolean success = false; Exception e = null; try { openTransaction(); - MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, part_vals, newPart); + if (newPart.isSetWriteId()) { + LOG.warn("Alter partitions with write ID called without transaction information"); + } + MColumnDescriptor oldCd = alterPartitionNoTxn( + catName, dbname, name, part_vals, newPart, queryTxnId, queryValidWriteIds); removeUnusedColumnDescriptor(oldCd); // commit the changes success = commitTransaction(); @@ -4293,8 +4267,9 @@ public void alterPartition(String catName, String dbname, String name, List> part_vals, List newParts) - throws InvalidObjectException, MetaException { + List> part_vals, List newParts, + long writeId, long queryTxnId, String queryWriteIdList) + throws InvalidObjectException, MetaException { boolean success = false; Exception e = null; try { @@ -4303,7 +4278,12 @@ public void alterPartitions(String catName, String dbname, String name, Set oldCds = new HashSet<>(); for (Partition tmpPart: newParts) { List tmpPartVals = part_val_itr.next(); - MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, tmpPartVals, tmpPart); + // We don't reset write ID when we invalidate stats; we unset the json boolean. + if (writeId > 0) { + tmpPart.setWriteId(writeId); + } + MColumnDescriptor oldCd = alterPartitionNoTxn( + catName, dbname, name, tmpPartVals, tmpPart, queryTxnId, queryWriteIdList); if (oldCd != null) { oldCds.add(oldCd); } @@ -6179,7 +6159,9 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { boolean found = false; - Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject.getObjectName()); + Table tabObj = + this.getTable(catName, hiveObject.getDbName(), + hiveObject.getObjectName(), -1, null); String partName = null; if (hiveObject.getPartValues() != null) { partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); @@ -6213,7 +6195,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject - .getObjectName()); + .getObjectName(), -1, null); String partName = null; if (hiveObject.getPartValues() != null) { partName = Warehouse.makePartName(tabObj.getPartitionKeys(), @@ -7735,7 +7717,7 @@ public boolean isPartitionMarkedForEvent(String catName, String dbName, String t query .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4," + "java.lang.String t5"); - Table tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid. + Table tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid. 
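Taken together, the two alter paths above implement one rule: a stats writer may publish its stats as accurate only if the stats currently stored are still valid under the writer's own snapshot; otherwise the accuracy flag is dropped, but the entity's write ID still advances. The following is a minimal, self-contained sketch of that decision, assuming a toy parameter map; AlterStatsSketch, applyAlter, and the "writeId" key are illustrative stand-ins, not the ObjectStore/StatsSetupConst machinery.

import java.util.HashMap;
import java.util.Map;
import java.util.function.LongPredicate;

/** Toy model of the alter-time stats decision; not the Hive implementation. */
public class AlterStatsSketch {
  static final String ACCURATE = "COLUMN_STATS_ACCURATE"; // same key the patch manipulates

  /**
   * Applies a stats update from a query whose snapshot is described by isVisible.
   * The write ID always advances; the accuracy marker survives only if the
   * currently stored stats were produced by a write the updater can see.
   */
  static void applyAlter(Map<String, String> params, long storedWriteId,
      long newWriteId, LongPredicate isVisible) {
    boolean statsStillValid = storedWriteId < 1      // non-transactional entity
        || isVisible.test(storedWriteId);            // stats writer visible in snapshot
    if (!statsStillValid) {
      params.remove(ACCURATE);                       // stats are stale: drop the flag
    }
    params.put("writeId", Long.toString(newWriteId)); // stand-in for setWriteId()
  }

  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    params.put(ACCURATE, "{\"BASIC_STATS\":\"true\"}");
    // A concurrent writer (write ID 7) is invisible to this updater's snapshot.
    applyAlter(params, 7L, 9L, w -> w <= 5);
    System.out.println(params.containsKey(ACCURATE)); // false: flag was dropped
  }
}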
@@ -6179,7 +6159,9 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
         } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) {
           boolean found = false;
-          Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject.getObjectName());
+          Table tabObj =
+              this.getTable(catName, hiveObject.getDbName(),
+                  hiveObject.getObjectName(), -1, null);
           String partName = null;
           if (hiveObject.getPartValues() != null) {
             partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues());
@@ -6213,7 +6195,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
         } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {
           Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject
-              .getObjectName());
+              .getObjectName(), -1, null);
           String partName = null;
           if (hiveObject.getPartValues() != null) {
             partName = Warehouse.makePartName(tabObj.getPartitionKeys(),
@@ -7735,7 +7717,7 @@ public boolean isPartitionMarkedForEvent(String catName, String dbName, String t
     query
         .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4,"
             + "java.lang.String t5");
-    Table tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid.
+    Table tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid.
     if (null == tbl) {
       throw new UnknownTableException("Table: " + tblName + " is not found.");
     }
@@ -7761,7 +7743,7 @@ public Table markPartitionForEvent(String catName, String dbName, String tblName
     Table tbl = null;
     try{
       openTransaction();
-      tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid.
+      tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid.
       if(null == tbl) {
         throw new UnknownTableException("Table: "+ tblName + " is not found.");
       }
@@ -8565,9 +8547,37 @@ public void validateTableCols(Table table, List<String> colNames) throws MetaExc
   }

   @Override
-  public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+  public ColumnStatistics getTableColumnStatistics(
+      String catName,
+      String dbName,
+      String tableName,
       List<String> colNames) throws MetaException, NoSuchObjectException {
-    return getTableColumnStatisticsInternal(catName, dbName, tableName, colNames, true, true);
+    // Note: this will get stats without verifying ACID.
+    return getTableColumnStatisticsInternal(
+        catName, dbName, tableName, colNames, true, true);
+  }
+
+  @Override
+  public ColumnStatistics getTableColumnStatistics(
+      String catName,
+      String dbName,
+      String tableName,
+      List<String> colNames,
+      long txnId,
+      String writeIdList) throws MetaException, NoSuchObjectException {
+    Boolean isCompliant = null;
+    // If the stored stats do not satisfy the isolation level (snapshot) of the
+    // calling query, mark the returned stats as non-compliant.
+    if (writeIdList != null) {
+      MTable table = this.getMTable(catName, dbName, tableName);
+      isCompliant = isCurrentStatsValidForTheQuery(table, txnId, writeIdList);
+    }
+    ColumnStatistics stats = getTableColumnStatisticsInternal(
+        catName, dbName, tableName, colNames, true, true);
+    if (stats != null && isCompliant != null) {
+      stats.setIsStatsCompliant(isCompliant);
+    }
+    return stats;
+  }
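The compliance check above ultimately reduces to a visibility test against the query's write-ID snapshot. Below is a minimal sketch of such a snapshot, assuming the usual high-watermark-plus-exceptions representation; the WriteIdSnapshot class and its names are illustrative, not Hive's ValidReaderWriteIdList API.

import java.util.Arrays;

/** Toy write-ID snapshot: visible = at or below the high-water mark and not an exception. */
final class WriteIdSnapshot {
  private final long highWatermark;   // highest write ID allocated when the snapshot was taken
  private final long[] exceptions;    // open or aborted write IDs below the high-water mark

  WriteIdSnapshot(long highWatermark, long... exceptions) {
    this.highWatermark = highWatermark;
    this.exceptions = exceptions.clone();
    Arrays.sort(this.exceptions);
  }

  /** Plays the role of isWriteIdValid(): can the query see the given write? */
  boolean isWriteIdVisible(long writeId) {
    return writeId <= highWatermark && Arrays.binarySearch(exceptions, writeId) < 0;
  }

  public static void main(String[] args) {
    WriteIdSnapshot snap = new WriteIdSnapshot(10, 7, 9); // 7 and 9 are open/aborted
    System.out.println(snap.isWriteIdVisible(5));  // true: committed before the snapshot
    System.out.println(snap.isWriteIdVisible(9));  // false: concurrent or aborted
    System.out.println(snap.isWriteIdVisible(11)); // false: allocated after the snapshot
  }
}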
   protected ColumnStatistics getTableColumnStatisticsInternal(
@@ -8575,7 +8585,7 @@ protected ColumnStatistics getTableColumnStatisticsInternal(
       boolean allowJdo) throws MetaException, NoSuchObjectException {
     final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR);
     return new GetStatHelper(normalizeIdentifier(catName), normalizeIdentifier(dbName),
-        normalizeIdentifier(tableName), allowSql, allowJdo) {
+        normalizeIdentifier(tableName), allowSql, allowJdo, null) {
       @Override
       protected ColumnStatistics getSqlResult(GetHelper<ColumnStatistics> ctx) throws MetaException {
         return directSql.getTableStats(catName, dbName, tblName, colNames, enableBitVector);
@@ -8586,7 +8596,8 @@ protected ColumnStatistics getJdoResult(
         QueryWrapper queryWrapper = new QueryWrapper();
         try {
-          List<MTableColumnStatistics> mStats = getMTableColumnStatistics(getTable(), colNames, queryWrapper);
+          List<MTableColumnStatistics> mStats =
+              getMTableColumnStatistics(getTable(), colNames, queryWrapper);
           if (mStats.isEmpty()) {
             return null;
           }
@@ -8612,6 +8623,39 @@ protected ColumnStatistics getJdoResult(
   @Override
   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName, String tableName,
       List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
+    // Note: this will get stats without verifying ACID.
+    return getPartitionColumnStatisticsInternal(
+        catName, dbName, tableName, partNames, colNames, true, true);
+  }
+
+  @Override
+  public List<ColumnStatistics> getPartitionColumnStatistics(
+      String catName, String dbName, String tableName,
+      List<String> partNames, List<String> colNames,
+      long txnId, String writeIdList)
+      throws MetaException, NoSuchObjectException {
+
+    // If any of the current partition stats in the metastore do not comply with
+    // the isolation level of the query, return an empty list.
+    if (writeIdList != null) {
+      if (partNames == null || partNames.isEmpty()) {
+        LOG.warn("The given partNames does not have any name.");
+        return null;
+      }
+      // TODO## this is not correct; stats updater patch will fix it to return stats for valid partitions,
+      // and no stats for invalid. Remove this comment when merging that patch.
+      // Loop through the given "partNames" list
+      // checking isolation-level-compliance of each partition's column stats.
+      for (String partName : partNames) {
+        MPartition mpart = getMPartition(catName, dbName, tableName, Warehouse.getPartValuesFromPartName(partName));
+        if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList)) {
+          LOG.debug("The current metastore transactional partition column statistics for {}.{}.{} "
+              + "(write ID {}) are not valid for the current query ({} {})", dbName, tableName,
+              mpart.getPartitionName(), mpart.getWriteId(), txnId, writeIdList);
+          return Lists.newArrayList();
+        }
+      }
+    }
     return getPartitionColumnStatisticsInternal(
         catName, dbName, tableName, partNames, colNames, true, true);
   }
@@ -8664,10 +8708,39 @@ protected ColumnStatistics getJdoResult(
     }.run(true);
   }

+  @Override
+  public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+      final List<String> partNames, final List<String> colNames,
+      long txnId, String writeIdList) throws MetaException, NoSuchObjectException {
+    // If the current stats in the metastore do not comply with
+    // the isolation level of the query, return null.
+    if (writeIdList != null) {
+      if (partNames == null || partNames.isEmpty()) {
+        LOG.warn("The given partNames does not have any name.");
+        return null;
+      }
+
+      // TODO: this should probably also return stats for partitions with valid stats,
+      // and no stats for partitions with invalid stats.
+      // Loop through the given "partNames" list
+      // checking isolation-level-compliance of each partition's column stats.
+      for (String partName : partNames) {
+        MPartition mpart = getMPartition(catName, dbName, tblName, Warehouse.getPartValuesFromPartName(partName));
+        if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList)) {
+          LOG.debug("The current metastore transactional partition column statistics "
+              + "for " + dbName + "." + tblName + "." + mpart.getPartitionName()
+              + " are not valid for the current query.");
+          return null;
+        }
+      }
+    }
+    return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+  }

   @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
-      final List<String> partNames, final List<String> colNames) throws MetaException, NoSuchObjectException {
+      final List<String> partNames, final List<String> colNames)
+      throws MetaException, NoSuchObjectException {
     final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(),
         ConfVars.STATS_NDV_DENSITY_FUNCTION);
     final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER);
@@ -8699,7 +8772,8 @@ protected String describeResult() {
       throws MetaException, NoSuchObjectException {
     final boolean enableBitVector =
         MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR);
-    return new GetHelper<List<ColStatsObjWithSourceInfo>>(catName, dbName, null, true, false) {
+    return new GetHelper<List<ColStatsObjWithSourceInfo>>(
+        catName, dbName, null, true, false) {
       @Override
       protected List<ColStatsObjWithSourceInfo> getSqlResult(
           GetHelper<List<ColStatsObjWithSourceInfo>> ctx) throws MetaException {
@@ -12216,4 +12290,88 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException {
     return ret;
   }
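Before the helper that all of the above call, it is worth spelling out the decision procedure in one place. The following condensed sketch mirrors the same logic under stated assumptions: the parameter keys, the visibleInSnapshot predicate, and the writtenByThisTxn fallback flag are illustrative stand-ins for StatsSetupConst, ValidReaderWriteIdList, and the TXN_TO_WRITE_ID lookup, not Hive API.

import java.util.Map;
import java.util.function.LongPredicate;

/** Toy version of the stats-validity predicate; not the Hive implementation. */
public class StatsValiditySketch {

  /**
   * Stored stats are usable by a query when:
   *  1) the entity is not transactional (no write ID recorded), or
   *  2) the basic-stats flag is still set AND either the table is empty,
   *     the stats writer is visible in the query's snapshot, or the stats
   *     were written by the query's own transaction.
   */
  static boolean statsValidForQuery(Map<String, String> params, long statsWriteId,
      LongPredicate visibleInSnapshot, boolean writtenByThisTxn) {
    if (statsWriteId < 1) {
      return true;                        // non-transactional entity
    }
    if (!"true".equals(params.get("BASIC_STATS"))) {
      return false;                       // flag already dropped (e.g. by an aborted writer)
    }
    if ("0".equals(params.get("numFiles"))) {
      return true;                        // freshly created, still-empty table/partition
    }
    return visibleInSnapshot.test(statsWriteId) || writtenByThisTxn;
  }

  public static void main(String[] args) {
    Map<String, String> params = Map.of("BASIC_STATS", "true", "numFiles", "2");
    // Snapshot sees writes up to ID 5; stats were written by write ID 7 of another txn.
    System.out.println(statsValidForQuery(params, 7L, w -> w <= 5, false)); // false
  }
}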
+ LOG.trace("Called with stats write ID {}; query {}, {}; params {}", + statsWriteId, queryTxnId, queryValidWriteIdList, statsParams); + // if statsWriteIdList is null, + // return true since the stats does not seem to be transactional. + if (statsWriteId < 1) { + return true; + } + // This COLUMN_STATS_ACCURATE(CSA) state checking also includes the case that the stats is + // written by an aborted transaction but TXNS has no entry for the transaction + // after compaction. + if (!StatsSetupConst.areBasicStatsUptoDate(statsParams)) { + return false; + } + + // TODO## NUM_FILES could also be set to 0 by invalid update. We need to have a negative test. Or remove this and fix stuff. + // If the NUM_FILES of the table/partition is 0, return 'true' from this method. + // Since newly initialized empty table has 0 for the parameter. + if (Long.parseLong(statsParams.get(StatsSetupConst.NUM_FILES)) == 0) { + return true; + } + + ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList(queryValidWriteIdList); + // Just check if the write ID is valid. If it's valid (i.e. we are allowed to see it), + // that means it cannot possibly be a concurrent write. If it's not valid (we are not + // allowed to see it), that means it's either concurrent or aborted, same thing for us. + if (list4TheQuery.isWriteIdValid(statsWriteId)) { + return true; + } + + // This assumes that all writes within the same txn are sequential and can see each other. + // TODO## Not clear if we need this check; each next write should have the previous + // one in its writeIdList; verify w/Eugene. + long statsTxnId = HiveMetaStore.HMSHandler.getMsThreadTxnHandler(conf).getTxnIdForWriteId( + dbName, tblName, statsWriteId); + return (statsTxnId == queryTxnId); + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 73a518d871..681e1e544a 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -19,11 +19,7 @@ package org.apache.hadoop.hive.metastore; import org.apache.hadoop.hive.common.TableName; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.ISchemaName; -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; -import org.apache.hadoop.hive.metastore.api.WriteEventInfo; +import org.apache.hadoop.hive.metastore.api.*; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; @@ -35,59 +31,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.ISchema; -import 
org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.RuntimeStat; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SchemaVersion; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMPool; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo; import org.apache.thrift.TException; @@ -267,6 +210,20 @@ boolean dropTable(String catalogName, String dbName, String tableName) Table getTable(String catalogName, String dbName, String tableName) throws MetaException; /** + * Get a table object. + * @param catalogName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return table object, or null if no such table exists (wow it would be nice if we either + * consistently returned null or consistently threw NoSuchObjectException). 
+ * @throws MetaException something went wrong in the RDBMS + */ + Table getTable(String catalogName, String dbName, String tableName, + long txnId, String writeIdList) throws MetaException; + + /** * Add a partition. * @param part partition to add * @return true if the partition was successfully added. @@ -318,6 +275,22 @@ boolean addPartitions(String catName, String dbName, String tblName, */ Partition getPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; + /** + * Get a partition. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param part_vals partition values for this table. + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return the partition. + * @throws MetaException error reading from RDBMS. + * @throws NoSuchObjectException no partition matching this specification exists. + */ + Partition getPartition(String catName, String dbName, String tableName, + List part_vals, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException; /** * Check whether a partition exists. @@ -388,7 +361,8 @@ boolean dropPartition(String catName, String dbName, String tableName, * @throws InvalidObjectException The new table object is invalid. * @throws MetaException something went wrong, usually in the RDBMS or storage. */ - void alterTable(String catName, String dbname, String name, Table newTable) + void alterTable(String catName, String dbname, String name, Table newTable, + long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException; /** @@ -529,7 +503,8 @@ PartitionValuesResponse listPartitionValues(String catName, String db_name, Stri * @throws MetaException error accessing the RDBMS. */ void alterPartition(String catName, String db_name, String tbl_name, List part_vals, - Partition new_part) throws InvalidObjectException, MetaException; + Partition new_part, long queryTxnId, String queryValidWriteIds) + throws InvalidObjectException, MetaException; /** * Alter a set of partitions. @@ -541,11 +516,15 @@ void alterPartition(String catName, String db_name, String tbl_name, List> part_vals_list, List new_parts) + List> part_vals_list, List new_parts, long writeId, + long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException; /** @@ -917,6 +896,25 @@ ColumnStatistics getTableColumnStatistics(String catName, String dbName, String List colName) throws MetaException, NoSuchObjectException; /** + * Returns the relevant column statistics for a given column in a given table in a given database + * if such statistics exist. + * @param catName catalog name. + * @param dbName name of the database, defaults to current database + * @param tableName name of the table + * @param colName names of the columns for which statistics is requested + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return Relevant column statistics for the column for the given table + * @throws NoSuchObjectException No such table + * @throws MetaException error accessing the RDBMS + * + */ + ColumnStatistics getTableColumnStatistics( + String catName, String dbName, String tableName, + List colName, long txnId, String writeIdList) + throws MetaException, NoSuchObjectException; + + /** * Get statistics for a partition for a set of columns. * @param catName catalog name. 
* @param dbName database name. @@ -932,6 +930,25 @@ ColumnStatistics getTableColumnStatistics(String catName, String dbName, String throws MetaException, NoSuchObjectException; /** + * Get statistics for a partition for a set of columns. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...] + * @param colNames list of columns to get stats for + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return list of statistics objects + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such partition. + */ + List getPartitionColumnStatistics( + String catName, String dbName, String tblName, + List partNames, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException; + + /** * Deletes column statistics if present associated with a given db, table, partition and col. If * null is passed instead of a colName, stats when present for all columns associated * with a given db, table and partition are deleted. @@ -1175,6 +1192,25 @@ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; /** + * Get aggregated stats for a table or partition(s). + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are the names of the partitions, not + * values. + * @param colNames list of column names + * @param txnId transaction id of the calling transaction + * @param writeIdList string format of valid writeId transaction list + * @return aggregated stats + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException no such table or partition + */ + AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, + List partNames, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException; + + /** * Get column stats for all partitions of all tables in the database * @param catName catalog name * @param dbName database name diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 8ff056f353..8539605e0f 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -49,69 +49,10 @@ import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import 
org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.ISchema; -import org.apache.hadoop.hive.metastore.api.ISchemaName; -import org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.RuntimeStat; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SchemaVersion; -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.metastore.api.WMPool; -import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import 
org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; @@ -338,6 +279,7 @@ static void prewarm(RawStore rawStore) { rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); Deadline.stopTimer(); } + // TODO## should this take write ID into account? or at least cache write ID to verify? // If the table could not cached due to memory limit, stop prewarm boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions, partitionColStats, aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); @@ -609,6 +551,7 @@ private void updateTableColStats(RawStore rawStore, String catName, String dbNam rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); Deadline.stopTimer(); if (tableColStats != null) { + // TODO## should this take write ID into account? or at least cache write ID to verify? sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); @@ -639,6 +582,7 @@ private void updateTablePartitionColStats(RawStore rawStore, String catName, Str List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); + // TODO## should this take write ID into account? or at least cache write ID to verify? List partitionColStats = rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); @@ -880,20 +824,29 @@ public boolean dropTable(String catName, String dbName, String tblName) @Override public Table getTable(String catName, String dbName, String tblName) throws MetaException { + return getTable(catName, dbName, tblName, -1, null); + } + + // TODO: if writeIdList is not null, check isolation level compliance for SVS, + // possibly with getTableFromCache() with table snapshot in cache. 
+  @Override
+  public Table getTable(String catName, String dbName, String tblName,
+      long txnId, String writeIdList)
+      throws MetaException {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
-      return rawStore.getTable(catName, dbName, tblName);
+      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
     }
     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
-    if (tbl == null) {
+    if (tbl == null || writeIdList != null) {
       // Either the table is not yet loaded in cache, or the caller passed a write ID list
       // and needs snapshot validation the cache cannot provide; read through to the raw store.
       // If the prewarm thread is working on this table's database,
       // let's move this table to the top of tblNamesBeingPrewarmed stack,
       // so that it gets loaded to the cache faster and is available for subsequent requests
       tblsPendingPrewarm.prioritizeTableForPrewarm(tblName);
-      return rawStore.getTable(catName, dbName, tblName);
+      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
     }
     if (tbl != null) {
       tbl.unsetPrivileges();
@@ -956,16 +909,26 @@ public boolean addPartitions(String catName, String dbName, String tblName, Part
   @Override
   public Partition getPartition(String catName, String dbName, String tblName,
       List<String> part_vals) throws MetaException, NoSuchObjectException {
+    return getPartition(catName, dbName, tblName, part_vals, -1, null);
+  }
+
+  // TODO: the same as getTable()
+  @Override
+  public Partition getPartition(String catName, String dbName, String tblName,
+      List<String> part_vals, long txnId, String writeIdList)
+      throws MetaException, NoSuchObjectException {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
-      return rawStore.getPartition(catName, dbName, tblName, part_vals);
+      return rawStore.getPartition(
+          catName, dbName, tblName, part_vals, txnId, writeIdList);
     }
     Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals);
-    if (part == null) {
+    if (part == null || writeIdList != null) {
       // Either the containing table is not yet loaded in cache, or snapshot validation
       // was requested; read through to the raw store.
-      return rawStore.getPartition(catName, dbName, tblName, part_vals);
+      return rawStore.getPartition(
+          catName, dbName, tblName, part_vals, txnId, writeIdList);
     }
     return part;
   }
@@ -1046,9 +1009,9 @@ public void dropPartitions(String catName, String dbName, String tblName, List
   @Override
   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
-      Partition newPart) throws InvalidObjectException, MetaException {
-    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart);
+      Partition newPart, long queryTxnId, String queryValidWriteIds)
+      throws InvalidObjectException, MetaException {
+    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tblName = normalizeIdentifier(tblName);
@@ -1211,15 +1175,18 @@ public void alterPartition(String catName, String dbName, String tblName, List
   @Override
   public void alterPartitions(String catName, String dbName, String tblName,
-      List<List<String>> partValsList, List<Partition> newParts)
+      List<List<String>> partValsList, List<Partition> newParts,
+      long writeId, long txnId, String validWriteIds)
       throws InvalidObjectException, MetaException {
-    rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
+    rawStore.alterPartitions(
+        catName, dbName, tblName, partValsList, newParts, writeId, txnId, validWriteIds);
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tblName = normalizeIdentifier(tblName);
     if
(!shouldCacheTable(catName, dbName, tblName)) { return; } + // TODO: modify the following method for the case when writeIdList != null. sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts); } @@ -1663,16 +1630,27 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, List colNames) throws MetaException, NoSuchObjectException { + return getTableColumnStatistics(catName, dbName, tblName, colNames, -1, null); + } + + // TODO: the same as getTable() + @Override + public ColumnStatistics getTableColumnStatistics( + String catName, String dbName, String tblName, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); + return rawStore.getTableColumnStatistics( + catName, dbName, tblName, colNames, txnId, writeIdList); } Table table = sharedCache.getTableFromCache(catName, dbName, tblName); - if (table == null) { + if (table == null || writeIdList != null) { // The table is not yet loaded in cache - return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); + return rawStore.getTableColumnStatistics( + catName, dbName, tblName, colNames, txnId, writeIdList); } ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName); List colStatObjs = @@ -1730,6 +1708,15 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List getPartitionColumnStatistics( + String catName, String dbName, String tblName, List partNames, + List colNames, long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { + return rawStore.getPartitionColumnStatistics( + catName, dbName, tblName, partNames, colNames, txnId, writeIdList); + } + + @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { @@ -1750,17 +1737,28 @@ public boolean deletePartitionColumnStatistics(String catName, String dbName, St @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException { + return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, -1, null); + } + + @Override + // TODO: the same as getTable() for transactional stats. 
+ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, + List partNames, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { List colStats; catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for( + catName, dbName, tblName, partNames, colNames, txnId, writeIdList); } Table table = sharedCache.getTableFromCache(catName, dbName, tblName); - if (table == null) { + if (table == null || writeIdList != null) { // The table is not yet loaded in cache - return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + return rawStore.get_aggr_stats_for( + catName, dbName, tblName, partNames, colNames, txnId, writeIdList); } List allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); if (partNames.size() == allPartNames.size()) { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java index 4a97f891fe..267c9e8e5a 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java @@ -30,7 +30,7 @@ private int lastAccessTime; private MStorageDescriptor sd; private Map parameters; - + private long writeId; public MPartition() {} @@ -152,4 +152,11 @@ public void setCreateTime(int createTime) { this.createTime = createTime; } + public long getWriteId() { + return writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java index 38ad47915b..deeb97133d 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java @@ -1,3 +1,4 @@ + /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -37,6 +38,7 @@ private String viewExpandedText; private boolean rewriteEnabled; private String tableType; + private long writeId; public MTable() {} @@ -270,4 +272,12 @@ public void setTableType(String tableType) { public String getTableType() { return tableType; } + + public long getWriteId() { + return writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index 4e3068d7eb..1f559e95bb 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.metastore.txn; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -576,8 +577,8 @@ public void cleanEmptyAbortedTxns() throws MetaException { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); String s = "select txn_id from TXNS where " + - "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " + - "txn_state = '" + TXN_ABORTED + "'"; + "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " + + "txn_state = '" + TXN_ABORTED + "'"; LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); List txnids = new ArrayList<>(); @@ -587,10 +588,60 @@ public void cleanEmptyAbortedTxns() throws MetaException { return; } Collections.sort(txnids);//easier to read logs + List queries = new ArrayList<>(); StringBuilder prefix = new StringBuilder(); StringBuilder suffix = new StringBuilder(); + // Turn off COLUMN_STATS_ACCURATE for txnids' components in TBLS and PARTITIONS + prefix.append("select tbl_id from TBLS inner join DBS on TBLS.DB_ID = DBS.DB_ID " + + "inner join TXN_TO_WRITE_ID on t2w_database = DBS.NAME and t2w_table = TBLS.TBL_NAME" + + " and t2w_writeid = TBLS.WRITE_ID where "); + suffix.append(""); + TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "t2w_txnid", true, false); + + // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from TABLE_PARAMS for the txnids. + List finalCommands = new ArrayList<>(queries.size()); + for (int i = 0; i < queries.size(); i++) { + String query = queries.get(i); + finalCommands.add(i, new StringBuilder("delete from TABLE_PARAMS " + + " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and tbl_id in (")); + finalCommands.get(i).append(query + ")"); + LOG.debug("Going to execute update <" + finalCommands.get(i) + ">"); + int rc = stmt.executeUpdate(finalCommands.get(i).toString()); + LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS states from TBLS"); + } + + queries.clear(); + prefix.setLength(0); + suffix.setLength(0); + finalCommands.clear(); + + // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from PARTITIONS_PARAMS for the txnids. 
+ prefix.append("select part_id from PARTITIONS " + + "inner join TBLS on PARTITIONS.TBL_ID = TBLS.TBL_ID " + + "inner join DBS on TBLS.DB_ID = DBS.DB_ID " + + "inner join TXN_TO_WRITE_ID on t2w_database = DBS.NAME and t2w_table = TBLS.TBL_NAME" + + " and t2w_writeid = TBLS.WRITE_ID where "); + suffix.append(""); + TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "t2w_txnid", true, false); + + for (int i = 0; i < queries.size(); i++) { + String query = queries.get(i); + finalCommands.add(i, new StringBuilder("delete from PARTITION_PARAMS " + + " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and part_id in (")); + finalCommands.get(i).append(query + ")"); + LOG.debug("Going to execute update <" + finalCommands.get(i) + ">"); + int rc = stmt.executeUpdate(finalCommands.get(i).toString()); + LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS states from PARTITIONS"); + } + + queries.clear(); + prefix.setLength(0); + suffix.setLength(0); + finalCommands.clear(); + + // Delete from TXNS. prefix.append("delete from TXNS where "); suffix.append(""); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index f8c2ca2ea2..319e612f58 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -28,9 +28,12 @@ import java.util.Properties; import com.google.common.annotations.VisibleForTesting; +import jline.internal.Log; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.zookeeper.txn.TxnHeader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -195,6 +198,68 @@ public static void prepDb(Configuration conf) throws Exception { ); try { + stmt.execute("CREATE TABLE \"APP\".\"TBLS\" (\"TBL_ID\" BIGINT NOT NULL, " + + " \"CREATE_TIME\" INTEGER NOT NULL, \"DB_ID\" BIGINT, \"LAST_ACCESS_TIME\" INTEGER NOT NULL, " + + " \"OWNER\" VARCHAR(767), \"OWNER_TYPE\" VARCHAR(10), \"RETENTION\" INTEGER NOT NULL, " + + " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " + + " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " + + " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', " + + " \"WRITE_ID\" BIGINT DEFAULT 0, " + + " PRIMARY KEY (TBL_ID))" + ); + } catch (SQLException e) { + if (e.getMessage() != null && e.getMessage().contains("already exists")) { + LOG.info("TBLS table already exist, ignoring"); + } else { + throw e; + } + } + + try { + stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" + + " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " + + " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " + + " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, " + + " \"WRITE_ID\" BIGINT DEFAULT 0, " + + " PRIMARY KEY (PART_ID))" + ); + } catch (SQLException e) { + if (e.getMessage() != null && e.getMessage().contains("already exists")) { + LOG.info("PARTITIONS table already exist, ignoring"); + } else { + throw e; + } + } + + try { + stmt.execute("CREATE TABLE \"APP\".\"TABLE_PARAMS\" (" + + " \"TBL_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " + + " \"PARAM_VALUE\" CLOB, " + + " PRIMARY KEY (TBL_ID, PARAM_KEY))" + 
); + } catch (SQLException e) { + if (e.getMessage() != null && e.getMessage().contains("already exists")) { + LOG.info("TABLE_PARAMS table already exist, ignoring"); + } else { + throw e; + } + } + + try { + stmt.execute("CREATE TABLE \"APP\".\"PARTITION_PARAMS\" (" + + " \"PART_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " + + " \"PARAM_VALUE\" VARCHAR(4000), " + + " PRIMARY KEY (PART_ID, PARAM_KEY))" + ); + } catch (SQLException e) { + if (e.getMessage() != null && e.getMessage().contains("already exists")) { + LOG.info("PARTITION_PARAMS table already exist, ignoring"); + } else { + throw e; + } + } + + try { stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " + "NULL, \"NEXT_VAL\" BIGINT NOT NULL)" @@ -404,6 +469,35 @@ public static int countLockComponents(Configuration conf, long lockId) throws Ex } /** + * Return true if the transaction of the given txnId is open. + * @param conf HiveConf + * @param txnId transaction id to search for + * @return + * @throws Exception + */ + public static boolean isOpenOrAbortedTransaction(Configuration conf, long txnId) throws Exception { + Connection conn = null; + PreparedStatement stmt = null; + ResultSet rs = null; + try { + conn = getConnection(conf); + conn.setAutoCommit(false); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + + stmt = conn.prepareStatement("SELECT txn_id FROM TXNS WHERE txn_id = ?"); + stmt.setLong(1, txnId); + rs = stmt.executeQuery(); + if (!rs.next()) { + return false; + } else { + return true; + } + } finally { + closeResources(conn, stmt, rs); + } + } + + /** * Utility method used to run COUNT queries like "select count(*) from ..." against metastore tables * @param countQuery countQuery text * @return count countQuery result diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 3785f89f2d..4ef6786688 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -87,6 +87,7 @@ import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import com.google.common.annotations.VisibleForTesting; /** @@ -422,6 +423,7 @@ public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException { return getOpenTxnsInfo(); } } + @Override @RetrySemantics.ReadOnly public GetOpenTxnsResponse getOpenTxns() throws MetaException { @@ -2400,6 +2402,47 @@ long generateCompactionQueueId(Statement stmt) throws SQLException, MetaExceptio return id; } } + + @Override + @RetrySemantics.ReadOnly + public long getTxnIdForWriteId( + String dbName, String tblName, long writeId) throws MetaException { + try { + Connection dbConn = null; + Statement stmt = null; + try { + /** + * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()} + */ + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + stmt = dbConn.createStatement(); + + String query = "select t2w_txnid from TXN_TO_WRITE_ID where" + + " t2w_database = " + quoteString(dbName) + + " and t2w_table = " + quoteString(tblName) + + " and t2w_writeid = " + writeId; + LOG.debug("Going to execute query <" + query + ">"); + ResultSet rs = stmt.executeQuery(query); + long txnId = -1; + if (rs.next()) { + txnId = rs.getLong(1); + } + dbConn.rollback(); + return txnId; + } catch 
(SQLException e) { + LOG.debug("Going to rollback"); + rollbackDBConn(dbConn); + checkRetryable(dbConn, e, "getTxnIdForWriteId"); + throw new MetaException("Unable to select from transaction database, " + + StringUtils.stringifyException(e)); + } finally { + close(null, stmt, dbConn); + } + } catch (RetryException e) { + return getTxnIdForWriteId(dbName, tblName, writeId); + } + } + @Override @RetrySemantics.Idempotent public CompactionResponse compact(CompactionRequest rqst) throws MetaException { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index d972d10dfc..df35f22f68 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore.txn; import com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; @@ -137,6 +138,11 @@ void commitTxn(CommitTxnRequest rqst) BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit( String inputDbName, String inputTableName, ValidWriteIdList txnList) throws MetaException; + + @RetrySemantics.ReadOnly + long getTxnIdForWriteId(String dbName, String tblName, long writeId) + throws MetaException; + /** * Gets the list of valid write ids for the given table wrt to current txn * @param rqst info on transaction and list of table names associated with given transaction diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index fa291d5f20..aac58110f9 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -25,11 +25,7 @@ import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableValidWriteIds; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.utils.JavaUtils; @@ -46,6 +42,12 @@ public class TxnUtils { private static final Logger LOG = LoggerFactory.getLogger(TxnUtils.class); + // Transactional stats states + static final public char STAT_OPEN = 'o'; + static final public char STAT_INVALID = 'i'; + static final public char STAT_COMMITTED = 'c'; + static final public char STAT_OBSOLETE = 's'; + /** * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a * {@link org.apache.hadoop.hive.common.ValidTxnList}. 
This assumes that the caller intends to @@ -223,6 +225,14 @@ public static boolean isTransactionalTable(Table table) { return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true"); } + public static boolean isTransactionalTable(Map parameters) { + if (parameters == null) { + return false; + } + String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL); + return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true"); + } + /** * Should produce the same result as * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}. diff --git standalone-metastore/src/main/resources/package.jdo standalone-metastore/src/main/resources/package.jdo index 5fb548cf88..70150da6f9 100644 --- standalone-metastore/src/main/resources/package.jdo +++ standalone-metastore/src/main/resources/package.jdo @@ -210,6 +210,9 @@ + + + @@ -489,6 +492,9 @@ + + + diff --git standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql index 7cab4fb663..126907039a 100644 --- standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); -CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT); +CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "WRITE_ID" BIGINT DEFAULT 0); CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER); @@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), " CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N'); +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "WRITE_ID" BIGINT DEFAULT 0); CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); diff --git standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql 
standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql index a51137636f..d4fb2990f2 100644 --- standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql +++ standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql @@ -1,5 +1,7 @@ -- Upgrade MetaStore schema from 3.1.0 to 4.0.0 - +-- HIVE-19416 +ALTER TABLE "APP"."TBLS" ADD WRITE_ID bigint DEFAULT 0; +ALTER TABLE "APP"."PARTITIONS" ADD WRITE_ID bigint DEFAULT 0; -- This needs to be the last thing done. Insert any changes above this line. UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; diff --git standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql index a81fc40959..ad78ba6c07 100644 --- standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -145,8 +145,8 @@ CREATE TABLE PARTITIONS LAST_ACCESS_TIME int NOT NULL, PART_NAME nvarchar(767) NULL, SD_ID bigint NULL, - TBL_ID bigint NULL -); + TBL_ID bigint NULL, + WRITE_ID bigint NULL); ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID); @@ -377,8 +377,8 @@ CREATE TABLE TBLS TBL_TYPE nvarchar(128) NULL, VIEW_EXPANDED_TEXT text NULL, VIEW_ORIGINAL_TEXT text NULL, - IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0 -); + IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0, + WRITE_ID bigint NULL); ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); diff --git standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql index 27b7026bbc..acc9361246 100644 --- standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql @@ -1,5 +1,9 @@ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE; +-- HIVE-19416 +ALTER TABLE TBLS ADD WRITE_ID bigint NULL; +ALTER TABLE PARTITIONS ADD WRITE_ID bigint NULL; + -- These lines need to be last. Insert any changes above. 
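Note that the dialects disagree slightly on the new column's definition: the Derby, MySQL and Postgres schema files default WRITE_ID to 0, while the MSSQL and Oracle definitions (and most of the upgrade scripts) leave it nullable with no default. Either way, the column is easy to verify after running an upgrade script. The following standalone JDBC check is illustrative only and not part of the patch; the URL and credentials are placeholders.

```java
// Hypothetical post-upgrade check: confirm TBLS and PARTITIONS gained WRITE_ID.
// Not part of the patch; the JDBC URL and credentials are placeholders.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

public class VerifyWriteIdColumns {
  public static void main(String[] args) throws SQLException {
    try (Connection conn = DriverManager.getConnection(
        "jdbc:mysql://metastore-db/hive", "hive", "password")) {
      for (String table : new String[] {"TBLS", "PARTITIONS"}) {
        // DatabaseMetaData.getColumns filters by table and column name pattern.
        try (ResultSet rs = conn.getMetaData()
            .getColumns(null, null, table, "WRITE_ID")) {
          System.out.println(table + (rs.next()
              ? " has WRITE_ID (" + rs.getString("TYPE_NAME") + ")"
              : " is MISSING WRITE_ID"));
        }
      }
    }
  }
}
```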
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE; diff --git standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index 968f4a4a26..29d545a91e 100644 --- standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -224,6 +224,7 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` ( `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `SD_ID` bigint(20) DEFAULT NULL, `TBL_ID` bigint(20) DEFAULT NULL, + `WRITE_ID` bigint(20) DEFAULT 0, PRIMARY KEY (`PART_ID`), UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`), KEY `PARTITIONS_N49` (`TBL_ID`), @@ -629,6 +630,7 @@ CREATE TABLE IF NOT EXISTS `TBLS` ( `VIEW_EXPANDED_TEXT` mediumtext, `VIEW_ORIGINAL_TEXT` mediumtext, `IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0, + `WRITE_ID` bigint(20) DEFAULT 0, PRIMARY KEY (`TBL_ID`), UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`), KEY `TBLS_N50` (`SD_ID`), diff --git standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql index b3789f9822..89265ad286 100644 --- standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql @@ -1,5 +1,9 @@ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' '; +-- HIVE-19416 +ALTER TABLE TBLS ADD WRITE_ID bigint; +ALTER TABLE PARTITIONS ADD WRITE_ID bigint; + -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' '; diff --git standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql index faca66944e..e113b109a4 100644 --- standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql @@ -163,7 +163,8 @@ CREATE TABLE PARTITIONS LAST_ACCESS_TIME NUMBER (10) NOT NULL, PART_NAME VARCHAR2(767) NULL, SD_ID NUMBER NULL, - TBL_ID NUMBER NULL + TBL_ID NUMBER NULL, + WRITE_ID NUMBER NULL ); ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID); @@ -398,7 +399,8 @@ CREATE TABLE TBLS TBL_TYPE VARCHAR2(128) NULL, VIEW_EXPANDED_TEXT CLOB NULL, VIEW_ORIGINAL_TEXT CLOB NULL, - IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)) + IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)), + WRITE_ID NUMBER NULL ); ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); diff --git standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql index 71f5034446..c9c6b30e50 100644 --- standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql @@ -340,3 +340,4 @@ ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL; -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual; + diff --git standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql index 6fa5e2dadc..c94e6ec71c 100644 --- standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql @@ -1,5 +1,8 @@ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual; +ALTER TABLE TBLS ADD WRITE_ID number NULL; +ALTER TABLE PARTITIONS ADD WRITE_ID number NULL; + -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual; diff --git standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql index 2e7ac5af79..add7f99632 100644 --- standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql +++ standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql @@ -168,7 +168,8 @@ CREATE TABLE "PARTITIONS" ( "LAST_ACCESS_TIME" bigint NOT NULL, "PART_NAME" character varying(767) DEFAULT NULL::character varying, "SD_ID" bigint, - "TBL_ID" bigint + "TBL_ID" bigint, + "WRITE_ID" bigint DEFAULT 0 ); @@ -392,7 +393,8 @@ CREATE TABLE "TBLS" ( "TBL_TYPE" character varying(128) DEFAULT NULL::character varying, "VIEW_EXPANDED_TEXT" text, "VIEW_ORIGINAL_TEXT" text, - "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false + "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false, + "WRITE_ID" bigint DEFAULT 0 ); -- diff --git standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql index 40d2795e91..eff08b3199 100644 --- standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql +++ standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql @@ -1,5 +1,9 @@ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0'; +-- HIVE-19416 +ALTER TABLE "TBLS" ADD "WRITE_ID" bigint; +ALTER TABLE "PARTITIONS" ADD "WRITE_ID" bigint; + -- These lines need to be last. Insert any changes above. 
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0'; diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift index 1ca6454776..1b4f01a715 100644 --- standalone-metastore/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -430,7 +430,9 @@ struct Table { 15: optional bool rewriteEnabled, // rewrite enabled or not 16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation 17: optional string catName, // Name of the catalog the table is in - 18: optional PrincipalType ownerType = PrincipalType.USER // owner type of this table (default to USER for backward compatibility) + 18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility) + 19: optional i64 writeId=-1, + 20: optional bool isStatsCompliant } struct Partition { @@ -442,7 +444,9 @@ struct Partition { 6: StorageDescriptor sd, 7: map parameters, 8: optional PrincipalPrivilegeSet privileges, - 9: optional string catName + 9: optional string catName, + 10: optional i64 writeId=-1, + 11: optional bool isStatsCompliant } struct PartitionWithoutSD { @@ -469,7 +473,9 @@ struct PartitionSpec { 3: string rootPath, 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec, 5: optional PartitionListComposingSpec partitionList, - 6: optional string catName + 6: optional string catName, + 7: optional i64 writeId=-1, + 8: optional bool isStatsCompliant } // column statistics @@ -564,17 +570,25 @@ struct ColumnStatisticsDesc { struct ColumnStatistics { 1: required ColumnStatisticsDesc statsDesc, -2: required list statsObj; +2: required list statsObj, +3: optional i64 txnId=-1, // transaction id of the query that sends this structure TODO## needed? +4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent +5: optional bool isStatsCompliant // Are the stats isolation-level-compliant with the + // the calling query? } struct AggrStats { 1: required list colStats, -2: required i64 partsFound // number of partitions for which stats were found +2: required i64 partsFound, // number of partitions for which stats were found +3: optional bool isStatsCompliant } struct SetPartitionsStatsRequest { 1: required list colStats, -2: optional bool needMerge //stats need to be merged with the existing stats +2: optional bool needMerge, //stats need to be merged with the existing stats +3: optional i64 txnId=-1, // transaction id of the query that sends this structure +4: optional i64 writeId=-1, // writeId for the current query that updates the stats +5: optional string validWriteIdList // valid write id list for the table for which this struct is being sent } // schema of the table/query results etc. 
@@ -703,18 +717,22 @@ struct PartitionsByExprRequest { } struct TableStatsResult { - 1: required list<ColumnStatistics> tableStats + 1: required list<ColumnStatistics> tableStats, + 2: optional bool isStatsCompliant } struct PartitionsStatsResult { - 1: required map<string, list<ColumnStatisticsObj>> partStats + 1: required map<string, list<ColumnStatisticsObj>> partStats, + 2: optional bool isStatsCompliant } struct TableStatsRequest { 1: required string dbName, 2: required string tblName, 3: required list<string> colNames - 4: optional string catName + 4: optional string catName, + 5: optional i64 txnId=-1, // transaction id of the query that sends this structure + 6: optional string validWriteIdList // valid write id list for the table for which this struct is being sent } struct PartitionsStatsRequest { @@ -722,12 +740,15 @@ 2: required string tblName, 3: required list<string> colNames, 4: required list<string> partNames, - 5: optional string catName + 5: optional string catName, + 6: optional i64 txnId=-1, // transaction id of the query that sends this structure + 7: optional string validWriteIdList // valid write id list for the table for which this struct is being sent } // Return type for add_partitions_req struct AddPartitionsResult { 1: optional list<Partition> partitions, + 2: optional bool isStatsCompliant } // Request type for add_partitions_req @@ -737,7 +758,9 @@ 3: required list<Partition> parts, 4: required bool ifNotExists, 5: optional bool needResult=true, - 6: optional string catName + 6: optional string catName, + 7: optional i64 txnId=-1, + 8: optional string validWriteIdList } // Return type for drop_partitions_req @@ -1236,11 +1259,14 @@ struct GetTableRequest { 1: required string dbName, 2: required string tblName, 3: optional ClientCapabilities capabilities, - 4: optional string catName + 4: optional string catName, + 5: optional i64 txnId=-1, + 6: optional string validWriteIdList } struct GetTableResult { - 1: required Table table + 1: required Table table, + 2: optional bool isStatsCompliant } struct GetTablesRequest { @@ -1571,6 +1597,19 @@ struct GetRuntimeStatsRequest { 2: required i32 maxCreateTime } +struct AlterPartitionsRequest { + 1: required string dbName, + 2: required string tableName, + 3: required list<Partition> partitions, + 4: required EnvironmentContext environmentContext, + 5: optional i64 txnId=-1, + 6: optional i64 writeId=-1, + 7: optional string validWriteIdList +} + +struct AlterPartitionsResponse { +} + // Exceptions.
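AlterPartitionsRequest bundles what used to be four positional arguments, plus the new transactional fields, into a single struct, and the empty AlterPartitionsResponse reserves room to return data later without another signature change. From a caller's perspective, the new IMetaStoreClient overload shown further down wires these fields up; the sketch below shows a hypothetical caller passing its transactional context through that overload (client wiring and values are illustrative, not part of the patch).

```java
// Sketch: a caller using the new alter_partitions overload added by this patch.
import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

public class AlterPartitionsExample {
  static void alterWithTxnContext(IMetaStoreClient client, String db, String tbl,
      List<Partition> parts, long txnId, String validWriteIdList, long writeId)
      throws TException {
    // txnId / validWriteIdList identify the calling query's snapshot;
    // writeId is the write this alteration belongs to. Non-transactional
    // callers pass (-1, null, -1), as the updated tests in this patch do.
    client.alter_partitions(db, tbl, parts, new EnvironmentContext(),
        txnId, validWriteIdList, writeId);
  }
}
```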
exception MetaException { @@ -1901,8 +1940,12 @@ service ThriftHiveMetastore extends fb303.FacebookService // prehooks are fired together followed by all post hooks void alter_partitions(1:string db_name, 2:string tbl_name, 3:list new_parts) throws (1:InvalidOperationException o1, 2:MetaException o2) + void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2) + AlterPartitionsResponse alter_partitions_with_environment_context_req(1:AlterPartitionsRequest req) + throws (1:InvalidOperationException o1, 2:MetaException o2) + void alter_partition_with_environment_context(1:string db_name, 2:string tbl_name, 3:Partition new_part, 4:EnvironmentContext environment_context) diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 2454479bf6..106d9f21d7 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -19,24 +19,12 @@ package org.apache.hadoop.hive.metastore; import org.apache.hadoop.hive.common.TableName; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.ISchemaName; -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; @@ -85,6 +73,18 @@ import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.metastore.api.WMPool; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.ISchemaName; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo; @@ -248,6 +248,12 @@ public Table getTable(String catName, String dbName, String tableName) throws Me } @Override + public Table getTable(String catName, String dbName, String tableName, long txnId, String writeIdList) + 
throws MetaException { + return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList); + } + + @Override public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { return objectStore.addPartition(part); @@ -260,6 +266,13 @@ public Partition getPartition(String catName, String dbName, String tableName, L } @Override + public Partition getPartition(String catName, String dbName, String tableName, + List partVals, long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { + return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList); + } + + @Override public boolean dropPartition(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { @@ -279,9 +292,10 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li } @Override - public void alterTable(String catName, String dbName, String name, Table newTable) + public void alterTable(String catName, String dbName, String name, Table newTable, + long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { - objectStore.alterTable(catName, dbName, name, newTable); + objectStore.alterTable(catName, dbName, name, newTable, queryTxnId, queryValidWriteIds); } @Override @@ -344,15 +358,16 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam @Override public void alterPartition(String catName, String dbName, String tblName, List partVals, - Partition newPart) throws InvalidObjectException, MetaException { - objectStore.alterPartition(catName, dbName, tblName, partVals, newPart); + Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { + objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds); } @Override public void alterPartitions(String catName, String dbName, String tblName, - List> partValsList, List newParts) - throws InvalidObjectException, MetaException { - objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts); + List> partValsList, List newParts, + long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { + objectStore.alterPartitions( + catName, dbName, tblName, partValsList, newParts, writeId, queryTxnId, queryValidWriteIds); } @Override @@ -654,6 +669,15 @@ public ColumnStatistics getTableColumnStatistics(String catName, String dbName, } @Override + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, + String tableName, List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { + return objectStore.getTableColumnStatistics( + catName, dbName, tableName, colNames, txnId, writeIdList); + } + + @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { @@ -746,6 +770,15 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } @Override + public List getPartitionColumnStatistics( + String catName, String dbName, String tblName, List partNames, + List colNames, long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { + return objectStore.getPartitionColumnStatistics( + catName, dbName, tblName, partNames, colNames, txnId,
writeIdList); + } + + @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, List partKeys, List partVals) throws MetaException, NoSuchObjectException { @@ -814,6 +847,15 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, } @Override + public AggrStats get_aggr_stats_for(String catName, String dbName, + String tblName, List partNames, + List colNames, + long txnId, String writeIdList) + throws MetaException, NoSuchObjectException { + return null; + } + + @Override public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { return objectStore.getNextNotification(rqst); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 9b79446dae..2587a98130 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -244,6 +244,12 @@ public Table getTable(String catName, String dbName, String tableName) throws Me } @Override + public Table getTable(String catalogName, String dbName, String tableName, + long txnid, String writeIdList) throws MetaException { + return null; + } + + @Override public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { return false; @@ -257,6 +263,13 @@ public Partition getPartition(String catName, String dbName, String tableName, L } @Override + public Partition getPartition(String catName, String dbName, String tableName, List part_vals, + long txnid, String writeIdList) + throws MetaException, NoSuchObjectException { + return null; + } + + @Override public boolean dropPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException { @@ -277,7 +290,7 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li } @Override - public void alterTable(String catName, String dbname, String name, Table newTable) + public void alterTable(String catName, String dbname, String name, Table newTable, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { } @@ -346,15 +359,13 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam @Override public void alterPartition(String catName, String db_name, String tbl_name, List part_vals, - Partition new_part) throws InvalidObjectException, MetaException { + Partition new_part, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { } @Override public void alterPartitions(String catName, String db_name, String tbl_name, - List> part_vals_list, List new_parts) - throws InvalidObjectException, MetaException { - - + List> part_vals_list, List new_parts, + long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException { } @Override @@ -707,6 +718,14 @@ public ColumnStatistics getTableColumnStatistics(String catName, String dbName, } @Override + public ColumnStatistics getTableColumnStatistics( + String catName, String dbName, String tableName, List colName, + long txnid, String writeIdList) + throws MetaException, NoSuchObjectException { + return null; + } + + @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, String colName) throws NoSuchObjectException, 
MetaException, InvalidObjectException { @@ -756,6 +775,14 @@ public void setMetaStoreSchemaVersion(String version, String comment) throws Met } @Override + public List getPartitionColumnStatistics( + String catName, String dbName, String tblName, List partNames, + List colNames, long txnid, String writeIdList) + throws MetaException, NoSuchObjectException { + return Collections.emptyList(); + } + + @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, List partKeys, List partVals) throws MetaException, NoSuchObjectException { @@ -819,6 +846,14 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, } @Override + public AggrStats get_aggr_stats_for( + String catName, String dbName, String tblName, List partNames, + List colNames, long txnid, String writeIdList) + throws MetaException, NoSuchObjectException { + return null; + } + + @Override public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { return null; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 53c4d24ae0..6ef416f101 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName; import java.io.IOException; import java.lang.reflect.Constructor; @@ -1429,6 +1430,17 @@ public Table getTable(String dbname, String name) throws MetaException, return fastpath ? t : deepCopy(filterHook.filterTable(t)); } + @Override + public Table getTable(String dbName, String tableName, long txnId, String validWriteIdList) + throws MetaException, TException, NoSuchObjectException { + GetTableRequest req = new GetTableRequest(dbName, tableName); + req.setCapabilities(version); + req.setTxnId(txnId); + req.setValidWriteIdList(validWriteIdList); + Table t = client.get_table_req(req).getTable(); + return fastpath ? t : deepCopy(filterHook.filterTable(t)); + } + /** {@inheritDoc} */ @Override public List
getTableObjectsByName(String dbName, List tableNames) @@ -1612,13 +1624,39 @@ public void alter_partition(String dbName, String tblName, Partition newPart, En @Override public void alter_partitions(String dbName, String tblName, List newParts) throws InvalidOperationException, MetaException, TException { - client.alter_partitions_with_environment_context(dbName, tblName, newParts, null); + client.alter_partitions(dbName, tblName, newParts); } @Override public void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { - client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext); + AlterPartitionsRequest req = new AlterPartitionsRequest(); + req.setDbName(dbName); + req.setTableName(tblName); + req.setPartitions(newParts); + req.setEnvironmentContext(environmentContext); + // TODO: this is ugly... account for ability to pass via EC for the old API. + if (environmentContext != null && environmentContext.isSetProperties() + && environmentContext.getProperties().containsKey(StatsSetupConst.VALID_WRITE_IDS)) { + req.setTxnId(Long.parseLong(environmentContext.getProperties().get(StatsSetupConst.TXN_ID))); + req.setValidWriteIdList(environmentContext.getProperties().get(StatsSetupConst.VALID_WRITE_IDS)); + } + client.alter_partitions_with_environment_context_req(req); + } + + @Override + public void alter_partitions(String dbName, String tblName, List newParts, + EnvironmentContext environmentContext, + long txnId, String writeIdList, long writeId) + throws InvalidOperationException, MetaException, TException { + AlterPartitionsRequest req = new AlterPartitionsRequest(); + req.setDbName(dbName); + req.setTableName(tblName); + req.setPartitions(newParts); + req.setEnvironmentContext(environmentContext); + req.setTxnId(txnId); + req.setValidWriteIdList(writeIdList); + client.alter_partitions_with_environment_context_req(req); } @Override @@ -1727,6 +1765,17 @@ public void flushCache() { new TableStatsRequest(dbName, tableName, colNames)).getTableStats(); } + @Override + public List getTableColumnStatistics( + String dbName, String tableName, List colNames, long txnId, String validWriteIdList) + throws NoSuchObjectException, MetaException, TException { + TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames); + tsr.setTxnId(txnId); + tsr.setValidWriteIdList(validWriteIdList); + + return client.get_table_statistics_req(tsr).getTableStats(); + } + /** {@inheritDoc} */ @Override public Map> getPartitionColumnStatistics( @@ -1736,6 +1785,18 @@ public void flushCache() { new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats(); } + @Override + public Map> getPartitionColumnStatistics( + String dbName, String tableName, List partNames, + List colNames, long txnId, String validWriteIdList) + throws NoSuchObjectException, MetaException, TException { + PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames); + psr.setTxnId(txnId); + psr.setValidWriteIdList(validWriteIdList); + return client.get_partitions_statistics_req( + psr).getPartStats(); + } + /** {@inheritDoc} */ @Override public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, @@ -2597,6 +2658,21 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, } @Override + public AggrStats getAggrColStatsFor( + String dbName, String tblName, List colNames, + List partName, 
long txnId, String writeIdList) + throws NoSuchObjectException, MetaException, TException { + if (colNames.isEmpty() || partName.isEmpty()) { + LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); + return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate + } + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partName); + req.setTxnId(txnId); + req.setValidWriteIdList(writeIdList); + return client.get_aggr_stats_for(req); + } + + @Override public Iterable> getFileMetadata( final List fileIds) throws TException { return new MetastoreMapIterable() { @@ -3004,6 +3080,12 @@ public Table getTable(String catName, String dbName, String tableName) throws Me } @Override + public Table getTable(String catName, String dbName, String tableName, + long txnId, String validWriteIdList) throws TException { + throw new UnsupportedOperationException(); + } + + @Override public List
getTableObjectsByName(String catName, String dbName, List tableNames) throws MetaException, InvalidOperationException, UnknownDBException, TException { @@ -3230,7 +3312,8 @@ public void alter_partition(String catName, String dbName, String tblName, Parti @Override public void alter_partitions(String catName, String dbName, String tblName, List newParts, - EnvironmentContext environmentContext) throws + EnvironmentContext environmentContext, + long txnId, String writeIdList, long writeId) throws InvalidOperationException, MetaException, TException { throw new UnsupportedOperationException(); } @@ -3263,6 +3346,14 @@ public void renamePartition(String catName, String dbname, String tableName, } @Override + public List getTableColumnStatistics( + String catName, String dbName, String tableName, List colNames, + long txnId, String validWriteIdList) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override public Map> getPartitionColumnStatistics(String catName, String dbName, String tableName, @@ -3273,6 +3364,14 @@ public void renamePartition(String catName, String dbname, String tableName, } @Override + public Map> getPartitionColumnStatistics( + String catName, String dbName, String tableName, List partNames, + List colNames, long txnId, String validWriteIdList) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, String partName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, @@ -3320,6 +3419,14 @@ public AggrStats getAggrColStatsFor(String catName, String dbName, String tblNam } @Override + public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, + List colNames, List partNames, + long txnId, String writeIdList) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override public void dropConstraint(String catName, String dbName, String tableName, String constraintName) throws MetaException, NoSuchObjectException, TException { @@ -3424,4 +3531,5 @@ public void addRuntimeStat(RuntimeStat stat) throws TException { public List getRuntimeStats(int maxWeight, int maxCreateTime) throws TException { throw new UnsupportedOperationException(); } + } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java index abbcda3546..c5977b2de2 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import java.util.List; + import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -135,6 +136,12 @@ public Table getTable(String catName, String dbName, String tableName) throws Me } @Override + public Table getTable(String catName, String dbName, String tableName, + long txnId, String writeIdList) throws MetaException { + return getTableModifier.apply(super.getTable(catName, dbName, tableName, txnId, writeIdList)); + } + + @Override public 
Partition getPartition(String catName, String dbName, String tableName, List partVals) throws NoSuchObjectException, MetaException { return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partVals)); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java index adc82b0b9c..d9dd954f7e 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java @@ -60,7 +60,7 @@ public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidOb getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3")); HiveAlterHandler handler = new HiveAlterHandler(); handler.setConf(conf); - handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable); + handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null); } @Test @@ -85,7 +85,7 @@ public void testAlterTableDelColUpdateStats() throws MetaException, InvalidObjec RawStore msdb = Mockito.mock(RawStore.class); HiveAlterHandler handler = new HiveAlterHandler(); handler.setConf(conf); - handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable); + handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null); Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics( getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4") ); @@ -115,7 +115,7 @@ public void testAlterTableChangePosNotUpdateStats() throws MetaException, Invali getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")); HiveAlterHandler handler = new HiveAlterHandler(); handler.setConf(conf); - handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable); + handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 833e2bdabf..c40d45d7fa 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -245,7 +245,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO newTbl1.setOwner("role1"); newTbl1.setOwnerType(PrincipalType.ROLE); - objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1); + objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1, -1, null); tables = objectStore.getTables(DEFAULT_CATALOG_NAME, DB1, "new*"); Assert.assertEquals(1, tables.size()); Assert.assertEquals("new" + TABLE1, tables.get(0)); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index 62ed380dfc..7cf5c267bb 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -232,7 +232,7 @@ public void testTableOps() throws Exception { tblOwner = "role1"; tbl.setOwner(tblOwner); 
tbl.setOwnerType(PrincipalType.ROLE); - objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl); + objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl, -1, null); tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); Assert.assertEquals("Owner of the table did not change.", tblOwner, tbl.getOwner()); @@ -338,7 +338,7 @@ public void testPartitionOps() throws Exception { Partition ptn1Atl = new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0, 0, tbl.getSd(), partParams); ptn1Atl.setCatName(DEFAULT_CATALOG_NAME); - objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl); + objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl, -1, null); ptn1Atl = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt)); // Drop an existing partition ("bbb") via ObjectStore diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java index 54bf3d7e25..9b9b101675 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore.client; +import java.net.ProtocolException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -41,6 +42,7 @@ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; import org.apache.thrift.TException; +import org.apache.thrift.protocol.TProtocolException; import org.apache.thrift.transport.TTransportException; import com.google.common.collect.Lists; @@ -692,11 +694,16 @@ public void testAlterPartitionsNoTblName() throws Exception { client.alter_partitions(DB_NAME, "", Lists.newArrayList(part)); } - @Test(expected = MetaException.class) + @Test public void testAlterPartitionsNullTblName() throws Exception { createTable4PartColsParts(client); Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); - client.alter_partitions(DB_NAME, null, Lists.newArrayList(part)); + try { + client.alter_partitions(DB_NAME, null, Lists.newArrayList(part)); + Assert.fail("didn't throw"); + } catch (TProtocolException | MetaException e) { + // By design + } } @Test(expected = NullPointerException.class) @@ -720,7 +727,7 @@ public void testAlterPartitionsNullPartitionList() throws Exception { Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, null); fail("Should have thrown exception"); - } catch (NullPointerException | TTransportException e) { + } catch (NullPointerException | TTransportException | TProtocolException e) { //TODO: should not throw different exceptions for different HMS deployment types } } @@ -786,7 +793,7 @@ public void testAlterPartitionsWithEnvironmentCtx() throws Exception { assertPartitionsHaveCorrectValues(newParts, testValues); client.alter_partitions(DB_NAME, TABLE_NAME, newParts, new EnvironmentContext()); - client.alter_partitions(DB_NAME, TABLE_NAME, newParts, null); + client.alter_partitions(DB_NAME, TABLE_NAME, newParts); for (int i = 0; i < testValues.size(); ++i) { assertPartitionChanged(oldParts.get(i), testValues.get(i), 
PARTCOL_SCHEMA); @@ -835,7 +842,8 @@ public void testAlterPartitionsWithEnvironmentCtxMissingPartitionVals() throws E public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception { createTable4PartColsParts(client); Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); - client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext()); + client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext(), + -1, null, -1); } @Test(expected = InvalidOperationException.class) @@ -859,11 +867,16 @@ public void testAlterPartitionsWithEnvironmentCtxNoTblName() throws Exception { client.alter_partitions(DB_NAME, "", Lists.newArrayList(part), new EnvironmentContext()); } - @Test(expected = MetaException.class) + @Test public void testAlterPartitionsWithEnvironmentCtxNullTblName() throws Exception { createTable4PartColsParts(client); Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); - client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext()); + try { + client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext()); + Assert.fail("didn't throw"); + } catch (MetaException | TProtocolException ex) { + // By design. + } } @Test(expected = NullPointerException.class) @@ -889,7 +902,7 @@ public void testAlterPartitionsWithEnvironmentCtxNullPartitionList() throws Exce Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, null, new EnvironmentContext()); fail("Should have thrown exception"); - } catch (NullPointerException | TTransportException e) { + } catch (NullPointerException | TTransportException | TProtocolException e) { //TODO: should not throw different exceptions for different HMS deployment types } } diff --git storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java index 9867a81a7b..cfe01feed0 100644 --- storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java +++ storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java @@ -63,6 +63,10 @@ public ValidWriteIdList getTableValidWriteIdList(String fullTableName) { return null; } + public boolean isEmpty() { + return tablesValidWriteIdList.isEmpty(); + } + // Each ValidWriteIdList is separated with "$" and each one maps to one table // Format: <txnId>$<fullTableName>:<highWatermark>:<minOpenWriteId>:<openWriteIds>:<abortedWriteIds>$<fullTableName>... private void readFromString(String src) { diff --git storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java index 17f3777fbd..4b3cb7d85e 100644 --- storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java +++ storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java @@ -19,6 +19,8 @@ import org.apache.hadoop.hive.common.ValidWriteIdList; +import java.util.*; + public class TxnIdUtils { /** @@ -36,8 +38,8 @@ public static boolean checkEquivalentWriteIds(ValidWriteIdList a, ValidWriteIdLi } return checkEquivalentCommittedIds( - older.getHighWatermark(), older.getInvalidWriteIds(), - newer.getHighWatermark(), newer.getInvalidWriteIds()); + older.getHighWatermark(), older.getInvalidWriteIds(), + newer.getHighWatermark(), newer.getInvalidWriteIds()); } /**