diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 580bae9c3f..31d9b715de 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -396,9 +396,9 @@ public void alterPartition(String catName, String dbName, String tblName, List> partValsList, List newParts, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidObjectException, MetaException { - objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList); + objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList, writeId); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index e67b5798fa..09c03ebdd4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -1625,13 +1625,15 @@ public static void setValidWriteIdList(Configuration conf, TableScanDesc tsDesc) public static class TableSnapshot { private long txnId; + private long writeId; private String validWriteIdList; public TableSnapshot() { } - public TableSnapshot(long txnId, String validWriteIdList) { + public TableSnapshot(long txnId, long writeId, String validWriteIdList) { this.txnId = txnId; + this.writeId = writeId; this.validWriteIdList = validWriteIdList; } @@ -1647,6 +1649,14 @@ public void setTxnId(long txnId) { this.txnId = txnId; } + public long getWriteId() { + return writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } + public void setValidWriteIdList(String validWriteIdList) { this.validWriteIdList = validWriteIdList; } @@ -1656,18 +1666,36 @@ public void setValidWriteIdList(String validWriteIdList) { * Create a TableShopshot with the given "conf" * for the table of the given "tbl". * - * @param conf - * @param tbl + * @param conf HiveConf + * @param tbl table to create a table snapshot on + * @return TableSnapshot on success, null on failure + * @throws LockException + */ + public static TableSnapshot getTableSnapshot( + Configuration conf, + Table tbl) throws LockException { + return getTableSnapshot(conf, tbl, false); + } + + /** + * Create a TableSnapshot with the given "conf" + * for the table of the given "tbl". + * + * @param conf HiveConf + * @param tbl table to create a table snapshot on + * @param isStatsUpdater true if the caller is a stats updater * @return TableSnapshot on success, null on failure * @throws LockException */ public static TableSnapshot getTableSnapshot( Configuration conf, - Table tbl) throws LockException { + Table tbl, + boolean isStatsUpdater) throws LockException { if (!isTransactionalTable(tbl)) { return null; } else { - long txnId = 0; + long txnId = -1; + long writeId = -1; ValidWriteIdList validWriteIdList = null; HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr(); @@ -1677,6 +1705,11 @@ public static TableSnapshot getTableSnapshot( } String fullTableName = getFullTableName(tbl.getDbName(), tbl.getTableName()); if (txnId > 0 && isTransactionalTable(tbl)) { + if (isStatsUpdater) { + writeId = SessionState.get().getTxnMgr() != null ?
+ SessionState.get().getTxnMgr().getAllocatedTableWriteId( + tbl.getDbName(), tbl.getTableName()) : -1; + } validWriteIdList = getTableValidWriteIdList(conf, fullTableName); // TODO: we shouldn't do this during normal Hive compilation, write IDs should be in conf. @@ -1694,7 +1727,7 @@ public static TableSnapshot getTableSnapshot( } } } - return new TableSnapshot(txnId, + return new TableSnapshot(txnId, writeId, validWriteIdList != null ? validWriteIdList.toString() : null); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 91047861a1..18eed52ca1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -55,6 +55,7 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; +import org.apache.hadoop.hive.ql.session.*; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; import org.apache.thrift.TException; @@ -1020,6 +1021,16 @@ public long getTableWriteId(String dbName, String tableName) throws LockExceptio return getTableWriteId(dbName, tableName, true); } + @Override + public long getAllocatedTableWriteId(String dbName, String tableName) throws LockException { + assert isTxnOpen(); + // Calls getTableWriteId() with allocateIfNotYet being false + // to return 0 if the dbName:tableName's writeId is not yet allocated. + // This happens when the current context is before + // Driver.acquireLocks() is called. + return getTableWriteId(dbName, tableName, false); + } + private long getTableWriteId( String dbName, String tableName, boolean allocateIfNotYet) throws LockException { String fullTableName = AcidUtils.getFullTableName(dbName, tableName); diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index 78bb303bd6..2398419d7b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -78,6 +78,11 @@ public long getTableWriteId(String dbName, String tableName) throws LockExceptio return 0L; } + @Override + public long getAllocatedTableWriteId(String dbName, String tableName) throws LockException { + return 0L; + } + @Override public void replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy, List srcTxnToWriteIdList) throws LockException { diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index 9ea40f4fa8..fcfa58b0b2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -276,7 +276,19 @@ void replTableWriteIdState(String validWriteIdList, String dbName, String tableN * if {@code isTxnOpen()}, returns the table write ID associated with current active transaction. */ long getTableWriteId(String dbName, String tableName) throws LockException; - /** + + /** + * if {@code isTxnOpen()}, returns the already allocated table write ID of the table with + * the given "dbName.tableName" for the current active transaction. + * If not allocated, then returns 0.
+ * @param dbName + * @param tableName + * @return 0 if not yet allocated + * @throws LockException + */ + public long getAllocatedTableWriteId(String dbName, String tableName) throws LockException; + + /** * Allocates write id for each transaction in the list. * @param dbName database name * @param tableName the name of the table to allocate the write id diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 9f052ae9d8..141f71422c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -640,7 +640,7 @@ public void alterTable(String dbName, String tblName, Table newTbl, boolean casc // Take a table snapshot and set it to newTbl. if (transactional) { - setTableSnapshotForTransactionalTable(conf, newTbl); + setTableSnapshotForTransactionalTable(conf, newTbl, true); } getMSC().alter_table_with_environmentContext(dbName, tblName, newTbl.getTTable(), environmentContext); @@ -723,7 +723,7 @@ public void alterPartition(String dbName, String tblName, Partition newPart, newPart.setLocation(location); } if (transactional) { - setTableSnapshotForTransactionalPartition(conf, newPart); + setTableSnapshotForTransactionalPartition(conf, newPart, true); } getSynchronizedMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext); @@ -768,7 +768,7 @@ public void alterPartitions(String tblName, List newParts, try { AcidUtils.TableSnapshot tableSnapshot = null; if (transactional) { - tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable()); + tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable(), true); } // Remove the DDL time so that it gets refreshed for (Partition tmpPart: newParts) { @@ -784,7 +784,8 @@ public void alterPartitions(String tblName, List newParts, } getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext, tableSnapshot != null ? tableSnapshot.getTxnId() : -1, - tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null, + tableSnapshot != null ? tableSnapshot.getWriteId() : -1 ); } catch (MetaException e) { throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } catch (TException e) { @@ -916,7 +917,7 @@ public void createTable(Table tbl, boolean ifNotExists, } } // Set table snapshot to api.Table to make it persistent. - setTableSnapshotForTransactionalTable(conf, tbl); + setTableSnapshotForTransactionalTable(conf, tbl, true); if (primaryKeys == null && foreignKeys == null && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null && checkConstraints == null) { @@ -1812,7 +1813,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath); alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString()); validatePartition(newTPart); - setTableSnapshotForTransactionalPartition(conf, newTPart); + setTableSnapshotForTransactionalPartition(conf, newTPart, true); // If config is set, table is not temporary and partition being inserted exists, capture // the list of files added. 
For not yet existing partitions (insert overwrite to new partition @@ -4447,9 +4448,10 @@ public boolean setPartitionColumnStatistics( ColumnStatisticsDesc statsDesc = colStat.getStatsDesc(); Table tbl = getTable(statsDesc.getDbName(), statsDesc.getTableName()); - AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true); request.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0); request.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + request.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0); return getMSC().setPartitionColumnStatistics(request); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); @@ -5330,24 +5332,44 @@ public StorageHandlerInfo getStorageHandlerInfo(Table table) } private void setTableSnapshotForTransactionalTable( - HiveConf conf, Table newTbl) + HiveConf conf, Table newTbl) + throws LockException { + setTableSnapshotForTransactionalTable(conf, newTbl, false); + } + + private void setTableSnapshotForTransactionalTable( + HiveConf conf, Table newTbl, boolean isStatsUpdater) throws LockException { org.apache.hadoop.hive.metastore.api.Table newTTbl = newTbl.getTTable(); - AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, newTbl); + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(conf, newTbl, isStatsUpdater); newTTbl.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1); newTTbl.setValidWriteIdList( tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + if (isStatsUpdater) { + newTTbl.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : -1); + } } private void setTableSnapshotForTransactionalPartition(HiveConf conf, Partition partition) + throws LockException { + setTableSnapshotForTransactionalPartition(conf, partition, false); + } + + private void setTableSnapshotForTransactionalPartition( + HiveConf conf, Partition partition, boolean isStatsUpdater) throws LockException { - AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, partition.getTable()); + AcidUtils.TableSnapshot tableSnapshot = + AcidUtils.getTableSnapshot(conf, partition.getTable(), isStatsUpdater); org.apache.hadoop.hive.metastore.api.Partition tpartition = partition.getTPartition(); tpartition.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1); tpartition.setValidWriteIdList( tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + if (isStatsUpdater) { + tpartition.setWriteId(tableSnapshot != null ? 
tableSnapshot.getWriteId() : -1); + } } } diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index c2d6a5694e..da14ac1637 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -6452,6 +6452,11 @@ void Table::__set_txnId(const int64_t val) { __isset.txnId = true; } +void Table::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + void Table::__set_validWriteIdList(const std::string& val) { this->validWriteIdList = val; __isset.validWriteIdList = true; @@ -6665,6 +6670,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { } break; case 20: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 21: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->validWriteIdList); this->__isset.validWriteIdList = true; @@ -6672,7 +6685,7 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 21: + case 22: if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast249; xfer += iprot->readI32(ecast249); @@ -6799,13 +6812,18 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeI64(this->txnId); xfer += oprot->writeFieldEnd(); } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 20); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } if (this->__isset.validWriteIdList) { - xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 20); + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 21); xfer += oprot->writeString(this->validWriteIdList); xfer += oprot->writeFieldEnd(); } if (this->__isset.isStatsCompliant) { - xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 21); + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 22); xfer += oprot->writeI32((int32_t)this->isStatsCompliant); xfer += oprot->writeFieldEnd(); } @@ -6835,6 +6853,7 @@ void swap(Table &a, Table &b) { swap(a.catName, b.catName); swap(a.ownerType, b.ownerType); swap(a.txnId, b.txnId); + swap(a.writeId, b.writeId); swap(a.validWriteIdList, b.validWriteIdList); swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); @@ -6860,6 +6879,7 @@ Table::Table(const Table& other252) { catName = other252.catName; ownerType = other252.ownerType; txnId = other252.txnId; + writeId = other252.writeId; validWriteIdList = other252.validWriteIdList; isStatsCompliant = other252.isStatsCompliant; __isset = other252.__isset; @@ -6884,6 +6904,7 @@ Table& Table::operator=(const Table& other253) { catName = other253.catName; ownerType = other253.ownerType; txnId = other253.txnId; + writeId = other253.writeId; validWriteIdList = other253.validWriteIdList; isStatsCompliant = other253.isStatsCompliant; __isset = other253.__isset; @@ -6911,6 +6932,7 @@ void Table::printTo(std::ostream& out) const { out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ", " << "ownerType="; (__isset.ownerType ? 
(out << to_string(ownerType)) : (out << "")); out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "")); out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "")); out << ")"; @@ -6964,6 +6986,11 @@ void Partition::__set_txnId(const int64_t val) { __isset.txnId = true; } +void Partition::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + void Partition::__set_validWriteIdList(const std::string& val) { this->validWriteIdList = val; __isset.validWriteIdList = true; @@ -7103,6 +7130,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { } break; case 11: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 12: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->validWriteIdList); this->__isset.validWriteIdList = true; @@ -7110,7 +7145,7 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 12: + case 13: if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast266; xfer += iprot->readI32(ecast266); @@ -7197,13 +7232,18 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeI64(this->txnId); xfer += oprot->writeFieldEnd(); } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 11); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } if (this->__isset.validWriteIdList) { - xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 11); + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 12); xfer += oprot->writeString(this->validWriteIdList); xfer += oprot->writeFieldEnd(); } if (this->__isset.isStatsCompliant) { - xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 12); + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 13); xfer += oprot->writeI32((int32_t)this->isStatsCompliant); xfer += oprot->writeFieldEnd(); } @@ -7224,6 +7264,7 @@ void swap(Partition &a, Partition &b) { swap(a.privileges, b.privileges); swap(a.catName, b.catName); swap(a.txnId, b.txnId); + swap(a.writeId, b.writeId); swap(a.validWriteIdList, b.validWriteIdList); swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); @@ -7240,6 +7281,7 @@ Partition::Partition(const Partition& other269) { privileges = other269.privileges; catName = other269.catName; txnId = other269.txnId; + writeId = other269.writeId; validWriteIdList = other269.validWriteIdList; isStatsCompliant = other269.isStatsCompliant; __isset = other269.__isset; @@ -7255,6 +7297,7 @@ Partition& Partition::operator=(const Partition& other270) { privileges = other270.privileges; catName = other270.catName; txnId = other270.txnId; + writeId = other270.writeId; validWriteIdList = other270.validWriteIdList; isStatsCompliant = other270.isStatsCompliant; __isset = other270.__isset; @@ -7273,6 +7316,7 @@ void Partition::printTo(std::ostream& out) const { out << ", " << "privileges="; (__isset.privileges ? 
(out << to_string(privileges)) : (out << "")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "")); out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "")); out << ")"; @@ -7779,6 +7823,11 @@ void PartitionSpec::__set_txnId(const int64_t val) { __isset.txnId = true; } +void PartitionSpec::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + void PartitionSpec::__set_validWriteIdList(const std::string& val) { this->validWriteIdList = val; __isset.validWriteIdList = true; @@ -7867,6 +7916,14 @@ uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { } break; case 8: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 9: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->validWriteIdList); this->__isset.validWriteIdList = true; @@ -7874,7 +7931,7 @@ uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 9: + case 10: if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast303; xfer += iprot->readI32(ecast303); @@ -7933,13 +7990,18 @@ uint32_t PartitionSpec::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeI64(this->txnId); xfer += oprot->writeFieldEnd(); } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 8); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } if (this->__isset.validWriteIdList) { - xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 9); xfer += oprot->writeString(this->validWriteIdList); xfer += oprot->writeFieldEnd(); } if (this->__isset.isStatsCompliant) { - xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 9); + xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 10); xfer += oprot->writeI32((int32_t)this->isStatsCompliant); xfer += oprot->writeFieldEnd(); } @@ -7957,6 +8019,7 @@ void swap(PartitionSpec &a, PartitionSpec &b) { swap(a.partitionList, b.partitionList); swap(a.catName, b.catName); swap(a.txnId, b.txnId); + swap(a.writeId, b.writeId); swap(a.validWriteIdList, b.validWriteIdList); swap(a.isStatsCompliant, b.isStatsCompliant); swap(a.__isset, b.__isset); @@ -7970,6 +8033,7 @@ PartitionSpec::PartitionSpec(const PartitionSpec& other304) { partitionList = other304.partitionList; catName = other304.catName; txnId = other304.txnId; + writeId = other304.writeId; validWriteIdList = other304.validWriteIdList; isStatsCompliant = other304.isStatsCompliant; __isset = other304.__isset; @@ -7982,6 +8046,7 @@ PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other305) { partitionList = other305.partitionList; catName = other305.catName; txnId = other305.txnId; + writeId = other305.writeId; validWriteIdList = other305.validWriteIdList; isStatsCompliant = other305.isStatsCompliant; 
__isset = other305.__isset; @@ -7997,6 +8062,7 @@ void PartitionSpec::printTo(std::ostream& out) const { out << ", " << "partitionList="; (__isset.partitionList ? (out << to_string(partitionList)) : (out << "")); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "")); out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "")); out << ")"; @@ -10317,6 +10383,11 @@ void SetPartitionsStatsRequest::__set_txnId(const int64_t val) { __isset.txnId = true; } +void SetPartitionsStatsRequest::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + void SetPartitionsStatsRequest::__set_validWriteIdList(const std::string& val) { this->validWriteIdList = val; __isset.validWriteIdList = true; @@ -10381,6 +10452,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* } break; case 4: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->validWriteIdList); this->__isset.validWriteIdList = true; @@ -10429,8 +10508,13 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeI64(this->txnId); xfer += oprot->writeFieldEnd(); } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 4); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } if (this->__isset.validWriteIdList) { - xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->validWriteIdList); xfer += oprot->writeFieldEnd(); } @@ -10444,6 +10528,7 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) { swap(a.colStats, b.colStats); swap(a.needMerge, b.needMerge); swap(a.txnId, b.txnId); + swap(a.writeId, b.writeId); swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } @@ -10452,6 +10537,7 @@ SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsReq colStats = other354.colStats; needMerge = other354.needMerge; txnId = other354.txnId; + writeId = other354.writeId; validWriteIdList = other354.validWriteIdList; __isset = other354.__isset; } @@ -10459,6 +10545,7 @@ SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartiti colStats = other355.colStats; needMerge = other355.needMerge; txnId = other355.txnId; + writeId = other355.writeId; validWriteIdList = other355.validWriteIdList; __isset = other355.__isset; return *this; @@ -10469,6 +10556,7 @@ void SetPartitionsStatsRequest::printTo(std::ostream& out) const { out << "colStats=" << to_string(colStats); out << ", " << "needMerge="; (__isset.needMerge ? (out << to_string(needMerge)) : (out << "")); out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "writeId="; (__isset.writeId ? 
(out << to_string(writeId)) : (out << "")); out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); out << ")"; } @@ -31831,6 +31919,11 @@ void AlterPartitionsRequest::__set_txnId(const int64_t val) { __isset.txnId = true; } +void AlterPartitionsRequest::__set_writeId(const int64_t val) { + this->writeId = val; +__isset.writeId = true; +} + void AlterPartitionsRequest::__set_validWriteIdList(const std::string& val) { this->validWriteIdList = val; __isset.validWriteIdList = true; @@ -31914,6 +32007,14 @@ uint32_t AlterPartitionsRequest::read(::apache::thrift::protocol::TProtocol* ipr } break; case 6: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + this->__isset.writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 7: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->validWriteIdList); this->__isset.validWriteIdList = true; @@ -31975,8 +32076,13 @@ uint32_t AlterPartitionsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeI64(this->txnId); xfer += oprot->writeFieldEnd(); } + if (this->__isset.writeId) { + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 6); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + } if (this->__isset.validWriteIdList) { - xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 7); xfer += oprot->writeString(this->validWriteIdList); xfer += oprot->writeFieldEnd(); } @@ -31992,6 +32098,7 @@ void swap(AlterPartitionsRequest &a, AlterPartitionsRequest &b) { swap(a.partitions, b.partitions); swap(a.environmentContext, b.environmentContext); swap(a.txnId, b.txnId); + swap(a.writeId, b.writeId); swap(a.validWriteIdList, b.validWriteIdList); swap(a.__isset, b.__isset); } @@ -32002,6 +32109,7 @@ AlterPartitionsRequest::AlterPartitionsRequest(const AlterPartitionsRequest& oth partitions = other1187.partitions; environmentContext = other1187.environmentContext; txnId = other1187.txnId; + writeId = other1187.writeId; validWriteIdList = other1187.validWriteIdList; __isset = other1187.__isset; } @@ -32011,6 +32119,7 @@ AlterPartitionsRequest& AlterPartitionsRequest::operator=(const AlterPartitionsR partitions = other1188.partitions; environmentContext = other1188.environmentContext; txnId = other1188.txnId; + writeId = other1188.writeId; validWriteIdList = other1188.validWriteIdList; __isset = other1188.__isset; return *this; @@ -32023,6 +32132,7 @@ void AlterPartitionsRequest::printTo(std::ostream& out) const { out << ", " << "partitions=" << to_string(partitions); out << ", " << "environmentContext=" << to_string(environmentContext); out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); + out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "")); out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? 
(out << to_string(validWriteIdList)) : (out << "")); out << ")"; } diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 68e34d57bb..81eadbcf93 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -3115,7 +3115,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) } typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true), txnId(true), validWriteIdList(false), isStatsCompliant(false) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true), txnId(true), writeId(true), validWriteIdList(false), isStatsCompliant(false) {} bool tableName :1; bool dbName :1; bool owner :1; @@ -3135,6 +3135,7 @@ typedef struct _Table__isset { bool catName :1; bool ownerType :1; bool txnId :1; + bool writeId :1; bool validWriteIdList :1; bool isStatsCompliant :1; } _Table__isset; @@ -3144,7 +3145,7 @@ class Table { Table(const Table&); Table& operator=(const Table&); - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { + Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1), txnId(-1LL), writeId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { ownerType = (PrincipalType::type)1; } @@ -3169,6 +3170,7 @@ class Table { std::string catName; PrincipalType::type ownerType; int64_t txnId; + int64_t writeId; std::string validWriteIdList; IsolationLevelCompliance::type isStatsCompliant; @@ -3212,6 +3214,8 @@ class Table { void __set_txnId(const int64_t val); + void __set_writeId(const int64_t val); + void __set_validWriteIdList(const std::string& val); void __set_isStatsCompliant(const IsolationLevelCompliance::type val); @@ -3270,6 +3274,10 @@ class Table { return false; else if (__isset.txnId && !(txnId == rhs.txnId)) return false; + if (__isset.writeId != rhs.__isset.writeId) + return false; + else if (__isset.writeId && !(writeId == rhs.writeId)) + return false; if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) return false; else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) @@ -3301,7 +3309,7 @@ inline std::ostream& operator<<(std::ostream& out, const Table& obj) } typedef struct _Partition__isset { - _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), 
catName(false), txnId(true), validWriteIdList(false), isStatsCompliant(false) {} + _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false), txnId(true), writeId(true), validWriteIdList(false), isStatsCompliant(false) {} bool values :1; bool dbName :1; bool tableName :1; @@ -3312,6 +3320,7 @@ typedef struct _Partition__isset { bool privileges :1; bool catName :1; bool txnId :1; + bool writeId :1; bool validWriteIdList :1; bool isStatsCompliant :1; } _Partition__isset; @@ -3321,7 +3330,7 @@ class Partition { Partition(const Partition&); Partition& operator=(const Partition&); - Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName(), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { + Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName(), txnId(-1LL), writeId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~Partition() throw(); @@ -3335,6 +3344,7 @@ class Partition { PrincipalPrivilegeSet privileges; std::string catName; int64_t txnId; + int64_t writeId; std::string validWriteIdList; IsolationLevelCompliance::type isStatsCompliant; @@ -3360,6 +3370,8 @@ class Partition { void __set_txnId(const int64_t val); + void __set_writeId(const int64_t val); + void __set_validWriteIdList(const std::string& val); void __set_isStatsCompliant(const IsolationLevelCompliance::type val); @@ -3392,6 +3404,10 @@ class Partition { return false; else if (__isset.txnId && !(txnId == rhs.txnId)) return false; + if (__isset.writeId != rhs.__isset.writeId) + return false; + else if (__isset.writeId && !(writeId == rhs.writeId)) + return false; if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) return false; else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) @@ -3599,7 +3615,7 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionListComposingS } typedef struct _PartitionSpec__isset { - _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false), txnId(true), validWriteIdList(false), isStatsCompliant(false) {} + _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false), txnId(true), writeId(true), validWriteIdList(false), isStatsCompliant(false) {} bool dbName :1; bool tableName :1; bool rootPath :1; @@ -3607,6 +3623,7 @@ typedef struct _PartitionSpec__isset { bool partitionList :1; bool catName :1; bool txnId :1; + bool writeId :1; bool validWriteIdList :1; bool isStatsCompliant :1; } _PartitionSpec__isset; @@ -3616,7 +3633,7 @@ class PartitionSpec { PartitionSpec(const PartitionSpec&); PartitionSpec& operator=(const PartitionSpec&); - PartitionSpec() : dbName(), tableName(), rootPath(), catName(), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { + PartitionSpec() : dbName(), tableName(), rootPath(), catName(), txnId(-1LL), writeId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) { } virtual ~PartitionSpec() throw(); @@ -3627,6 +3644,7 @@ class PartitionSpec { PartitionListComposingSpec partitionList; std::string catName; int64_t txnId; + int64_t writeId; std::string validWriteIdList; IsolationLevelCompliance::type isStatsCompliant; @@ -3646,6 +3664,8 @@ class PartitionSpec { void 
__set_txnId(const int64_t val); + void __set_writeId(const int64_t val); + void __set_validWriteIdList(const std::string& val); void __set_isStatsCompliant(const IsolationLevelCompliance::type val); @@ -3674,6 +3694,10 @@ class PartitionSpec { return false; else if (__isset.txnId && !(txnId == rhs.txnId)) return false; + if (__isset.writeId != rhs.__isset.writeId) + return false; + else if (__isset.writeId && !(writeId == rhs.writeId)) + return false; if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) return false; else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) @@ -4623,9 +4647,10 @@ inline std::ostream& operator<<(std::ostream& out, const AggrStats& obj) } typedef struct _SetPartitionsStatsRequest__isset { - _SetPartitionsStatsRequest__isset() : needMerge(false), txnId(true), validWriteIdList(false) {} + _SetPartitionsStatsRequest__isset() : needMerge(false), txnId(true), writeId(true), validWriteIdList(false) {} bool needMerge :1; bool txnId :1; + bool writeId :1; bool validWriteIdList :1; } _SetPartitionsStatsRequest__isset; @@ -4634,13 +4659,14 @@ class SetPartitionsStatsRequest { SetPartitionsStatsRequest(const SetPartitionsStatsRequest&); SetPartitionsStatsRequest& operator=(const SetPartitionsStatsRequest&); - SetPartitionsStatsRequest() : needMerge(0), txnId(-1LL), validWriteIdList() { + SetPartitionsStatsRequest() : needMerge(0), txnId(-1LL), writeId(-1LL), validWriteIdList() { } virtual ~SetPartitionsStatsRequest() throw(); std::vector colStats; bool needMerge; int64_t txnId; + int64_t writeId; std::string validWriteIdList; _SetPartitionsStatsRequest__isset __isset; @@ -4651,6 +4677,8 @@ class SetPartitionsStatsRequest { void __set_txnId(const int64_t val); + void __set_writeId(const int64_t val); + void __set_validWriteIdList(const std::string& val); bool operator == (const SetPartitionsStatsRequest & rhs) const @@ -4665,6 +4693,10 @@ class SetPartitionsStatsRequest { return false; else if (__isset.txnId && !(txnId == rhs.txnId)) return false; + if (__isset.writeId != rhs.__isset.writeId) + return false; + else if (__isset.writeId && !(writeId == rhs.writeId)) + return false; if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) return false; else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) @@ -13442,8 +13474,9 @@ inline std::ostream& operator<<(std::ostream& out, const GetRuntimeStatsRequest& } typedef struct _AlterPartitionsRequest__isset { - _AlterPartitionsRequest__isset() : txnId(true), validWriteIdList(false) {} + _AlterPartitionsRequest__isset() : txnId(true), writeId(true), validWriteIdList(false) {} bool txnId :1; + bool writeId :1; bool validWriteIdList :1; } _AlterPartitionsRequest__isset; @@ -13452,7 +13485,7 @@ class AlterPartitionsRequest { AlterPartitionsRequest(const AlterPartitionsRequest&); AlterPartitionsRequest& operator=(const AlterPartitionsRequest&); - AlterPartitionsRequest() : dbName(), tableName(), txnId(-1LL), validWriteIdList() { + AlterPartitionsRequest() : dbName(), tableName(), txnId(-1LL), writeId(-1LL), validWriteIdList() { } virtual ~AlterPartitionsRequest() throw(); @@ -13461,6 +13494,7 @@ class AlterPartitionsRequest { std::vector partitions; EnvironmentContext environmentContext; int64_t txnId; + int64_t writeId; std::string validWriteIdList; _AlterPartitionsRequest__isset __isset; @@ -13475,6 +13509,8 @@ class AlterPartitionsRequest { void __set_txnId(const int64_t val); + void __set_writeId(const int64_t val); + void __set_validWriteIdList(const 
std::string& val); bool operator == (const AlterPartitionsRequest & rhs) const @@ -13491,6 +13527,10 @@ class AlterPartitionsRequest { return false; else if (__isset.txnId && !(txnId == rhs.txnId)) return false; + if (__isset.writeId != rhs.__isset.writeId) + return false; + else if (__isset.writeId && !(writeId == rhs.writeId)) + return false; if (__isset.validWriteIdList != rhs.__isset.validWriteIdList) return false; else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java index 8d4102fc4a..314ea90ee3 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java @@ -43,7 +43,8 @@ private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5); - private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)6); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -56,6 +57,7 @@ private List partitions; // required private EnvironmentContext environmentContext; // required private long txnId; // optional + private long writeId; // optional private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ @@ -65,7 +67,8 @@ PARTITIONS((short)3, "partitions"), ENVIRONMENT_CONTEXT((short)4, "environmentContext"), TXN_ID((short)5, "txnId"), - VALID_WRITE_ID_LIST((short)6, "validWriteIdList"); + WRITE_ID((short)6, "writeId"), + VALID_WRITE_ID_LIST((short)7, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -90,7 +93,9 @@ public static _Fields findByThriftId(int fieldId) { return ENVIRONMENT_CONTEXT; case 5: // TXN_ID return TXN_ID; - case 6: // VALID_WRITE_ID_LIST + case 6: // WRITE_ID + return WRITE_ID; + case 7: // VALID_WRITE_ID_LIST return VALID_WRITE_ID_LIST; default: return null; @@ -133,8 +138,9 @@ public String getFieldName() { // isset id assignments private static final int __TXNID_ISSET_ID = 0; + private static final int __WRITEID_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; + private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -149,6 +155,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); @@ -158,6 +166,8 @@ public String getFieldName() { public AlterPartitionsRequest() { this.txnId = -1L; + this.writeId = -1L; + } public AlterPartitionsRequest( @@ -195,6 +205,7 @@ public AlterPartitionsRequest(AlterPartitionsRequest other) { this.environmentContext = new EnvironmentContext(other.environmentContext); } this.txnId = other.txnId; + this.writeId = other.writeId; if (other.isSetValidWriteIdList()) { this.validWriteIdList = other.validWriteIdList; } @@ -212,6 +223,8 @@ public void clear() { this.environmentContext = null; this.txnId = -1L; + this.writeId = -1L; + this.validWriteIdList = null; } @@ -344,6 +357,28 @@ public void setTxnIdIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); } + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, 
__WRITEID_ISSET_ID, value); + } + public String getValidWriteIdList() { return this.validWriteIdList; } @@ -409,6 +444,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + case VALID_WRITE_ID_LIST: if (value == null) { unsetValidWriteIdList(); @@ -437,6 +480,9 @@ public Object getFieldValue(_Fields field) { case TXN_ID: return getTxnId(); + case WRITE_ID: + return getWriteId(); + case VALID_WRITE_ID_LIST: return getValidWriteIdList(); @@ -461,6 +507,8 @@ public boolean isSet(_Fields field) { return isSetEnvironmentContext(); case TXN_ID: return isSetTxnId(); + case WRITE_ID: + return isSetWriteId(); case VALID_WRITE_ID_LIST: return isSetValidWriteIdList(); } @@ -525,6 +573,15 @@ public boolean equals(AlterPartitionsRequest that) { return false; } + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); if (this_present_validWriteIdList || that_present_validWriteIdList) { @@ -566,6 +623,11 @@ public int hashCode() { if (present_txnId) list.add(txnId); + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); list.add(present_validWriteIdList); if (present_validWriteIdList) @@ -632,6 +694,16 @@ public int compareTo(AlterPartitionsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); if (lastComparison != 0) { return lastComparison; @@ -699,6 +771,12 @@ public String toString() { sb.append(this.txnId); first = false; } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } if (isSetValidWriteIdList()) { if (!first) sb.append(", "); sb.append("validWriteIdList:"); @@ -825,7 +903,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // VALID_WRITE_ID_LIST + case 6: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // VALID_WRITE_ID_LIST if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); @@ -878,6 +964,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsReq oprot.writeI64(struct.txnId); oprot.writeFieldEnd(); } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + 
oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } if (struct.validWriteIdList != null) { if (struct.isSetValidWriteIdList()) { oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); @@ -916,13 +1007,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequ if (struct.isSetTxnId()) { optionals.set(0); } - if (struct.isSetValidWriteIdList()) { + if (struct.isSetWriteId()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetTxnId()) { oprot.writeI64(struct.txnId); } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } if (struct.isSetValidWriteIdList()) { oprot.writeString(struct.validWriteIdList); } @@ -950,12 +1047,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsReque struct.environmentContext = new EnvironmentContext(); struct.environmentContext.read(iprot); struct.setEnvironmentContextIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.txnId = iprot.readI64(); struct.setTxnIdIsSet(true); } if (incoming.get(1)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(2)) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java index 5b40d2f55a..37515a0c33 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java @@ -48,8 +48,9 @@ private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)8); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)10); - private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)11); - private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)12); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)11); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)12); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)13); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -67,6 +68,7 @@ private PrincipalPrivilegeSet privileges; // optional private String 
catName; // optional private long txnId; // optional + private long writeId; // optional private String validWriteIdList; // optional private IsolationLevelCompliance isStatsCompliant; // optional @@ -82,12 +84,13 @@ PRIVILEGES((short)8, "privileges"), CAT_NAME((short)9, "catName"), TXN_ID((short)10, "txnId"), - VALID_WRITE_ID_LIST((short)11, "validWriteIdList"), + WRITE_ID((short)11, "writeId"), + VALID_WRITE_ID_LIST((short)12, "validWriteIdList"), /** * * @see IsolationLevelCompliance */ - IS_STATS_COMPLIANT((short)12, "isStatsCompliant"); + IS_STATS_COMPLIANT((short)13, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -122,9 +125,11 @@ public static _Fields findByThriftId(int fieldId) { return CAT_NAME; case 10: // TXN_ID return TXN_ID; - case 11: // VALID_WRITE_ID_LIST + case 11: // WRITE_ID + return WRITE_ID; + case 12: // VALID_WRITE_ID_LIST return VALID_WRITE_ID_LIST; - case 12: // IS_STATS_COMPLIANT + case 13: // IS_STATS_COMPLIANT return IS_STATS_COMPLIANT; default: return null; @@ -169,8 +174,9 @@ public String getFieldName() { private static final int __CREATETIME_ISSET_ID = 0; private static final int __LASTACCESSTIME_ISSET_ID = 1; private static final int __TXNID_ISSET_ID = 2; + private static final int __WRITEID_ISSET_ID = 3; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -197,6 +203,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, @@ -208,6 +216,8 @@ public String getFieldName() { public Partition() { this.txnId = -1L; + this.writeId = -1L; + } public Partition( @@ -262,6 +272,7 @@ public Partition(Partition other) { this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.catName); } this.txnId = other.txnId; + this.writeId = other.writeId; if (other.isSetValidWriteIdList()) { this.validWriteIdList = other.validWriteIdList; } @@ -289,6 +300,8 @@ public void clear() { this.catName = null; this.txnId = -1L; + this.writeId = -1L; + this.validWriteIdList = null; this.isStatsCompliant = null; } @@ -546,6 +559,28 @@ public void setTxnIdIsSet(boolean value) { 
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); } + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + public String getValidWriteIdList() { return this.validWriteIdList; } @@ -682,6 +717,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + case VALID_WRITE_ID_LIST: if (value == null) { unsetValidWriteIdList(); @@ -733,6 +776,9 @@ public Object getFieldValue(_Fields field) { case TXN_ID: return getTxnId(); + case WRITE_ID: + return getWriteId(); + case VALID_WRITE_ID_LIST: return getValidWriteIdList(); @@ -770,6 +816,8 @@ public boolean isSet(_Fields field) { return isSetCatName(); case TXN_ID: return isSetTxnId(); + case WRITE_ID: + return isSetWriteId(); case VALID_WRITE_ID_LIST: return isSetValidWriteIdList(); case IS_STATS_COMPLIANT: @@ -881,6 +929,15 @@ public boolean equals(Partition that) { return false; } + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); if (this_present_validWriteIdList || that_present_validWriteIdList) { @@ -956,6 +1013,11 @@ public int hashCode() { if (present_txnId) list.add(txnId); + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); list.add(present_validWriteIdList); if (present_validWriteIdList) @@ -1077,6 +1139,16 @@ public int compareTo(Partition other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); if (lastComparison != 0) { return lastComparison; @@ -1190,6 +1262,12 @@ public String toString() { sb.append(this.txnId); first = false; } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } if (isSetValidWriteIdList()) { if (!first) sb.append(", "); sb.append("validWriteIdList:"); @@ -1365,7 +1443,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 11: // VALID_WRITE_ID_LIST + case 11: // WRITE_ID + if 
(schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 12: // VALID_WRITE_ID_LIST if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); @@ -1373,7 +1459,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 12: // IS_STATS_COMPLIANT + case 13: // IS_STATS_COMPLIANT if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); struct.setIsStatsCompliantIsSet(true); @@ -1459,6 +1545,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct) oprot.writeI64(struct.txnId); oprot.writeFieldEnd(); } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } if (struct.validWriteIdList != null) { if (struct.isSetValidWriteIdList()) { oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); @@ -1521,13 +1612,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetTxnId()) { optionals.set(9); } - if (struct.isSetValidWriteIdList()) { + if (struct.isSetWriteId()) { optionals.set(10); } - if (struct.isSetIsStatsCompliant()) { + if (struct.isSetValidWriteIdList()) { optionals.set(11); } - oprot.writeBitSet(optionals, 12); + if (struct.isSetIsStatsCompliant()) { + optionals.set(12); + } + oprot.writeBitSet(optionals, 13); if (struct.isSetValues()) { { oprot.writeI32(struct.values.size()); @@ -1571,6 +1665,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetTxnId()) { oprot.writeI64(struct.txnId); } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } if (struct.isSetValidWriteIdList()) { oprot.writeString(struct.validWriteIdList); } @@ -1582,7 +1679,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t @Override public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(12); + BitSet incoming = iprot.readBitSet(13); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -1646,10 +1743,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) th struct.setTxnIdIsSet(true); } if (incoming.get(10)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(11)) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); } - if (incoming.get(11)) { + if (incoming.get(12)) { struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); struct.setIsStatsCompliantIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java index bc625b04a6..63b88db473 100644 --- 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java @@ -45,8 +45,9 @@ private static final org.apache.thrift.protocol.TField PARTITION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionList", org.apache.thrift.protocol.TType.STRUCT, (short)5); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7); - private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8); - private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)9); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)8); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)9); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)10); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private PartitionListComposingSpec partitionList; // optional private String catName; // optional private long txnId; // optional + private long writeId; // optional private String validWriteIdList; // optional private IsolationLevelCompliance isStatsCompliant; // optional @@ -73,12 +75,13 @@ PARTITION_LIST((short)5, "partitionList"), CAT_NAME((short)6, "catName"), TXN_ID((short)7, "txnId"), - VALID_WRITE_ID_LIST((short)8, "validWriteIdList"), + WRITE_ID((short)8, "writeId"), + VALID_WRITE_ID_LIST((short)9, "validWriteIdList"), /** * * @see IsolationLevelCompliance */ - IS_STATS_COMPLIANT((short)9, "isStatsCompliant"); + IS_STATS_COMPLIANT((short)10, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -107,9 +110,11 @@ public static _Fields findByThriftId(int fieldId) { return CAT_NAME; case 7: // TXN_ID return TXN_ID; - case 8: // VALID_WRITE_ID_LIST + case 8: // WRITE_ID + return WRITE_ID; + case 9: // VALID_WRITE_ID_LIST return VALID_WRITE_ID_LIST; - case 9: // IS_STATS_COMPLIANT + case 10: // IS_STATS_COMPLIANT return IS_STATS_COMPLIANT; default: return null; @@ -152,8 +157,9 @@ public String getFieldName() { // isset id assignments private static final int __TXNID_ISSET_ID = 0; + private static final int __WRITEID_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; + private static final _Fields optionals[] = 
{_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -171,6 +177,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, @@ -182,6 +190,8 @@ public String getFieldName() { public PartitionSpec() { this.txnId = -1L; + this.writeId = -1L; + } public PartitionSpec( @@ -219,6 +229,7 @@ public PartitionSpec(PartitionSpec other) { this.catName = other.catName; } this.txnId = other.txnId; + this.writeId = other.writeId; if (other.isSetValidWriteIdList()) { this.validWriteIdList = other.validWriteIdList; } @@ -241,6 +252,8 @@ public void clear() { this.catName = null; this.txnId = -1L; + this.writeId = -1L; + this.validWriteIdList = null; this.isStatsCompliant = null; } @@ -405,6 +418,28 @@ public void setTxnIdIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); } + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + public String getValidWriteIdList() { return this.validWriteIdList; } @@ -517,6 +552,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + case VALID_WRITE_ID_LIST: if (value == null) { unsetValidWriteIdList(); @@ -559,6 +602,9 @@ public Object getFieldValue(_Fields field) { case TXN_ID: return getTxnId(); + case WRITE_ID: + return getWriteId(); + case VALID_WRITE_ID_LIST: return getValidWriteIdList(); @@ -590,6 +636,8 @@ public boolean isSet(_Fields field) { return isSetCatName(); case TXN_ID: return isSetTxnId(); + case WRITE_ID: + return isSetWriteId(); case VALID_WRITE_ID_LIST: return isSetValidWriteIdList(); case IS_STATS_COMPLIANT: @@ -674,6 +722,15 @@ public boolean 
equals(PartitionSpec that) { return false; } + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); if (this_present_validWriteIdList || that_present_validWriteIdList) { @@ -734,6 +791,11 @@ public int hashCode() { if (present_txnId) list.add(txnId); + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); list.add(present_validWriteIdList); if (present_validWriteIdList) @@ -825,6 +887,16 @@ public int compareTo(PartitionSpec other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); if (lastComparison != 0) { return lastComparison; @@ -924,6 +996,12 @@ public String toString() { sb.append(this.txnId); first = false; } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } if (isSetValidWriteIdList()) { if (!first) sb.append(", "); sb.append("validWriteIdList:"); @@ -1053,7 +1131,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpec struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 8: // VALID_WRITE_ID_LIST + case 8: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 9: // VALID_WRITE_ID_LIST if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); @@ -1061,7 +1147,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpec struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 9: // IS_STATS_COMPLIANT + case 10: // IS_STATS_COMPLIANT if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); struct.setIsStatsCompliantIsSet(true); @@ -1123,6 +1209,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpec stru oprot.writeI64(struct.txnId); oprot.writeFieldEnd(); } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } if (struct.validWriteIdList != null) { if (struct.isSetValidWriteIdList()) { oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); @@ -1176,13 +1267,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetTxnId()) { optionals.set(6); } - if (struct.isSetValidWriteIdList()) { + if 
(struct.isSetWriteId()) { optionals.set(7); } - if (struct.isSetIsStatsCompliant()) { + if (struct.isSetValidWriteIdList()) { optionals.set(8); } - oprot.writeBitSet(optionals, 9); + if (struct.isSetIsStatsCompliant()) { + optionals.set(9); + } + oprot.writeBitSet(optionals, 10); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -1204,6 +1298,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetTxnId()) { oprot.writeI64(struct.txnId); } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } if (struct.isSetValidWriteIdList()) { oprot.writeString(struct.validWriteIdList); } @@ -1215,7 +1312,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc @Override public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(9); + BitSet incoming = iprot.readBitSet(10); if (incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -1247,10 +1344,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct struct.setTxnIdIsSet(true); } if (incoming.get(7)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(8)) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); } - if (incoming.get(8)) { + if (incoming.get(9)) { struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); struct.setIsStatsCompliantIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java index 8f460129f5..2053e3e244 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -41,7 +41,8 @@ private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1); private static final org.apache.thrift.protocol.TField NEED_MERGE_FIELD_DESC = new org.apache.thrift.protocol.TField("needMerge", org.apache.thrift.protocol.TType.BOOL, (short)2); private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3); - private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)4); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -52,6 +53,7 @@ private List colStats; // required private boolean needMerge; // optional private long txnId; // optional + private long writeId; // 
optional private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -59,7 +61,8 @@ COL_STATS((short)1, "colStats"), NEED_MERGE((short)2, "needMerge"), TXN_ID((short)3, "txnId"), - VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); + WRITE_ID((short)4, "writeId"), + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -80,7 +83,9 @@ public static _Fields findByThriftId(int fieldId) { return NEED_MERGE; case 3: // TXN_ID return TXN_ID; - case 4: // VALID_WRITE_ID_LIST + case 4: // WRITE_ID + return WRITE_ID; + case 5: // VALID_WRITE_ID_LIST return VALID_WRITE_ID_LIST; default: return null; @@ -124,8 +129,9 @@ public String getFieldName() { // isset id assignments private static final int __NEEDMERGE_ISSET_ID = 0; private static final int __TXNID_ISSET_ID = 1; + private static final int __WRITEID_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST}; + private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -136,6 +142,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); @@ -145,6 +153,8 @@ public String getFieldName() { public SetPartitionsStatsRequest() { this.txnId = -1L; + this.writeId = -1L; + } public SetPartitionsStatsRequest( @@ -168,6 +178,7 @@ public SetPartitionsStatsRequest(SetPartitionsStatsRequest other) { } this.needMerge = other.needMerge; this.txnId = other.txnId; + this.writeId = other.writeId; if (other.isSetValidWriteIdList()) { this.validWriteIdList = other.validWriteIdList; } @@ -184,6 +195,8 @@ public void clear() { this.needMerge = false; this.txnId = -1L; + this.writeId = -1L; + this.validWriteIdList = null; } @@ -269,6 +282,28 @@ public void setTxnIdIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); } + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return 
EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + public String getValidWriteIdList() { return this.validWriteIdList; } @@ -318,6 +353,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + case VALID_WRITE_ID_LIST: if (value == null) { unsetValidWriteIdList(); @@ -340,6 +383,9 @@ public Object getFieldValue(_Fields field) { case TXN_ID: return getTxnId(); + case WRITE_ID: + return getWriteId(); + case VALID_WRITE_ID_LIST: return getValidWriteIdList(); @@ -360,6 +406,8 @@ public boolean isSet(_Fields field) { return isSetNeedMerge(); case TXN_ID: return isSetTxnId(); + case WRITE_ID: + return isSetWriteId(); case VALID_WRITE_ID_LIST: return isSetValidWriteIdList(); } @@ -406,6 +454,15 @@ public boolean equals(SetPartitionsStatsRequest that) { return false; } + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); if (this_present_validWriteIdList || that_present_validWriteIdList) { @@ -437,6 +494,11 @@ public int hashCode() { if (present_txnId) list.add(txnId); + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); list.add(present_validWriteIdList); if (present_validWriteIdList) @@ -483,6 +545,16 @@ public int compareTo(SetPartitionsStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); if (lastComparison != 0) { return lastComparison; @@ -532,6 +604,12 @@ public String toString() { sb.append(this.txnId); first = false; } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } if (isSetValidWriteIdList()) { if (!first) sb.append(", "); sb.append("validWriteIdList:"); @@ -626,7 +704,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SetPartitionsStatsR org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // VALID_WRITE_ID_LIST + case 4: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // VALID_WRITE_ID_LIST if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); @@ -669,6 +755,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
SetPartitionsStats oprot.writeI64(struct.txnId); oprot.writeFieldEnd(); } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } if (struct.validWriteIdList != null) { if (struct.isSetValidWriteIdList()) { oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); @@ -707,16 +798,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR if (struct.isSetTxnId()) { optionals.set(1); } - if (struct.isSetValidWriteIdList()) { + if (struct.isSetWriteId()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetNeedMerge()) { oprot.writeBool(struct.needMerge); } if (struct.isSetTxnId()) { oprot.writeI64(struct.txnId); } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } if (struct.isSetValidWriteIdList()) { oprot.writeString(struct.validWriteIdList); } @@ -737,7 +834,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRe } } struct.setColStatsIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.needMerge = iprot.readBool(); struct.setNeedMergeIsSet(true); @@ -747,6 +844,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRe struct.setTxnIdIsSet(true); } if (incoming.get(2)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(3)) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index d9f17cc53a..27c0528d5b 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -57,8 +57,9 @@ private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)17); private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)18); private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)19); - private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)20); - private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)21); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)20); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)21); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new 
org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)22); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -85,6 +86,7 @@ private String catName; // optional private PrincipalType ownerType; // optional private long txnId; // optional + private long writeId; // optional private String validWriteIdList; // optional private IsolationLevelCompliance isStatsCompliant; // optional @@ -113,12 +115,13 @@ */ OWNER_TYPE((short)18, "ownerType"), TXN_ID((short)19, "txnId"), - VALID_WRITE_ID_LIST((short)20, "validWriteIdList"), + WRITE_ID((short)20, "writeId"), + VALID_WRITE_ID_LIST((short)21, "validWriteIdList"), /** * * @see IsolationLevelCompliance */ - IS_STATS_COMPLIANT((short)21, "isStatsCompliant"); + IS_STATS_COMPLIANT((short)22, "isStatsCompliant"); private static final Map byName = new HashMap(); @@ -171,9 +174,11 @@ public static _Fields findByThriftId(int fieldId) { return OWNER_TYPE; case 19: // TXN_ID return TXN_ID; - case 20: // VALID_WRITE_ID_LIST + case 20: // WRITE_ID + return WRITE_ID; + case 21: // VALID_WRITE_ID_LIST return VALID_WRITE_ID_LIST; - case 21: // IS_STATS_COMPLIANT + case 22: // IS_STATS_COMPLIANT return IS_STATS_COMPLIANT; default: return null; @@ -221,8 +226,9 @@ public String getFieldName() { private static final int __TEMPORARY_ISSET_ID = 3; private static final int __REWRITEENABLED_ISSET_ID = 4; private static final int __TXNID_ISSET_ID = 5; + private static final int __WRITEID_ISSET_ID = 6; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -267,6 +273,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class))); tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, @@ -282,6 +290,8 @@ public Table() { this.txnId = -1L; + this.writeId = -1L; + } public Table( @@ -371,6 +381,7 @@ public Table(Table other) { this.ownerType = other.ownerType; } this.txnId = other.txnId; + this.writeId = 
other.writeId; if (other.isSetValidWriteIdList()) { this.validWriteIdList = other.validWriteIdList; } @@ -411,6 +422,8 @@ public void clear() { this.txnId = -1L; + this.writeId = -1L; + this.validWriteIdList = null; this.isStatsCompliant = null; } @@ -880,6 +893,28 @@ public void setTxnIdIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); } + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + public String getValidWriteIdList() { return this.validWriteIdList; } @@ -1088,6 +1123,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + case VALID_WRITE_ID_LIST: if (value == null) { unsetValidWriteIdList(); @@ -1166,6 +1209,9 @@ public Object getFieldValue(_Fields field) { case TXN_ID: return getTxnId(); + case WRITE_ID: + return getWriteId(); + case VALID_WRITE_ID_LIST: return getValidWriteIdList(); @@ -1221,6 +1267,8 @@ public boolean isSet(_Fields field) { return isSetOwnerType(); case TXN_ID: return isSetTxnId(); + case WRITE_ID: + return isSetWriteId(); case VALID_WRITE_ID_LIST: return isSetValidWriteIdList(); case IS_STATS_COMPLIANT: @@ -1413,6 +1461,15 @@ public boolean equals(Table that) { return false; } + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); if (this_present_validWriteIdList || that_present_validWriteIdList) { @@ -1533,6 +1590,11 @@ public int hashCode() { if (present_txnId) list.add(txnId); + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); list.add(present_validWriteIdList); if (present_validWriteIdList) @@ -1744,6 +1806,16 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); if (lastComparison != 0) { return lastComparison; @@ -1925,6 +1997,12 @@ public String toString() { sb.append(this.txnId); first = false; } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } if 
(isSetValidWriteIdList()) { if (!first) sb.append(", "); sb.append("validWriteIdList:"); @@ -2174,7 +2252,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 20: // VALID_WRITE_ID_LIST + case 20: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 21: // VALID_WRITE_ID_LIST if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); @@ -2182,7 +2268,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 21: // IS_STATS_COMPLIANT + case 22: // IS_STATS_COMPLIANT if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); struct.setIsStatsCompliantIsSet(true); @@ -2315,6 +2401,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeI64(struct.txnId); oprot.writeFieldEnd(); } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } if (struct.validWriteIdList != null) { if (struct.isSetValidWriteIdList()) { oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); @@ -2404,13 +2495,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetTxnId()) { optionals.set(18); } - if (struct.isSetValidWriteIdList()) { + if (struct.isSetWriteId()) { optionals.set(19); } - if (struct.isSetIsStatsCompliant()) { + if (struct.isSetValidWriteIdList()) { optionals.set(20); } - oprot.writeBitSet(optionals, 21); + if (struct.isSetIsStatsCompliant()) { + optionals.set(21); + } + oprot.writeBitSet(optionals, 22); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -2481,6 +2575,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetTxnId()) { oprot.writeI64(struct.txnId); } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } if (struct.isSetValidWriteIdList()) { oprot.writeString(struct.validWriteIdList); } @@ -2492,7 +2589,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(21); + BitSet incoming = iprot.readBitSet(22); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -2594,10 +2691,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.setTxnIdIsSet(true); } if (incoming.get(19)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(20)) { struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); } - if (incoming.get(20)) { + if (incoming.get(21)) { struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32()); struct.setIsStatsCompliantIsSet(true); } diff --git 
standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index 9033e9ac8d..7ced369c93 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -6532,6 +6532,10 @@ class Table { * @var int */ public $txnId = -1; + /** + * @var int + */ + public $writeId = -1; /** * @var string */ @@ -6637,10 +6641,14 @@ class Table { 'type' => TType::I64, ), 20 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 21 => array( 'var' => 'validWriteIdList', 'type' => TType::STRING, ), - 21 => array( + 22 => array( 'var' => 'isStatsCompliant', 'type' => TType::I32, ), @@ -6704,6 +6712,9 @@ class Table { if (isset($vals['txnId'])) { $this->txnId = $vals['txnId']; } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } if (isset($vals['validWriteIdList'])) { $this->validWriteIdList = $vals['validWriteIdList']; } @@ -6893,13 +6904,20 @@ class Table { } break; case 20: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 21: if ($ftype == TType::STRING) { $xfer += $input->readString($this->validWriteIdList); } else { $xfer += $input->skip($ftype); } break; - case 21: + case 22: if ($ftype == TType::I32) { $xfer += $input->readI32($this->isStatsCompliant); } else { @@ -7048,13 +7066,18 @@ class Table { $xfer += $output->writeI64($this->txnId); $xfer += $output->writeFieldEnd(); } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 20); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } if ($this->validWriteIdList !== null) { - $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 20); + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 21); $xfer += $output->writeString($this->validWriteIdList); $xfer += $output->writeFieldEnd(); } if ($this->isStatsCompliant !== null) { - $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 21); + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 22); $xfer += $output->writeI32($this->isStatsCompliant); $xfer += $output->writeFieldEnd(); } @@ -7108,6 +7131,10 @@ class Partition { * @var int */ public $txnId = -1; + /** + * @var int + */ + public $writeId = -1; /** * @var string */ @@ -7175,10 +7202,14 @@ class Partition { 'type' => TType::I64, ), 11 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 12 => array( 'var' => 'validWriteIdList', 'type' => TType::STRING, ), - 12 => array( + 13 => array( 'var' => 'isStatsCompliant', 'type' => TType::I32, ), @@ -7215,6 +7246,9 @@ class Partition { if (isset($vals['txnId'])) { $this->txnId = $vals['txnId']; } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } if (isset($vals['validWriteIdList'])) { $this->validWriteIdList = $vals['validWriteIdList']; } @@ -7339,13 +7373,20 @@ class Partition { } break; case 11: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 12: if ($ftype == TType::STRING) { $xfer += $input->readString($this->validWriteIdList); } else { $xfer += $input->skip($ftype); } break; - case 12: + case 13: if ($ftype == TType::I32) { $xfer += $input->readI32($this->isStatsCompliant); } else { @@ -7446,13 +7487,18 @@ class Partition { $xfer += $output->writeI64($this->txnId); $xfer += 
$output->writeFieldEnd(); } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 11); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } if ($this->validWriteIdList !== null) { - $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 11); + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 12); $xfer += $output->writeString($this->validWriteIdList); $xfer += $output->writeFieldEnd(); } if ($this->isStatsCompliant !== null) { - $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 12); + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 13); $xfer += $output->writeI32($this->isStatsCompliant); $xfer += $output->writeFieldEnd(); } @@ -7983,6 +8029,10 @@ class PartitionSpec { * @var int */ public $txnId = -1; + /** + * @var int + */ + public $writeId = -1; /** * @var string */ @@ -8026,10 +8076,14 @@ class PartitionSpec { 'type' => TType::I64, ), 8 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 9 => array( 'var' => 'validWriteIdList', 'type' => TType::STRING, ), - 9 => array( + 10 => array( 'var' => 'isStatsCompliant', 'type' => TType::I32, ), @@ -8057,6 +8111,9 @@ class PartitionSpec { if (isset($vals['txnId'])) { $this->txnId = $vals['txnId']; } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } if (isset($vals['validWriteIdList'])) { $this->validWriteIdList = $vals['validWriteIdList']; } @@ -8137,13 +8194,20 @@ class PartitionSpec { } break; case 8: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 9: if ($ftype == TType::STRING) { $xfer += $input->readString($this->validWriteIdList); } else { $xfer += $input->skip($ftype); } break; - case 9: + case 10: if ($ftype == TType::I32) { $xfer += $input->readI32($this->isStatsCompliant); } else { @@ -8204,13 +8268,18 @@ class PartitionSpec { $xfer += $output->writeI64($this->txnId); $xfer += $output->writeFieldEnd(); } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 8); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } if ($this->validWriteIdList !== null) { - $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8); + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 9); $xfer += $output->writeString($this->validWriteIdList); $xfer += $output->writeFieldEnd(); } if ($this->isStatsCompliant !== null) { - $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 9); + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 10); $xfer += $output->writeI32($this->isStatsCompliant); $xfer += $output->writeFieldEnd(); } @@ -10465,6 +10534,10 @@ class SetPartitionsStatsRequest { * @var int */ public $txnId = -1; + /** + * @var int + */ + public $writeId = -1; /** * @var string */ @@ -10491,6 +10564,10 @@ class SetPartitionsStatsRequest { 'type' => TType::I64, ), 4 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 5 => array( 'var' => 'validWriteIdList', 'type' => TType::STRING, ), @@ -10506,6 +10583,9 @@ class SetPartitionsStatsRequest { if (isset($vals['txnId'])) { $this->txnId = $vals['txnId']; } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } if (isset($vals['validWriteIdList'])) { $this->validWriteIdList = $vals['validWriteIdList']; } @@ -10564,6 +10644,13 @@ class SetPartitionsStatsRequest { } break; case 4: + if ($ftype 
== TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: if ($ftype == TType::STRING) { $xfer += $input->readString($this->validWriteIdList); } else { @@ -10610,8 +10697,13 @@ class SetPartitionsStatsRequest { $xfer += $output->writeI64($this->txnId); $xfer += $output->writeFieldEnd(); } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 4); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } if ($this->validWriteIdList !== null) { - $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5); $xfer += $output->writeString($this->validWriteIdList); $xfer += $output->writeFieldEnd(); } @@ -30961,6 +31053,10 @@ class AlterPartitionsRequest { * @var int */ public $txnId = -1; + /** + * @var int + */ + public $writeId = -1; /** * @var string */ @@ -30996,6 +31092,10 @@ class AlterPartitionsRequest { 'type' => TType::I64, ), 6 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 7 => array( 'var' => 'validWriteIdList', 'type' => TType::STRING, ), @@ -31017,6 +31117,9 @@ class AlterPartitionsRequest { if (isset($vals['txnId'])) { $this->txnId = $vals['txnId']; } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } if (isset($vals['validWriteIdList'])) { $this->validWriteIdList = $vals['validWriteIdList']; } @@ -31090,6 +31193,13 @@ class AlterPartitionsRequest { } break; case 6: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: if ($ftype == TType::STRING) { $xfer += $input->readString($this->validWriteIdList); } else { @@ -31149,8 +31259,13 @@ class AlterPartitionsRequest { $xfer += $output->writeI64($this->txnId); $xfer += $output->writeFieldEnd(); } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 6); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } if ($this->validWriteIdList !== null) { - $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6); + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7); $xfer += $output->writeString($this->validWriteIdList); $xfer += $output->writeFieldEnd(); } diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index ccca4e97ad..9b02951a0a 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -4568,6 +4568,7 @@ class Table: - catName - ownerType - txnId + - writeId - validWriteIdList - isStatsCompliant """ @@ -4593,11 +4594,12 @@ class Table: (17, TType.STRING, 'catName', None, None, ), # 17 (18, TType.I32, 'ownerType', None, 1, ), # 18 (19, TType.I64, 'txnId', None, -1, ), # 19 - (20, TType.STRING, 'validWriteIdList', None, None, ), # 20 - (21, TType.I32, 'isStatsCompliant', None, None, ), # 21 + (20, TType.I64, 'writeId', None, -1, ), # 20 + (21, TType.STRING, 'validWriteIdList', None, None, ), # 21 + (22, TType.I32, 'isStatsCompliant', None, None, ), # 22 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, 
temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4], txnId=thrift_spec[19][4], validWriteIdList=None, isStatsCompliant=None,): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4], txnId=thrift_spec[19][4], writeId=thrift_spec[20][4], validWriteIdList=None, isStatsCompliant=None,): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -4617,6 +4619,7 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.catName = catName self.ownerType = ownerType self.txnId = txnId + self.writeId = writeId self.validWriteIdList = validWriteIdList self.isStatsCompliant = isStatsCompliant @@ -4740,11 +4743,16 @@ def read(self, iprot): else: iprot.skip(ftype) elif fid == 20: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 21: if ftype == TType.STRING: self.validWriteIdList = iprot.readString() else: iprot.skip(ftype) - elif fid == 21: + elif fid == 22: if ftype == TType.I32: self.isStatsCompliant = iprot.readI32() else: @@ -4842,12 +4850,16 @@ def write(self, oprot): oprot.writeFieldBegin('txnId', TType.I64, 19) oprot.writeI64(self.txnId) oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 20) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() if self.validWriteIdList is not None: - oprot.writeFieldBegin('validWriteIdList', TType.STRING, 20) + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 21) oprot.writeString(self.validWriteIdList) oprot.writeFieldEnd() if self.isStatsCompliant is not None: - oprot.writeFieldBegin('isStatsCompliant', TType.I32, 21) + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 22) oprot.writeI32(self.isStatsCompliant) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -4878,6 +4890,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.ownerType) value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.writeId) value = (value * 31) ^ hash(self.validWriteIdList) value = (value * 31) ^ hash(self.isStatsCompliant) return value @@ -4906,6 +4919,7 @@ class Partition: - privileges - catName - txnId + - writeId - validWriteIdList - isStatsCompliant """ @@ -4922,11 +4936,12 @@ class Partition: (8, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 8 (9, TType.STRING, 'catName', None, None, ), # 9 (10, TType.I64, 'txnId', None, -1, ), # 10 - (11, TType.STRING, 'validWriteIdList', None, None, ), # 11 - (12, TType.I32, 'isStatsCompliant', None, None, ), # 12 + (11, TType.I64, 'writeId', None, -1, ), # 11 + (12, TType.STRING, 'validWriteIdList', None, None, ), # 12 + (13, TType.I32, 'isStatsCompliant', None, None, ), # 13 ) - def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None, txnId=thrift_spec[10][4], validWriteIdList=None, isStatsCompliant=None,): + def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None, txnId=thrift_spec[10][4], writeId=thrift_spec[11][4], 
validWriteIdList=None, isStatsCompliant=None,): self.values = values self.dbName = dbName self.tableName = tableName @@ -4937,6 +4952,7 @@ def __init__(self, values=None, dbName=None, tableName=None, createTime=None, la self.privileges = privileges self.catName = catName self.txnId = txnId + self.writeId = writeId self.validWriteIdList = validWriteIdList self.isStatsCompliant = isStatsCompliant @@ -5013,11 +5029,16 @@ def read(self, iprot): else: iprot.skip(ftype) elif fid == 11: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 12: if ftype == TType.STRING: self.validWriteIdList = iprot.readString() else: iprot.skip(ftype) - elif fid == 12: + elif fid == 13: if ftype == TType.I32: self.isStatsCompliant = iprot.readI32() else: @@ -5079,12 +5100,16 @@ def write(self, oprot): oprot.writeFieldBegin('txnId', TType.I64, 10) oprot.writeI64(self.txnId) oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 11) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() if self.validWriteIdList is not None: - oprot.writeFieldBegin('validWriteIdList', TType.STRING, 11) + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 12) oprot.writeString(self.validWriteIdList) oprot.writeFieldEnd() if self.isStatsCompliant is not None: - oprot.writeFieldBegin('isStatsCompliant', TType.I32, 12) + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 13) oprot.writeI32(self.isStatsCompliant) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5106,6 +5131,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.privileges) value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.writeId) value = (value * 31) ^ hash(self.validWriteIdList) value = (value * 31) ^ hash(self.isStatsCompliant) return value @@ -5442,6 +5468,7 @@ class PartitionSpec: - partitionList - catName - txnId + - writeId - validWriteIdList - isStatsCompliant """ @@ -5455,11 +5482,12 @@ class PartitionSpec: (5, TType.STRUCT, 'partitionList', (PartitionListComposingSpec, PartitionListComposingSpec.thrift_spec), None, ), # 5 (6, TType.STRING, 'catName', None, None, ), # 6 (7, TType.I64, 'txnId', None, -1, ), # 7 - (8, TType.STRING, 'validWriteIdList', None, None, ), # 8 - (9, TType.I32, 'isStatsCompliant', None, None, ), # 9 + (8, TType.I64, 'writeId', None, -1, ), # 8 + (9, TType.STRING, 'validWriteIdList', None, None, ), # 9 + (10, TType.I32, 'isStatsCompliant', None, None, ), # 10 ) - def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None, txnId=thrift_spec[7][4], validWriteIdList=None, isStatsCompliant=None,): + def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None, txnId=thrift_spec[7][4], writeId=thrift_spec[8][4], validWriteIdList=None, isStatsCompliant=None,): self.dbName = dbName self.tableName = tableName self.rootPath = rootPath @@ -5467,6 +5495,7 @@ def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartition self.partitionList = partitionList self.catName = catName self.txnId = txnId + self.writeId = writeId self.validWriteIdList = validWriteIdList self.isStatsCompliant = isStatsCompliant @@ -5517,11 +5546,16 @@ def read(self, iprot): else: iprot.skip(ftype) elif fid == 8: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 9: if ftype == TType.STRING: self.validWriteIdList = 
iprot.readString() else: iprot.skip(ftype) - elif fid == 9: + elif fid == 10: if ftype == TType.I32: self.isStatsCompliant = iprot.readI32() else: @@ -5564,12 +5598,16 @@ def write(self, oprot): oprot.writeFieldBegin('txnId', TType.I64, 7) oprot.writeI64(self.txnId) oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 8) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() if self.validWriteIdList is not None: - oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8) + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 9) oprot.writeString(self.validWriteIdList) oprot.writeFieldEnd() if self.isStatsCompliant is not None: - oprot.writeFieldBegin('isStatsCompliant', TType.I32, 9) + oprot.writeFieldBegin('isStatsCompliant', TType.I32, 10) oprot.writeI32(self.isStatsCompliant) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5588,6 +5626,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.partitionList) value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.writeId) value = (value * 31) ^ hash(self.validWriteIdList) value = (value * 31) ^ hash(self.isStatsCompliant) return value @@ -7211,6 +7250,7 @@ class SetPartitionsStatsRequest: - colStats - needMerge - txnId + - writeId - validWriteIdList """ @@ -7219,13 +7259,15 @@ class SetPartitionsStatsRequest: (1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1 (2, TType.BOOL, 'needMerge', None, None, ), # 2 (3, TType.I64, 'txnId', None, -1, ), # 3 - (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 + (4, TType.I64, 'writeId', None, -1, ), # 4 + (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 ) - def __init__(self, colStats=None, needMerge=None, txnId=thrift_spec[3][4], validWriteIdList=None,): + def __init__(self, colStats=None, needMerge=None, txnId=thrift_spec[3][4], writeId=thrift_spec[4][4], validWriteIdList=None,): self.colStats = colStats self.needMerge = needMerge self.txnId = txnId + self.writeId = writeId self.validWriteIdList = validWriteIdList def read(self, iprot): @@ -7259,6 +7301,11 @@ def read(self, iprot): else: iprot.skip(ftype) elif fid == 4: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: if ftype == TType.STRING: self.validWriteIdList = iprot.readString() else: @@ -7288,8 +7335,12 @@ def write(self, oprot): oprot.writeFieldBegin('txnId', TType.I64, 3) oprot.writeI64(self.txnId) oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 4) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() if self.validWriteIdList is not None: - oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) oprot.writeString(self.validWriteIdList) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7306,6 +7357,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.colStats) value = (value * 31) ^ hash(self.needMerge) value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.writeId) value = (value * 31) ^ hash(self.validWriteIdList) return value @@ -22042,6 +22094,7 @@ class AlterPartitionsRequest: - partitions - environmentContext - txnId + - writeId - validWriteIdList """ @@ -22052,15 +22105,17 @@ class AlterPartitionsRequest: (3, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3 (4, TType.STRUCT, 'environmentContext', (EnvironmentContext, 
EnvironmentContext.thrift_spec), None, ), # 4 (5, TType.I64, 'txnId', None, -1, ), # 5 - (6, TType.STRING, 'validWriteIdList', None, None, ), # 6 + (6, TType.I64, 'writeId', None, -1, ), # 6 + (7, TType.STRING, 'validWriteIdList', None, None, ), # 7 ) - def __init__(self, dbName=None, tableName=None, partitions=None, environmentContext=None, txnId=thrift_spec[5][4], validWriteIdList=None,): + def __init__(self, dbName=None, tableName=None, partitions=None, environmentContext=None, txnId=thrift_spec[5][4], writeId=thrift_spec[6][4], validWriteIdList=None,): self.dbName = dbName self.tableName = tableName self.partitions = partitions self.environmentContext = environmentContext self.txnId = txnId + self.writeId = writeId self.validWriteIdList = validWriteIdList def read(self, iprot): @@ -22105,6 +22160,11 @@ def read(self, iprot): else: iprot.skip(ftype) elif fid == 6: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 7: if ftype == TType.STRING: self.validWriteIdList = iprot.readString() else: @@ -22142,8 +22202,12 @@ def write(self, oprot): oprot.writeFieldBegin('txnId', TType.I64, 5) oprot.writeI64(self.txnId) oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 6) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() if self.validWriteIdList is not None: - oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6) + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7) oprot.writeString(self.validWriteIdList) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22168,6 +22232,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.partitions) value = (value * 31) ^ hash(self.environmentContext) value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.writeId) value = (value * 31) ^ hash(self.validWriteIdList) return value diff --git standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 7b5132c7f1..d58a60e92d 100644 --- standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -1071,8 +1071,9 @@ class Table CATNAME = 17 OWNERTYPE = 18 TXNID = 19 - VALIDWRITEIDLIST = 20 - ISSTATSCOMPLIANT = 21 + WRITEID = 20 + VALIDWRITEIDLIST = 21 + ISSTATSCOMPLIANT = 22 FIELDS = { TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, @@ -1094,6 +1095,7 @@ class Table CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default => 1, :optional => true, :enum_class => ::PrincipalType}, TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance} } @@ -1124,8 +1126,9 @@ class Partition PRIVILEGES = 8 CATNAME = 9 TXNID = 10 - VALIDWRITEIDLIST = 11 - ISSTATSCOMPLIANT = 12 + WRITEID = 11 + VALIDWRITEIDLIST = 12 + ISSTATSCOMPLIANT = 13 FIELDS = { VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}}, @@ -1138,6 +1141,7 @@ class Partition PRIVILEGES => {:type => 
::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance} } @@ -1222,8 +1226,9 @@ class PartitionSpec PARTITIONLIST = 5 CATNAME = 6 TXNID = 7 - VALIDWRITEIDLIST = 8 - ISSTATSCOMPLIANT = 9 + WRITEID = 8 + VALIDWRITEIDLIST = 9 + ISSTATSCOMPLIANT = 10 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -1233,6 +1238,7 @@ class PartitionSpec PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true}, CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance} } @@ -1637,12 +1643,14 @@ class SetPartitionsStatsRequest COLSTATS = 1 NEEDMERGE = 2 TXNID = 3 - VALIDWRITEIDLIST = 4 + WRITEID = 4 + VALIDWRITEIDLIST = 5 FIELDS = { COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}}, NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true}, TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } @@ -5019,7 +5027,8 @@ class AlterPartitionsRequest PARTITIONS = 3 ENVIRONMENTCONTEXT = 4 TXNID = 5 - VALIDWRITEIDLIST = 6 + WRITEID = 6 + VALIDWRITEIDLIST = 7 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -5027,6 +5036,7 @@ class AlterPartitionsRequest PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}}, ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext}, TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java index 010870dcf1..e7cf07ff09 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java +++ 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java @@ -197,6 +197,7 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName */ List alterPartitions(final RawStore msdb, Warehouse wh, final String catName, final String dbname, final String name, final List new_parts, - EnvironmentContext environmentContext, long txnId, String writeIdList, IHMSHandler handler) + EnvironmentContext environmentContext, long txnId, String writeIdList, long writeId, + IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException; } \ No newline at end of file diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 5b70307920..e532b2eba0 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -297,7 +297,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam partValues.add(part.getValues()); } msdb.alterPartitions( - catName, newDbName, newTblName, partValues, partBatch, -1, null); + catName, newDbName, newTblName, partValues, partBatch, -1, null, -1); } } @@ -659,7 +659,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String EnvironmentContext environmentContext) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts, - environmentContext, -1, null, null); + environmentContext, -1, null, -1, null); } @Override @@ -667,7 +667,8 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String final String dbname, final String name, final List new_parts, EnvironmentContext environmentContext, - long txnId, String writeIdList, IHMSHandler handler) + long txnId, String writeIdList, long writeId, + IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { List oldParts = new ArrayList<>(); List> partValsList = new ArrayList<>(); @@ -715,7 +716,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String } } - msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, txnId, writeIdList); + msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, txnId, writeIdList, writeId); Iterator oldPartsIt = oldParts.iterator(); for (Partition newPart : new_parts) { Partition oldPart; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 706d831435..53db447d24 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -4868,7 +4868,7 @@ public void alter_partitions(final String db_name, final String tbl_name, final List new_parts) throws TException { alter_partitions_with_environment_context( - db_name, tbl_name, new_parts, null, -1, null); + db_name, tbl_name, new_parts, null, -1, null, -1); } @Override @@ -4878,13 +4878,14 @@ public AlterPartitionsResponse alter_partitions_with_environment_context( alter_partitions_with_environment_context( req.getDbName(), 
req.getTableName(), req.getPartitions(), req.getEnvironmentContext(), req.isSetTxnId() ? req.getTxnId() : -1, - req.isSetValidWriteIdList() ? req.getValidWriteIdList() : null); + req.isSetValidWriteIdList() ? req.getValidWriteIdList() : null, + req.isSetWriteId() ? req.getWriteId() : -1); return new AlterPartitionsResponse(); } private void alter_partitions_with_environment_context(final String db_name, final String tbl_name, final List new_parts, EnvironmentContext environmentContext, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws TException { String[] parsedDbName = parseDbName(db_name, conf); @@ -4908,7 +4909,7 @@ private void alter_partitions_with_environment_context(final String db_name, fin firePreEvent(new PreAlterPartitionEvent(parsedDbName[DB_NAME], tbl_name, null, tmpPart, this)); } oldParts = alterHandler.alterPartitions(getMS(), wh, parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, txnId, writeIdList, this); + parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, txnId, writeIdList, writeId, this); Iterator olditr = oldParts.iterator(); // Only fetch the table if we have a listener that needs it. Table table = null; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 626e1039af..8aab46bac6 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -1886,25 +1886,25 @@ public void alter_partition(String catName, String dbName, String tblName, Parti public void alter_partitions(String dbName, String tblName, List newParts) throws TException { alter_partitions( - getDefaultCatalog(conf), dbName, tblName, newParts, null, -1, null); + getDefaultCatalog(conf), dbName, tblName, newParts, null, -1, null, -1); } @Override public void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext) throws TException { alter_partitions( - getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext, -1, null); + getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext, -1, null, -1); } @Override public void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidOperationException, MetaException, TException { //client.alter_partition_with_environment_context(getDefaultCatalog(conf), // dbName, tblName, newParts, environmentContext); alter_partitions(getDefaultCatalog(conf), - dbName, tblName, newParts, environmentContext, txnId, writeIdList); + dbName, tblName, newParts, environmentContext, txnId, writeIdList, writeId); } @@ -1912,7 +1912,7 @@ public void alter_partitions(String dbName, String tblName, List newP public void alter_partitions(String catName, String dbName, String tblName, List newParts, EnvironmentContext environmentContext, - long txnId, String writeIdList) throws TException { + long txnId, String writeIdList, long writeId) throws TException { AlterPartitionsRequest req = new AlterPartitionsRequest(); req.setDbName(prependCatalogToDbName(catName, dbName, conf)); req.setTableName(tblName); @@ -1920,6 +1920,7 @@ public void alter_partitions(String catName, String dbName, String tblName, 
req.setEnvironmentContext(environmentContext); req.setTxnId(txnId); req.setValidWriteIdList(writeIdList); + req.setWriteId(writeId); client.alter_partitions_with_environment_context(req); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index c4cd8b4161..914f9bd8fb 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -2133,7 +2133,7 @@ void alter_partitions(String dbName, String tblName, List newParts, void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidOperationException, MetaException, TException; /** @@ -2155,7 +2155,7 @@ void alter_partitions(String dbName, String tblName, List newParts, default void alter_partitions(String catName, String dbName, String tblName, List newParts) throws InvalidOperationException, MetaException, TException { - alter_partitions(catName, dbName, tblName, newParts, null,-1, null); + alter_partitions(catName, dbName, tblName, newParts, null,-1, null, -1); } /** @@ -2177,7 +2177,7 @@ default void alter_partitions(String catName, String dbName, String tblName, */ void alter_partitions(String catName, String dbName, String tblName, List newParts, EnvironmentContext environmentContext, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidOperationException, MetaException, TException; /** diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 9266879ad0..3a8956c89d 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -1989,6 +1989,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, if (TxnUtils.isTransactionalTable(tbl)) { mtable.setTxnId(tbl.getTxnId()); mtable.setWriteIdList(tbl.getValidWriteIdList()); + mtable.setWriteId(tbl.getWriteId()); } return mtable; } @@ -2601,6 +2602,7 @@ private MPartition convertToMPart(Partition part, MTable mt, boolean useTableCD) if (TxnUtils.isTransactionalTable(mt.getParameters())) { mpart.setTxnId(part.getTxnId()); mpart.setWriteIdList(part.getValidWriteIdList()); + mpart.setWriteId(part.getWriteId()); } return mpart; } @@ -4141,12 +4143,13 @@ public void alterTable(String catName, String dbname, String name, Table newTabl TxnUtils.isTransactionalTable(newTable)) { // Check concurrent INSERT case and set false to the flag. if (!isCurrentStatsValidForTheQuery(oldt, newt.getTxnId(), newt.getWriteIdList(), - -1, true)) { + newt.getWriteId(), true)) { StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE); LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + dbname + "." + name + ". 
will be made persistent."); } oldt.setTxnId(newTable.getTxnId()); + oldt.setWriteId(newTable.getWriteId()); oldt.setWriteIdList(newTable.getValidWriteIdList()); } @@ -4231,13 +4234,14 @@ private MColumnDescriptor alterPartitionNoTxn(String catName, String dbname, Str TxnUtils.isTransactionalTable(table.getParameters())) { // Check concurrent INSERT case and set false to the flag. if (!isCurrentStatsValidForTheQuery(oldp, newp.getTxnId(), newp.getWriteIdList(), - -1, true)) { + newp.getWriteId(), true)) { StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE); LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + dbname + "." + name + "." + oldp.getPartitionName() + " will be made persistent."); } oldp.setTxnId(newPart.getTxnId()); oldp.setWriteIdList(newPart.getValidWriteIdList()); + oldp.setWriteId(newPart.getWriteId()); } return oldCD; } @@ -4271,7 +4275,7 @@ public void alterPartition(String catName, String dbname, String name, List> part_vals, List newParts, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidObjectException, MetaException { boolean success = false; Exception e = null; @@ -4284,6 +4288,7 @@ public void alterPartitions(String catName, String dbname, String name, if (txnId > 0) { tmpPart.setTxnId(txnId); tmpPart.setValidWriteIdList(writeIdList); + tmpPart.setWriteId(writeId); } MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, tmpPartVals, tmpPart); if (oldCd != null) { @@ -12242,16 +12247,18 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException { * ~ COLUMN_STATE_ACCURATE(CSA) state is true * ~ Isolation-level (snapshot) compliant with the query * @param tbl MTable of the stats entity - * @param txnId transaction id of the query + * @param queryTxnId transaction id of the query * @param queryValidWriteIdList valid writeId list of the query + * @param queryWriteId writeId of the query * @Precondition "tbl" should be retrieved from the TBLS table. */ private boolean isCurrentStatsValidForTheQuery( - MTable tbl, long txnId, String queryValidWriteIdList, - long statsWriteId, boolean checkConcurrentWrites) + MTable tbl, long queryTxnId, String queryValidWriteIdList, + long queryWriteId, boolean checkConcurrentWrites) throws MetaException { - return isCurrentStatsValidForTheQuery(tbl.getTxnId(), tbl.getParameters(), tbl.getWriteIdList(), - txnId, queryValidWriteIdList, statsWriteId, checkConcurrentWrites); + return isCurrentStatsValidForTheQuery(tbl.getParameters(), tbl.getTxnId(), + tbl.getWriteIdList(),tbl.getWriteId(), + queryTxnId, queryValidWriteIdList, queryWriteId, checkConcurrentWrites); } /** @@ -12264,23 +12271,35 @@ private boolean isCurrentStatsValidForTheQuery( * ~ COLUMN_STATE_ACCURATE(CSA) state is true * ~ Isolation-level (snapshot) compliant with the query * @param part MPartition of the stats entity - * @param txnId transaction id of the query + * @param queryTxnId transaction id of the query * @param queryValidWriteIdList valid writeId list of the query + * @param queryWriteId writeId of the query * @Precondition "part" should be retrieved from the PARTITIONS table. 
*/ private boolean isCurrentStatsValidForTheQuery( - MPartition part, long txnId, String queryValidWriteIdList, - long statsWriteId, boolean checkConcurrentWrites) + MPartition part, long queryTxnId, String queryValidWriteIdList, + long queryWriteId, boolean checkConcurrentWrites) throws MetaException { - return isCurrentStatsValidForTheQuery(part.getTxnId(), part.getParameters(), part.getWriteIdList(), - txnId, queryValidWriteIdList, statsWriteId, checkConcurrentWrites); + return isCurrentStatsValidForTheQuery(part.getParameters(), part.getTxnId(), + part.getWriteIdList(), part.getWriteId(), + queryTxnId, queryValidWriteIdList, queryWriteId, checkConcurrentWrites); } private boolean isCurrentStatsValidForTheQuery( - long statsTxnId, Map statsParams, String statsWriteIdList, + Map statsParams, long statsTxnId, + String statsWriteIdList, long statsWriteId, long queryTxnId, String queryValidWriteIdList, - long statsWriteId, boolean checkConcurrentWrites) + long queryWriteId, boolean checkConcurrentWrites) throws MetaException { + // If checkConcurrentWrites is true and + // statsWriteId or queryWriteId is -1 or 0, + // return true since -1 or 0 is not a valid writeId. + if (checkConcurrentWrites) { + if (statsWriteId < 1 || queryWriteId < 1) { + return true; + } + } + // if statsWriteIdList is null, // return true since the stats does not seem to be transactional. if (statsWriteIdList == null) { @@ -12319,6 +12338,6 @@ private boolean isCurrentStatsValidForTheQuery( ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList(queryValidWriteIdList); return !checkConcurrentWrites ? TxnIdUtils.checkEquivalentWriteIds(list4Stats, list4TheQuery) : - !TxnIdUtils.areTheseConcurrentWrites(list4Stats, list4TheQuery, statsWriteId); + !TxnIdUtils.areTheseConcurrentWrites(list4Stats, statsWriteId, list4TheQuery, queryWriteId); } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 8cc9d2c586..4800c8af2c 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -516,12 +516,13 @@ void alterPartition(String catName, String db_name, String tbl_name, List> part_vals_list, List new_parts, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidObjectException, MetaException; /** diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index e4894fa12b..42cf485cfd 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -1172,9 +1172,9 @@ public void alterPartition(String catName, String dbName, String tblName, List> partValsList, List newParts, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidObjectException, MetaException { - rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList); + rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList, writeId); catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tblName = normalizeIdentifier(tblName); diff --git 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java index 56f904835e..b5218f36a5 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java @@ -31,6 +31,7 @@ private MStorageDescriptor sd; private Map parameters; private long txnId; + private long writeId; private String writeIdList; public MPartition() {} @@ -161,6 +162,14 @@ public void setTxnId(long txnId) { this.txnId = txnId; } + public long getWriteId() { + return writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } + public String getWriteIdList() { return writeIdList; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java index 7ef1ef65d5..269325175d 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java @@ -39,6 +39,7 @@ private boolean rewriteEnabled; private String tableType; private long txnId; + private long writeId; private String writeIdList; public MTable() {} @@ -282,6 +283,14 @@ public void setTxnId(long txnId) { this.txnId = txnId; } + public long getWriteId() { + return writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } + public String getWriteIdList() { return writeIdList; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index bfbd928b0b..7f1c89b0c9 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -204,7 +204,7 @@ public static void prepDb(Configuration conf) throws Exception { " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " + " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " + " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', \"TXN_ID\" BIGINT DEFAULT 0, " + - " \"WRITEID_LIST\" CLOB, " + + " \"WRITE_ID\" BIGINT DEFAULT 0, \"WRITEID_LIST\" CLOB, " + " PRIMARY KEY (TBL_ID))" ); } catch (SQLException e) { @@ -220,7 +220,7 @@ public static void prepDb(Configuration conf) throws Exception { " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " + " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " + " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, \"TXN_ID\" BIGINT DEFAULT 0, " + - " \"WRITEID_LIST\" CLOB, " + + " \"WRITE_ID\" BIGINT DEFAULT 0, \"WRITEID_LIST\" CLOB, " + " PRIMARY KEY (PART_ID))" ); } catch (SQLException e) { diff --git standalone-metastore/src/main/resources/package.jdo standalone-metastore/src/main/resources/package.jdo index 92b803f350..4746aee4c5 100644 --- standalone-metastore/src/main/resources/package.jdo +++ standalone-metastore/src/main/resources/package.jdo @@ -213,6 +213,9 @@ + + + @@ -498,6 +501,9 @@ + + + diff --git standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql index 062e374ac0..1af5dbccf5 100644 --- 
standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); -CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB); +CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "TXN_ID" BIGINT DEFAULT 0, "WRITE_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB); CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER); @@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), " CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB); +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "TXN_ID" BIGINT DEFAULT 0, "WRITE_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB); CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); diff --git standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql index 38eecd970a..41ffe1ce69 100644 --- standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql +++ standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql @@ -2,8 +2,10 @@ -- HIVE-19416 ALTER TABLE "APP"."TBLS" ADD WRITEID_LIST CLOB; ALTER TABLE "APP"."TBLS" ADD TXN_ID bigint DEFAULT 0; +ALTER TABLE "APP"."TBLS" ADD WRITE_ID bigint DEFAULT 0; ALTER TABLE "APP"."PARTITIONS" ADD WRITEID_LIST CLOB; ALTER TABLE "APP"."PARTITIONS" ADD TXN_ID bigint DEFAULT 0; +ALTER TABLE "APP"."PARTITIONS" ADD WRITE_ID bigint DEFAULT 0; -- This needs to be the last thing done. Insert any changes above this line. 
UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; diff --git standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql index 1f31341113..6ce2639b9b 100644 --- standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -381,6 +381,7 @@ CREATE TABLE TBLS VIEW_ORIGINAL_TEXT text NULL, IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0, TXN_ID bigint NULL, + WRITE_ID bigint NULL, WRITEID_LIST text NULL ); diff --git standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql index 594d165ef2..44939d2ce3 100644 --- standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql @@ -3,8 +3,10 @@ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE; -- HIVE-19416 ALTER TABLE TBLS ADD WRITEID_LIST text NULL; ALTER TABLE TBLS ADD TXN_ID bigint NULL; +ALTER TABLE TBLS ADD WRITE_ID bigint NULL; ALTER TABLE PARTITIONS ADD WRITEID_LIST text NULL; ALTER TABLE PARTITIONS ADD TXN_ID bigint NULL; +ALTER TABLE PARTITIONS ADD WRITE_ID bigint NULL; -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; diff --git standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index 90f45ac224..86fef268be 100644 --- standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -225,6 +225,7 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` ( `SD_ID` bigint(20) DEFAULT NULL, `TBL_ID` bigint(20) DEFAULT NULL, `TXN_ID` bigint(20) DEFAULT 0, + `WRITE_ID` bigint(20) DEFAULT 0, `WRITEID_LIST` text DEFAULT NULL, PRIMARY KEY (`PART_ID`), UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`), @@ -632,6 +633,7 @@ CREATE TABLE IF NOT EXISTS `TBLS` ( `VIEW_ORIGINAL_TEXT` mediumtext, `IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0, `TXN_ID` bigint(20) DEFAULT 0, + `WRITE_ID` bigint(20) DEFAULT 0, `WRITEID_LIST` text DEFAULT NULL, PRIMARY KEY (`TBL_ID`), UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`), diff --git standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql index dc011c245d..9228cca3db 100644 --- standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql @@ -2,8 +2,10 @@ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' '; -- HIVE-19416 ALTER TABLE TBLS ADD TXN_ID bigint; +ALTER TABLE TBLS ADD WRITE_ID bigint; ALTER TABLE TBLS ADD WRITEID_LIST CLOB; ALTER TABLE PARTITIONS ADD TXN_ID bigint; +ALTER TABLE PARTITIONS ADD WRITE_ID bigint; ALTER TABLE PARTITIONS ADD WRITEID_LIST CLOB; -- These lines need to be last. Insert any changes above. 
diff --git standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index cc08dc1db9..dde7025a12 100644
--- standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -165,6 +165,7 @@ CREATE TABLE PARTITIONS
     SD_ID NUMBER NULL,
     TBL_ID NUMBER NULL,
     TXN_ID NUMBER NULL,
+    WRITE_ID NUMBER NULL,
     WRITEID_LIST CLOB NULL
 );
 
@@ -402,6 +403,7 @@ CREATE TABLE TBLS
     VIEW_ORIGINAL_TEXT CLOB NULL,
     IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
     TXN_ID NUMBER NULL,
+    WRITE_ID NUMBER NULL,
     WRITEID_LIST CLOB NULL
 );
 
diff --git standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
index 9e1e6cb539..0b75831f26 100644
--- standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
+++ standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
@@ -1,8 +1,10 @@
 SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
 
 ALTER TABLE TBLS ADD TXN_ID number NULL;
+ALTER TABLE TBLS ADD WRITE_ID number NULL;
 ALTER TABLE TBLS ADD WRITEID_LIST CLOB NULL;
 ALTER TABLE PARTITIONS ADD TXN_ID number NULL;
+ALTER TABLE PARTITIONS ADD WRITE_ID number NULL;
 ALTER TABLE PARTITIONS ADD WRITEID_LIST CLOB NULL;
 
 -- These lines need to be last. Insert any changes above.
diff --git standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index c7add637e1..ad193f9eca 100644
--- standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -396,6 +396,7 @@ CREATE TABLE "TBLS" (
     "VIEW_ORIGINAL_TEXT" text,
     "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
     "TXN_ID" bigint,
+    "WRITE_ID" bigint,
     "WRITEID_LIST" text
 );
 
diff --git standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
index 0692db1976..d7887197c9 100644
--- standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
+++ standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
@@ -2,8 +2,10 @@ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0';
 
 -- HIVE-19416
 ALTER TABLE "TBLS" ADD "TXN_ID" bigint;
+ALTER TABLE "TBLS" ADD "WRITE_ID" bigint;
 ALTER TABLE "TBLS" ADD "WRITEID_LIST" text;
 ALTER TABLE "PARTITIONS" ADD "TXN_ID" bigint;
+ALTER TABLE "PARTITIONS" ADD "WRITE_ID" bigint;
 ALTER TABLE "PARTITIONS" ADD "WRITEID_LIST" text;
 
 -- These lines need to be last. Insert any changes above.
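Taken together, the schema and upgrade scripts above add the same WRITE_ID column (BIGINT/NUMBER, nullable or defaulting to 0 depending on the dialect) to TBLS and PARTITIONS on Derby, MSSQL, MySQL, Oracle and Postgres. A minimal JDBC sketch for sanity-checking an upgraded metastore database follows; it is not part of this patch, the connection URL and credentials are placeholders, a matching JDBC driver is assumed to be on the classpath, and identifier case handling varies by backing database (e.g. quoted names on Postgres).

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

// Sketch only (not part of this patch): confirm that the 3.1.0 -> 4.0.0
// upgrade added the WRITE_ID column to TBLS and PARTITIONS.
// URL, user and password are placeholders for the actual metastore DB.
public class CheckWriteIdColumns {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection(
        "jdbc:mysql://localhost:3306/metastore", "hive", "hive")) {
      for (String table : new String[] {"TBLS", "PARTITIONS"}) {
        // getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern)
        try (ResultSet rs = conn.getMetaData()
            .getColumns(null, null, table, "WRITE_ID")) {
          System.out.println(table + (rs.next() ? ": WRITE_ID present" : ": WRITE_ID missing"));
        }
      }
    }
  }
}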
diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift
index ecd2001e11..1cf2d17603 100644
--- standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -438,8 +438,9 @@ struct Table {
   17: optional string catName, // Name of the catalog the table is in
   18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility)
   19: optional i64 txnId=-1,
-  20: optional string validWriteIdList,
-  21: optional IsolationLevelCompliance isStatsCompliant
+  20: optional i64 writeId=-1,
+  21: optional string validWriteIdList,
+  22: optional IsolationLevelCompliance isStatsCompliant
 }
 
 struct Partition {
@@ -453,8 +454,9 @@ struct Partition {
   8: optional PrincipalPrivilegeSet privileges,
   9: optional string catName,
   10: optional i64 txnId=-1,
-  11: optional string validWriteIdList,
-  12: optional IsolationLevelCompliance isStatsCompliant
+  11: optional i64 writeId=-1,
+  12: optional string validWriteIdList,
+  13: optional IsolationLevelCompliance isStatsCompliant
 }
 
 struct PartitionWithoutSD {
@@ -483,8 +485,9 @@ struct PartitionSpec {
   5: optional PartitionListComposingSpec partitionList,
   6: optional string catName,
   7: optional i64 txnId=-1,
-  8: optional string validWriteIdList,
-  9: optional IsolationLevelCompliance isStatsCompliant
+  8: optional i64 writeId=-1,
+  9: optional string validWriteIdList,
+  10: optional IsolationLevelCompliance isStatsCompliant
 }
 
 // column statistics
@@ -596,7 +599,8 @@ struct SetPartitionsStatsRequest {
 1: required list<ColumnStatistics> colStats,
 2: optional bool needMerge, //stats need to be merged with the existing stats
 3: optional i64 txnId=-1, // transaction id of the query that sends this structure
-4: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
+4: optional i64 writeId=-1, // writeId for the current query that updates the stats
+5: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
 }
 
 // schema of the table/query results etc.
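In each struct above, the new optional writeId=-1 field is inserted before validWriteIdList, shifting validWriteIdList and isStatsCompliant up by one field id; since all affected fields are optional, old clients that never set them stay wire-compatible, and the AlterPartitionsRequest hunk that follows applies the same renumbering. Below is a small sketch of how a caller might fill the extended SetPartitionsStatsRequest through the Thrift-generated Java class; it is not taken from this patch, the setter names are assumed to follow the usual Thrift-generated setFieldName pattern, and the statistics objects and id values are placeholders obtained elsewhere (e.g. from the transaction manager).

import java.util.List;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

// Sketch only (not part of this patch): populate the renumbered optional
// fields of SetPartitionsStatsRequest. colStats, txnId, writeId and the
// serialized ValidWriteIdList are assumed to be supplied by the caller.
public class StatsRequestBuilder {
  static SetPartitionsStatsRequest build(List<ColumnStatistics> colStats,
      long txnId, long writeId, String validWriteIdList) {
    SetPartitionsStatsRequest req = new SetPartitionsStatsRequest();
    req.setColStats(colStats);                 // field 1 (required)
    req.setNeedMerge(false);                   // field 2: merge with existing stats or overwrite
    req.setTxnId(txnId);                       // field 3: transaction issuing the stats update
    req.setWriteId(writeId);                   // new field 4: write id allocated to that transaction
    req.setValidWriteIdList(validWriteIdList); // field 5 after renumbering
    return req;
  }
}

The same txnId/writeId/validWriteIdList trio appears on Table, Partition and PartitionSpec, which is what lets ObjectStore compare a writer's write id against a reader's snapshot when deciding whether COLUMN_STATS_ACCURATE can still be trusted.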
@@ -1584,7 +1588,8 @@ struct AlterPartitionsRequest { 3: required list partitions, 4: required EnvironmentContext environmentContext, 5: optional i64 txnId=-1, - 6: optional string validWriteIdList + 6: optional i64 writeId=-1, + 7: optional string validWriteIdList } struct AlterPartitionsResponse { diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 001c3edcff..70771cd477 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -363,10 +363,10 @@ public void alterPartition(String catName, String dbName, String tblName, List> partValsList, List newParts, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidObjectException, MetaException { objectStore.alterPartitions( - catName, dbName, tblName, partValsList, newParts, txnId, writeIdList); + catName, dbName, tblName, partValsList, newParts, txnId, writeIdList, writeId); } @Override diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index d6a882e8e9..e9c8993f5e 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -364,7 +364,7 @@ public void alterPartition(String catName, String db_name, String tbl_name, List @Override public void alterPartitions(String catName, String db_name, String tbl_name, List> part_vals_list, List new_parts, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidObjectException, MetaException { } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 3899f03aba..28cb3d9ec3 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -1641,7 +1641,7 @@ public void alter_partitions(String dbName, String tblName, List newP @Override public void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext, - long txnId, String writeIdList) + long txnId, String writeIdList, long writeId) throws InvalidOperationException, MetaException, TException { AlterPartitionsRequest req = new AlterPartitionsRequest(); req.setDbName(dbName); @@ -3303,7 +3303,7 @@ public void alter_partition(String catName, String dbName, String tblName, Parti public void alter_partitions(String catName, String dbName, String tblName, List newParts, EnvironmentContext environmentContext, - long txnId, String writeIdList) throws + long txnId, String writeIdList, long writeId) throws InvalidOperationException, MetaException, TException { throw new UnsupportedOperationException(); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java index f19b5053f4..2b8b3c2336 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -836,7 +836,7 @@ public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Excep
     createTable4PartColsParts(client);
     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
     client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext(),
-        -1, null);
+        -1, null, -1);
   }
 
   @Test(expected = InvalidOperationException.class)
diff --git storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
index dc50f1b554..5199a54c49 100644
--- storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
+++ storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
@@ -19,6 +19,8 @@
 
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 
+import java.util.*;
+
 public class TxnIdUtils {
 
   /**
@@ -45,15 +47,29 @@ public static boolean checkEquivalentWriteIds(ValidWriteIdList a, ValidWriteIdLi
    * on the table.
    */
   public static boolean areTheseConcurrentWrites(
-      ValidWriteIdList older, ValidWriteIdList newer, long statsWriteId) {
+      ValidWriteIdList older, long olderWriteId,
+      ValidWriteIdList newer, long newerWriteId) {
     if (!older.getTableName().equalsIgnoreCase(newer.getTableName())) {
       return false;
     }
     assert(older.getHighWatermark() <= newer.getHighWatermark());
-    // TODO: Just return false for now.
-    return false;
+    // Return false when a write id is not positive.
+    if (olderWriteId <= 0 || newerWriteId <= 0) {
+      return false;
+    }
+
+    // If olderWriteId is for aborted write, return false.
+    if (newer.isWriteIdAborted(olderWriteId)) {
+      return false;
+    }
+
+    // If either writeId is contained in the other's writeIdList,
+    // it is a concurrent INSERTs case.
+    int index2Older = Arrays.binarySearch(older.getInvalidWriteIds(), olderWriteId);
+    int index2Newer = Arrays.binarySearch(newer.getInvalidWriteIds(), newerWriteId);
+    return index2Older >= 0 || index2Newer >= 0;
   }
 
   /**