commit f817f5c736eecbadde2d97b3c658dd72b276b16b Author: Alan Gates Date: Thu Apr 26 16:27:18 2018 -0700 HIVE-18685 Make transaction system work with catalogs diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 4a0e834795..00db28147d 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -417,14 +417,14 @@ public void testStatsAfterCompactionPartTbl() throws Exception { tblName + " after load:"); TxnStore txnHandler = TxnUtils.getTxnStore(conf); - CompactionInfo ci = new CompactionInfo("default", tblName, "bkt=0", CompactionType.MAJOR); + CompactionInfo ci = new CompactionInfo("hive", "default", tblName, "bkt=0", CompactionType.MAJOR); LOG.debug("List of stats columns before analyze Part1: " + txnHandler.findColumnsWithStats(ci)); Worker.StatsUpdater su = Worker.StatsUpdater.init(ci, colNames, conf, System.getProperty("user.name")); su.gatherStats();//compute stats before compaction LOG.debug("List of stats columns after analyze Part1: " + txnHandler.findColumnsWithStats(ci)); - CompactionInfo ciPart2 = new CompactionInfo("default", tblName, "bkt=1", CompactionType.MAJOR); + CompactionInfo ciPart2 = new CompactionInfo("hive", "default", tblName, "bkt=1", CompactionType .MAJOR); LOG.debug("List of stats columns before analyze Part2: " + txnHandler.findColumnsWithStats(ci)); su = Worker.StatsUpdater.init(ciPart2, colNames, conf, System.getProperty("user.name")); su.gatherStats();//compute stats before compaction @@ -1737,8 +1737,8 @@ public void testTableProperties() throws Exception { @Test public void testCompactionInfoEquals() { - CompactionInfo compactionInfo = new CompactionInfo("dbName", "tableName", "partName", CompactionType.MINOR); - CompactionInfo compactionInfo1 = new 
CompactionInfo("dbName", "tableName", "partName", CompactionType.MINOR); + CompactionInfo compactionInfo = new CompactionInfo("hive", "dbName", "tableName", "partName", CompactionType.MINOR); + CompactionInfo compactionInfo1 = new CompactionInfo("hive", "dbName", "tableName", "partName", CompactionType.MINOR); Assert.assertTrue("The object must be equal", compactionInfo.equals(compactionInfo)); Assert.assertFalse("The object must be not equal", compactionInfo.equals(new Object())); @@ -1747,8 +1747,8 @@ public void testCompactionInfoEquals() { @Test public void testCompactionInfoHashCode() { - CompactionInfo compactionInfo = new CompactionInfo("dbName", "tableName", "partName", CompactionType.MINOR); - CompactionInfo compactionInfo1 = new CompactionInfo("dbName", "tableName", "partName", CompactionType.MINOR); + CompactionInfo compactionInfo = new CompactionInfo("hive", "dbName", "tableName", "partName", CompactionType.MINOR); + CompactionInfo compactionInfo1 = new CompactionInfo("hive", "dbName", "tableName", "partName", CompactionType.MINOR); Assert.assertEquals("The hash codes must be equal", compactionInfo.hashCode(), compactionInfo1.hashCode()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index a61b6e8d37..d40c4a875d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.CompactionType; @@ -146,7 +147,7 @@ public void run() { // Compaction doesn't work under a transaction and hence 
pass null for validTxnList // The response will have one entry per table and hence we get only one ValidWriteIdList - String fullTableName = TxnUtils.getFullTableName(t.getDbName(), t.getTableName()); + String fullTableName = Warehouse.getQualifiedName(t.getDbName(), t.getTableName()); GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Collections.singletonList(fullTableName), null); ValidWriteIdList tblValidWriteIds = TxnUtils.createValidCompactWriteIdList( diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 74612990de..77f24e5c5b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -143,7 +143,7 @@ public void run() { // Compaction doesn't work under a transaction and hence pass 0 for current txn Id // The response will have one entry per table and hence we get only one OpenWriteIds - String fullTableName = TxnUtils.getFullTableName(t.getDbName(), t.getTableName()); + String fullTableName = Warehouse.getQualifiedName(t.getDbName(), t.getTableName()); GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Collections.singletonList(fullTableName), null); final ValidWriteIdList tblValidWriteIds = TxnUtils.createValidCompactWriteIdList(txnHandler.getValidWriteIds(rqst).getTblValidWriteIds().get(0)); diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 8925fe248d..f71a4b25b7 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -16613,6 +16613,11 @@ void ReplTblWriteIdStateRequest::__set_partNames(const std::vector __isset.partNames = true; } +void ReplTblWriteIdStateRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + 
uint32_t ReplTblWriteIdStateRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -16699,6 +16704,14 @@ uint32_t ReplTblWriteIdStateRequest::read(::apache::thrift::protocol::TProtocol* xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -16759,6 +16772,11 @@ uint32_t ReplTblWriteIdStateRequest::write(::apache::thrift::protocol::TProtocol } xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -16772,6 +16790,7 @@ void swap(ReplTblWriteIdStateRequest &a, ReplTblWriteIdStateRequest &b) { swap(a.dbName, b.dbName); swap(a.tableName, b.tableName); swap(a.partNames, b.partNames); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -16782,6 +16801,7 @@ ReplTblWriteIdStateRequest::ReplTblWriteIdStateRequest(const ReplTblWriteIdState dbName = other679.dbName; tableName = other679.tableName; partNames = other679.partNames; + catName = other679.catName; __isset = other679.__isset; } ReplTblWriteIdStateRequest& ReplTblWriteIdStateRequest::operator=(const ReplTblWriteIdStateRequest& other680) { @@ -16791,6 +16811,7 @@ ReplTblWriteIdStateRequest& ReplTblWriteIdStateRequest::operator=(const ReplTblW dbName = other680.dbName; tableName = other680.tableName; partNames = other680.partNames; + catName = other680.catName; __isset = other680.__isset; return *this; } @@ -16803,6 +16824,7 @@ void ReplTblWriteIdStateRequest::printTo(std::ostream& out) const { out << ", " << "dbName=" << to_string(dbName); out << ", " << 
"tableName=" << to_string(tableName); out << ", " << "partNames="; (__isset.partNames ? (out << to_string(partNames)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -17269,6 +17291,11 @@ void AllocateTableWriteIdsRequest::__set_srcTxnToWriteIdList(const std::vectorcatName = val; +__isset.catName = true; +} + uint32_t AllocateTableWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -17356,6 +17383,14 @@ uint32_t AllocateTableWriteIdsRequest::read(::apache::thrift::protocol::TProtoco xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -17416,6 +17451,11 @@ uint32_t AllocateTableWriteIdsRequest::write(::apache::thrift::protocol::TProtoc } xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -17428,6 +17468,7 @@ void swap(AllocateTableWriteIdsRequest &a, AllocateTableWriteIdsRequest &b) { swap(a.txnIds, b.txnIds); swap(a.replPolicy, b.replPolicy); swap(a.srcTxnToWriteIdList, b.srcTxnToWriteIdList); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -17437,6 +17478,7 @@ AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWr txnIds = other717.txnIds; replPolicy = other717.replPolicy; srcTxnToWriteIdList = other717.srcTxnToWriteIdList; + catName = other717.catName; __isset = other717.__isset; } AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const 
AllocateTableWriteIdsRequest& other718) { @@ -17445,6 +17487,7 @@ AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const Allo txnIds = other718.txnIds; replPolicy = other718.replPolicy; srcTxnToWriteIdList = other718.srcTxnToWriteIdList; + catName = other718.catName; __isset = other718.__isset; return *this; } @@ -17456,6 +17499,7 @@ void AllocateTableWriteIdsRequest::printTo(std::ostream& out) const { out << ", " << "txnIds="; (__isset.txnIds ? (out << to_string(txnIds)) : (out << "")); out << ", " << "replPolicy="; (__isset.replPolicy ? (out << to_string(replPolicy)) : (out << "")); out << ", " << "srcTxnToWriteIdList="; (__isset.srcTxnToWriteIdList ? (out << to_string(srcTxnToWriteIdList)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -17716,6 +17760,11 @@ void LockComponent::__set_isDynamicPartitionWrite(const bool val) { __isset.isDynamicPartitionWrite = true; } +void LockComponent::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -17810,6 +17859,14 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -17870,6 +17927,11 @@ uint32_t LockComponent::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeBool(this->isDynamicPartitionWrite); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += 
oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -17885,6 +17947,7 @@ void swap(LockComponent &a, LockComponent &b) { swap(a.operationType, b.operationType); swap(a.isTransactional, b.isTransactional); swap(a.isDynamicPartitionWrite, b.isDynamicPartitionWrite); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -17897,6 +17960,7 @@ LockComponent::LockComponent(const LockComponent& other732) { operationType = other732.operationType; isTransactional = other732.isTransactional; isDynamicPartitionWrite = other732.isDynamicPartitionWrite; + catName = other732.catName; __isset = other732.__isset; } LockComponent& LockComponent::operator=(const LockComponent& other733) { @@ -17908,6 +17972,7 @@ LockComponent& LockComponent::operator=(const LockComponent& other733) { operationType = other733.operationType; isTransactional = other733.isTransactional; isDynamicPartitionWrite = other733.isDynamicPartitionWrite; + catName = other733.catName; __isset = other733.__isset; return *this; } @@ -17922,6 +17987,7 @@ void LockComponent::printTo(std::ostream& out) const { out << ", " << "operationType="; (__isset.operationType ? (out << to_string(operationType)) : (out << "")); out << ", " << "isTransactional="; (__isset.isTransactional ? (out << to_string(isTransactional)) : (out << "")); out << ", " << "isDynamicPartitionWrite="; (__isset.isDynamicPartitionWrite ? (out << to_string(isDynamicPartitionWrite)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -17952,6 +18018,11 @@ void LockRequest::__set_agentInfo(const std::string& val) { __isset.agentInfo = true; } +void LockRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -18028,6 +18099,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -18081,6 +18160,11 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeString(this->agentInfo); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -18093,6 +18177,7 @@ void swap(LockRequest &a, LockRequest &b) { swap(a.user, b.user); swap(a.hostname, b.hostname); swap(a.agentInfo, b.agentInfo); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -18102,6 +18187,7 @@ LockRequest::LockRequest(const LockRequest& other740) { user = other740.user; hostname = other740.hostname; agentInfo = other740.agentInfo; + catName = other740.catName; __isset = other740.__isset; } LockRequest& LockRequest::operator=(const LockRequest& other741) { @@ -18110,6 +18196,7 @@ LockRequest& LockRequest::operator=(const LockRequest& other741) { user = other741.user; hostname = other741.hostname; agentInfo = other741.agentInfo; + catName = other741.catName; __isset = other741.__isset; 
return *this; } @@ -18121,6 +18208,7 @@ void LockRequest::printTo(std::ostream& out) const { out << ", " << "user=" << to_string(user); out << ", " << "hostname=" << to_string(hostname); out << ", " << "agentInfo="; (__isset.agentInfo ? (out << to_string(agentInfo)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -18479,6 +18567,11 @@ void ShowLocksRequest::__set_isExtended(const bool val) { __isset.isExtended = true; } +void ShowLocksRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t ShowLocksRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -18532,6 +18625,14 @@ uint32_t ShowLocksRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -18569,6 +18670,11 @@ uint32_t ShowLocksRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeBool(this->isExtended); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -18580,6 +18686,7 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) { swap(a.tablename, b.tablename); swap(a.partname, b.partname); swap(a.isExtended, b.isExtended); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -18588,6 +18695,7 @@ ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other749) { tablename = other749.tablename; partname = other749.partname; 
isExtended = other749.isExtended; + catName = other749.catName; __isset = other749.__isset; } ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other750) { @@ -18595,6 +18703,7 @@ ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other750) tablename = other750.tablename; partname = other750.partname; isExtended = other750.isExtended; + catName = other750.catName; __isset = other750.__isset; return *this; } @@ -18605,6 +18714,7 @@ void ShowLocksRequest::printTo(std::ostream& out) const { out << ", " << "tablename="; (__isset.tablename ? (out << to_string(tablename)) : (out << "")); out << ", " << "partname="; (__isset.partname ? (out << to_string(partname)) : (out << "")); out << ", " << "isExtended="; (__isset.isExtended ? (out << to_string(isExtended)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -18686,6 +18796,11 @@ void ShowLocksResponseElement::__set_lockIdInternal(const int64_t val) { __isset.lockIdInternal = true; } +void ShowLocksResponseElement::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -18846,6 +18961,14 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i xfer += iprot->skip(ftype); } break; + case 17: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -18950,6 +19073,11 @@ uint32_t ShowLocksResponseElement::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeI64(this->lockIdInternal); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", 
::apache::thrift::protocol::T_STRING, 17); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -18973,6 +19101,7 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) { swap(a.blockedByExtId, b.blockedByExtId); swap(a.blockedByIntId, b.blockedByIntId); swap(a.lockIdInternal, b.lockIdInternal); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -18993,6 +19122,7 @@ ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElemen blockedByExtId = other753.blockedByExtId; blockedByIntId = other753.blockedByIntId; lockIdInternal = other753.lockIdInternal; + catName = other753.catName; __isset = other753.__isset; } ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other754) { @@ -19012,6 +19142,7 @@ ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksRes blockedByExtId = other754.blockedByExtId; blockedByIntId = other754.blockedByIntId; lockIdInternal = other754.lockIdInternal; + catName = other754.catName; __isset = other754.__isset; return *this; } @@ -19034,6 +19165,7 @@ void ShowLocksResponseElement::printTo(std::ostream& out) const { out << ", " << "blockedByExtId="; (__isset.blockedByExtId ? (out << to_string(blockedByExtId)) : (out << "")); out << ", " << "blockedByIntId="; (__isset.blockedByIntId ? (out << to_string(blockedByIntId)) : (out << "")); out << ", " << "lockIdInternal="; (__isset.lockIdInternal ? (out << to_string(lockIdInternal)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -19545,6 +19677,11 @@ void CompactionRequest::__set_properties(const std::mapcatName = val; +__isset.catName = true; +} + uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -19634,6 +19771,14 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -19693,6 +19838,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) } xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -19706,6 +19856,7 @@ void swap(CompactionRequest &a, CompactionRequest &b) { swap(a.type, b.type); swap(a.runas, b.runas); swap(a.properties, b.properties); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -19716,6 +19867,7 @@ CompactionRequest::CompactionRequest(const CompactionRequest& other792) { type = other792.type; runas = other792.runas; properties = other792.properties; + catName = other792.catName; __isset = other792.__isset; } CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other793) { @@ -19725,6 +19877,7 @@ CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other79 type = other793.type; runas = other793.runas; properties = other793.properties; + catName = other793.catName; __isset = other793.__isset; return *this; } @@ -19737,6 +19890,7 @@ void 
CompactionRequest::printTo(std::ostream& out) const { out << ", " << "type=" << to_string(type); out << ", " << "runas="; (__isset.runas ? (out << to_string(runas)) : (out << "")); out << ", " << "properties="; (__isset.properties ? (out << to_string(properties)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -20000,6 +20154,11 @@ void ShowCompactResponseElement::__set_id(const int64_t val) { __isset.id = true; } +void ShowCompactResponseElement::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -20131,6 +20290,14 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* xfer += iprot->skip(ftype); } break; + case 14: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -20217,6 +20384,11 @@ uint32_t ShowCompactResponseElement::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeI64(this->id); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 14); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -20237,6 +20409,7 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.endTime, b.endTime); swap(a.hadoopJobId, b.hadoopJobId); swap(a.id, b.id); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -20254,6 +20427,7 @@ ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponse endTime = other799.endTime; 
hadoopJobId = other799.hadoopJobId; id = other799.id; + catName = other799.catName; __isset = other799.__isset; } ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other800) { @@ -20270,6 +20444,7 @@ ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowComp endTime = other800.endTime; hadoopJobId = other800.hadoopJobId; id = other800.id; + catName = other800.catName; __isset = other800.__isset; return *this; } @@ -20289,6 +20464,7 @@ void ShowCompactResponseElement::printTo(std::ostream& out) const { out << ", " << "endTime="; (__isset.endTime ? (out << to_string(endTime)) : (out << "")); out << ", " << "hadoopJobId="; (__isset.hadoopJobId ? (out << to_string(hadoopJobId)) : (out << "")); out << ", " << "id="; (__isset.id ? (out << to_string(id)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -20428,6 +20604,11 @@ void AddDynamicPartitions::__set_operationType(const DataOperationType::type val __isset.operationType = true; } +void AddDynamicPartitions::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -20516,6 +20697,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -20576,6 +20765,11 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeI32((int32_t)this->operationType); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += 
oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -20589,6 +20783,7 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.tablename, b.tablename); swap(a.partitionnames, b.partitionnames); swap(a.operationType, b.operationType); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -20599,6 +20794,7 @@ AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other816) tablename = other816.tablename; partitionnames = other816.partitionnames; operationType = other816.operationType; + catName = other816.catName; __isset = other816.__isset; } AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other817) { @@ -20608,6 +20804,7 @@ AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions tablename = other817.tablename; partitionnames = other817.partitionnames; operationType = other817.operationType; + catName = other817.catName; __isset = other817.__isset; return *this; } @@ -20620,6 +20817,7 @@ void AddDynamicPartitions::printTo(std::ostream& out) const { out << ", " << "tablename=" << to_string(tablename); out << ", " << "partitionnames=" << to_string(partitionnames); out << ", " << "operationType="; (__isset.operationType ? (out << to_string(operationType)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -20657,6 +20855,11 @@ void BasicTxnInfo::__set_partitionname(const std::string& val) { __isset.partitionname = true; } +void BasicTxnInfo::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t BasicTxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -20727,6 +20930,14 @@ uint32_t BasicTxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -20775,6 +20986,11 @@ uint32_t BasicTxnInfo::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeString(this->partitionname); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -20788,6 +21004,7 @@ void swap(BasicTxnInfo &a, BasicTxnInfo &b) { swap(a.dbname, b.dbname); swap(a.tablename, b.tablename); swap(a.partitionname, b.partitionname); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -20798,6 +21015,7 @@ BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other818) { dbname = other818.dbname; tablename = other818.tablename; partitionname = other818.partitionname; + catName = other818.catName; __isset = other818.__isset; } BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other819) { @@ -20807,6 +21025,7 @@ BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other819) { dbname = other819.dbname; tablename = other819.tablename; partitionname = 
other819.partitionname; + catName = other819.catName; __isset = other819.__isset; return *this; } @@ -20819,6 +21038,7 @@ void BasicTxnInfo::printTo(std::ostream& out) const { out << ", " << "dbname="; (__isset.dbname ? (out << to_string(dbname)) : (out << "")); out << ", " << "tablename="; (__isset.tablename ? (out << to_string(tablename)) : (out << "")); out << ", " << "partitionname="; (__isset.partitionname ? (out << to_string(partitionname)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 78656d9328..b30fc22546 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -6984,8 +6984,9 @@ inline std::ostream& operator<<(std::ostream& out, const CommitTxnRequest& obj) } typedef struct _ReplTblWriteIdStateRequest__isset { - _ReplTblWriteIdStateRequest__isset() : partNames(false) {} + _ReplTblWriteIdStateRequest__isset() : partNames(false), catName(false) {} bool partNames :1; + bool catName :1; } _ReplTblWriteIdStateRequest__isset; class ReplTblWriteIdStateRequest { @@ -6993,7 +6994,7 @@ class ReplTblWriteIdStateRequest { ReplTblWriteIdStateRequest(const ReplTblWriteIdStateRequest&); ReplTblWriteIdStateRequest& operator=(const ReplTblWriteIdStateRequest&); - ReplTblWriteIdStateRequest() : validWriteIdlist(), user(), hostName(), dbName(), tableName() { + ReplTblWriteIdStateRequest() : validWriteIdlist(), user(), hostName(), dbName(), tableName(), catName() { } virtual ~ReplTblWriteIdStateRequest() throw(); @@ -7003,6 +7004,7 @@ class ReplTblWriteIdStateRequest { std::string dbName; std::string tableName; std::vector partNames; + std::string catName; _ReplTblWriteIdStateRequest__isset __isset; @@ -7018,6 +7020,8 @@ class ReplTblWriteIdStateRequest { 
void __set_partNames(const std::vector & val); + void __set_catName(const std::string& val); + bool operator == (const ReplTblWriteIdStateRequest & rhs) const { if (!(validWriteIdlist == rhs.validWriteIdlist)) @@ -7034,6 +7038,10 @@ class ReplTblWriteIdStateRequest { return false; else if (__isset.partNames && !(partNames == rhs.partNames)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ReplTblWriteIdStateRequest &rhs) const { @@ -7210,10 +7218,11 @@ inline std::ostream& operator<<(std::ostream& out, const GetValidWriteIdsRespons } typedef struct _AllocateTableWriteIdsRequest__isset { - _AllocateTableWriteIdsRequest__isset() : txnIds(false), replPolicy(false), srcTxnToWriteIdList(false) {} + _AllocateTableWriteIdsRequest__isset() : txnIds(false), replPolicy(false), srcTxnToWriteIdList(false), catName(false) {} bool txnIds :1; bool replPolicy :1; bool srcTxnToWriteIdList :1; + bool catName :1; } _AllocateTableWriteIdsRequest__isset; class AllocateTableWriteIdsRequest { @@ -7221,7 +7230,7 @@ class AllocateTableWriteIdsRequest { AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest&); AllocateTableWriteIdsRequest& operator=(const AllocateTableWriteIdsRequest&); - AllocateTableWriteIdsRequest() : dbName(), tableName(), replPolicy() { + AllocateTableWriteIdsRequest() : dbName(), tableName(), replPolicy(), catName() { } virtual ~AllocateTableWriteIdsRequest() throw(); @@ -7230,6 +7239,7 @@ class AllocateTableWriteIdsRequest { std::vector txnIds; std::string replPolicy; std::vector srcTxnToWriteIdList; + std::string catName; _AllocateTableWriteIdsRequest__isset __isset; @@ -7243,6 +7253,8 @@ class AllocateTableWriteIdsRequest { void __set_srcTxnToWriteIdList(const std::vector & val); + void __set_catName(const std::string& val); + bool operator == (const AllocateTableWriteIdsRequest & rhs) const { if (!(dbName == 
rhs.dbName)) @@ -7261,6 +7273,10 @@ class AllocateTableWriteIdsRequest { return false; else if (__isset.srcTxnToWriteIdList && !(srcTxnToWriteIdList == rhs.srcTxnToWriteIdList)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const AllocateTableWriteIdsRequest &rhs) const { @@ -7369,12 +7385,13 @@ inline std::ostream& operator<<(std::ostream& out, const AllocateTableWriteIdsRe } typedef struct _LockComponent__isset { - _LockComponent__isset() : tablename(false), partitionname(false), operationType(true), isTransactional(true), isDynamicPartitionWrite(true) {} + _LockComponent__isset() : tablename(false), partitionname(false), operationType(true), isTransactional(true), isDynamicPartitionWrite(true), catName(false) {} bool tablename :1; bool partitionname :1; bool operationType :1; bool isTransactional :1; bool isDynamicPartitionWrite :1; + bool catName :1; } _LockComponent__isset; class LockComponent { @@ -7382,7 +7399,7 @@ class LockComponent { LockComponent(const LockComponent&); LockComponent& operator=(const LockComponent&); - LockComponent() : type((LockType::type)0), level((LockLevel::type)0), dbname(), tablename(), partitionname(), operationType((DataOperationType::type)5), isTransactional(false), isDynamicPartitionWrite(false) { + LockComponent() : type((LockType::type)0), level((LockLevel::type)0), dbname(), tablename(), partitionname(), operationType((DataOperationType::type)5), isTransactional(false), isDynamicPartitionWrite(false), catName() { operationType = (DataOperationType::type)5; } @@ -7396,6 +7413,7 @@ class LockComponent { DataOperationType::type operationType; bool isTransactional; bool isDynamicPartitionWrite; + std::string catName; _LockComponent__isset __isset; @@ -7415,6 +7433,8 @@ class LockComponent { void __set_isDynamicPartitionWrite(const bool val); + void __set_catName(const std::string& val); + bool 
operator == (const LockComponent & rhs) const { if (!(type == rhs.type)) @@ -7443,6 +7463,10 @@ class LockComponent { return false; else if (__isset.isDynamicPartitionWrite && !(isDynamicPartitionWrite == rhs.isDynamicPartitionWrite)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const LockComponent &rhs) const { @@ -7466,9 +7490,10 @@ inline std::ostream& operator<<(std::ostream& out, const LockComponent& obj) } typedef struct _LockRequest__isset { - _LockRequest__isset() : txnid(false), agentInfo(true) {} + _LockRequest__isset() : txnid(false), agentInfo(true), catName(false) {} bool txnid :1; bool agentInfo :1; + bool catName :1; } _LockRequest__isset; class LockRequest { @@ -7476,7 +7501,7 @@ class LockRequest { LockRequest(const LockRequest&); LockRequest& operator=(const LockRequest&); - LockRequest() : txnid(0), user(), hostname(), agentInfo("Unknown") { + LockRequest() : txnid(0), user(), hostname(), agentInfo("Unknown"), catName() { } virtual ~LockRequest() throw(); @@ -7485,6 +7510,7 @@ class LockRequest { std::string user; std::string hostname; std::string agentInfo; + std::string catName; _LockRequest__isset __isset; @@ -7498,6 +7524,8 @@ class LockRequest { void __set_agentInfo(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const LockRequest & rhs) const { if (!(component == rhs.component)) @@ -7514,6 +7542,10 @@ class LockRequest { return false; else if (__isset.agentInfo && !(agentInfo == rhs.agentInfo)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const LockRequest &rhs) const { @@ -7683,11 +7715,12 @@ inline std::ostream& operator<<(std::ostream& out, const UnlockRequest& obj) } typedef struct _ShowLocksRequest__isset { - 
_ShowLocksRequest__isset() : dbname(false), tablename(false), partname(false), isExtended(true) {} + _ShowLocksRequest__isset() : dbname(false), tablename(false), partname(false), isExtended(true), catName(false) {} bool dbname :1; bool tablename :1; bool partname :1; bool isExtended :1; + bool catName :1; } _ShowLocksRequest__isset; class ShowLocksRequest { @@ -7695,7 +7728,7 @@ class ShowLocksRequest { ShowLocksRequest(const ShowLocksRequest&); ShowLocksRequest& operator=(const ShowLocksRequest&); - ShowLocksRequest() : dbname(), tablename(), partname(), isExtended(false) { + ShowLocksRequest() : dbname(), tablename(), partname(), isExtended(false), catName() { } virtual ~ShowLocksRequest() throw(); @@ -7703,6 +7736,7 @@ class ShowLocksRequest { std::string tablename; std::string partname; bool isExtended; + std::string catName; _ShowLocksRequest__isset __isset; @@ -7714,6 +7748,8 @@ class ShowLocksRequest { void __set_isExtended(const bool val); + void __set_catName(const std::string& val); + bool operator == (const ShowLocksRequest & rhs) const { if (__isset.dbname != rhs.__isset.dbname) @@ -7732,6 +7768,10 @@ class ShowLocksRequest { return false; else if (__isset.isExtended && !(isExtended == rhs.isExtended)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ShowLocksRequest &rhs) const { @@ -7755,7 +7795,7 @@ inline std::ostream& operator<<(std::ostream& out, const ShowLocksRequest& obj) } typedef struct _ShowLocksResponseElement__isset { - _ShowLocksResponseElement__isset() : tablename(false), partname(false), txnid(false), acquiredat(false), heartbeatCount(true), agentInfo(false), blockedByExtId(false), blockedByIntId(false), lockIdInternal(false) {} + _ShowLocksResponseElement__isset() : tablename(false), partname(false), txnid(false), acquiredat(false), heartbeatCount(true), agentInfo(false), blockedByExtId(false), 
blockedByIntId(false), lockIdInternal(false), catName(false) {} bool tablename :1; bool partname :1; bool txnid :1; @@ -7765,6 +7805,7 @@ typedef struct _ShowLocksResponseElement__isset { bool blockedByExtId :1; bool blockedByIntId :1; bool lockIdInternal :1; + bool catName :1; } _ShowLocksResponseElement__isset; class ShowLocksResponseElement { @@ -7772,7 +7813,7 @@ class ShowLocksResponseElement { ShowLocksResponseElement(const ShowLocksResponseElement&); ShowLocksResponseElement& operator=(const ShowLocksResponseElement&); - ShowLocksResponseElement() : lockid(0), dbname(), tablename(), partname(), state((LockState::type)0), type((LockType::type)0), txnid(0), lastheartbeat(0), acquiredat(0), user(), hostname(), heartbeatCount(0), agentInfo(), blockedByExtId(0), blockedByIntId(0), lockIdInternal(0) { + ShowLocksResponseElement() : lockid(0), dbname(), tablename(), partname(), state((LockState::type)0), type((LockType::type)0), txnid(0), lastheartbeat(0), acquiredat(0), user(), hostname(), heartbeatCount(0), agentInfo(), blockedByExtId(0), blockedByIntId(0), lockIdInternal(0), catName() { } virtual ~ShowLocksResponseElement() throw(); @@ -7792,6 +7833,7 @@ class ShowLocksResponseElement { int64_t blockedByExtId; int64_t blockedByIntId; int64_t lockIdInternal; + std::string catName; _ShowLocksResponseElement__isset __isset; @@ -7827,6 +7869,8 @@ class ShowLocksResponseElement { void __set_lockIdInternal(const int64_t val); + void __set_catName(const std::string& val); + bool operator == (const ShowLocksResponseElement & rhs) const { if (!(lockid == rhs.lockid)) @@ -7879,6 +7923,10 @@ class ShowLocksResponseElement { return false; else if (__isset.lockIdInternal && !(lockIdInternal == rhs.lockIdInternal)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ShowLocksResponseElement &rhs) const { @@ -8094,10 +8142,11 @@ inline 
std::ostream& operator<<(std::ostream& out, const HeartbeatTxnRangeRespon } typedef struct _CompactionRequest__isset { - _CompactionRequest__isset() : partitionname(false), runas(false), properties(false) {} + _CompactionRequest__isset() : partitionname(false), runas(false), properties(false), catName(false) {} bool partitionname :1; bool runas :1; bool properties :1; + bool catName :1; } _CompactionRequest__isset; class CompactionRequest { @@ -8105,7 +8154,7 @@ class CompactionRequest { CompactionRequest(const CompactionRequest&); CompactionRequest& operator=(const CompactionRequest&); - CompactionRequest() : dbname(), tablename(), partitionname(), type((CompactionType::type)0), runas() { + CompactionRequest() : dbname(), tablename(), partitionname(), type((CompactionType::type)0), runas(), catName() { } virtual ~CompactionRequest() throw(); @@ -8115,6 +8164,7 @@ class CompactionRequest { CompactionType::type type; std::string runas; std::map properties; + std::string catName; _CompactionRequest__isset __isset; @@ -8130,6 +8180,8 @@ class CompactionRequest { void __set_properties(const std::map & val); + void __set_catName(const std::string& val); + bool operator == (const CompactionRequest & rhs) const { if (!(dbname == rhs.dbname)) @@ -8150,6 +8202,10 @@ class CompactionRequest { return false; else if (__isset.properties && !(properties == rhs.properties)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const CompactionRequest &rhs) const { @@ -8258,7 +8314,7 @@ inline std::ostream& operator<<(std::ostream& out, const ShowCompactRequest& obj } typedef struct _ShowCompactResponseElement__isset { - _ShowCompactResponseElement__isset() : partitionname(false), workerid(false), start(false), runAs(false), hightestTxnId(false), metaInfo(false), endTime(false), hadoopJobId(true), id(false) {} + _ShowCompactResponseElement__isset() : 
partitionname(false), workerid(false), start(false), runAs(false), hightestTxnId(false), metaInfo(false), endTime(false), hadoopJobId(true), id(false), catName(false) {} bool partitionname :1; bool workerid :1; bool start :1; @@ -8268,6 +8324,7 @@ typedef struct _ShowCompactResponseElement__isset { bool endTime :1; bool hadoopJobId :1; bool id :1; + bool catName :1; } _ShowCompactResponseElement__isset; class ShowCompactResponseElement { @@ -8275,7 +8332,7 @@ class ShowCompactResponseElement { ShowCompactResponseElement(const ShowCompactResponseElement&); ShowCompactResponseElement& operator=(const ShowCompactResponseElement&); - ShowCompactResponseElement() : dbname(), tablename(), partitionname(), type((CompactionType::type)0), state(), workerid(), start(0), runAs(), hightestTxnId(0), metaInfo(), endTime(0), hadoopJobId("None"), id(0) { + ShowCompactResponseElement() : dbname(), tablename(), partitionname(), type((CompactionType::type)0), state(), workerid(), start(0), runAs(), hightestTxnId(0), metaInfo(), endTime(0), hadoopJobId("None"), id(0), catName() { } virtual ~ShowCompactResponseElement() throw(); @@ -8292,6 +8349,7 @@ class ShowCompactResponseElement { int64_t endTime; std::string hadoopJobId; int64_t id; + std::string catName; _ShowCompactResponseElement__isset __isset; @@ -8321,6 +8379,8 @@ class ShowCompactResponseElement { void __set_id(const int64_t val); + void __set_catName(const std::string& val); + bool operator == (const ShowCompactResponseElement & rhs) const { if (!(dbname == rhs.dbname)) @@ -8367,6 +8427,10 @@ class ShowCompactResponseElement { return false; else if (__isset.id && !(id == rhs.id)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ShowCompactResponseElement &rhs) const { @@ -8430,8 +8494,9 @@ inline std::ostream& operator<<(std::ostream& out, const ShowCompactResponse& ob } typedef struct 
_AddDynamicPartitions__isset { - _AddDynamicPartitions__isset() : operationType(true) {} + _AddDynamicPartitions__isset() : operationType(true), catName(false) {} bool operationType :1; + bool catName :1; } _AddDynamicPartitions__isset; class AddDynamicPartitions { @@ -8439,7 +8504,7 @@ class AddDynamicPartitions { AddDynamicPartitions(const AddDynamicPartitions&); AddDynamicPartitions& operator=(const AddDynamicPartitions&); - AddDynamicPartitions() : txnid(0), writeid(0), dbname(), tablename(), operationType((DataOperationType::type)5) { + AddDynamicPartitions() : txnid(0), writeid(0), dbname(), tablename(), operationType((DataOperationType::type)5), catName() { operationType = (DataOperationType::type)5; } @@ -8451,6 +8516,7 @@ class AddDynamicPartitions { std::string tablename; std::vector partitionnames; DataOperationType::type operationType; + std::string catName; _AddDynamicPartitions__isset __isset; @@ -8466,6 +8532,8 @@ class AddDynamicPartitions { void __set_operationType(const DataOperationType::type val); + void __set_catName(const std::string& val); + bool operator == (const AddDynamicPartitions & rhs) const { if (!(txnid == rhs.txnid)) @@ -8482,6 +8550,10 @@ class AddDynamicPartitions { return false; else if (__isset.operationType && !(operationType == rhs.operationType)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const AddDynamicPartitions &rhs) const { @@ -8505,12 +8577,13 @@ inline std::ostream& operator<<(std::ostream& out, const AddDynamicPartitions& o } typedef struct _BasicTxnInfo__isset { - _BasicTxnInfo__isset() : time(false), txnid(false), dbname(false), tablename(false), partitionname(false) {} + _BasicTxnInfo__isset() : time(false), txnid(false), dbname(false), tablename(false), partitionname(false), catName(false) {} bool time :1; bool txnid :1; bool dbname :1; bool tablename :1; bool partitionname :1; 
+ bool catName :1; } _BasicTxnInfo__isset; class BasicTxnInfo { @@ -8518,7 +8591,7 @@ class BasicTxnInfo { BasicTxnInfo(const BasicTxnInfo&); BasicTxnInfo& operator=(const BasicTxnInfo&); - BasicTxnInfo() : isnull(0), time(0), txnid(0), dbname(), tablename(), partitionname() { + BasicTxnInfo() : isnull(0), time(0), txnid(0), dbname(), tablename(), partitionname(), catName() { } virtual ~BasicTxnInfo() throw(); @@ -8528,6 +8601,7 @@ class BasicTxnInfo { std::string dbname; std::string tablename; std::string partitionname; + std::string catName; _BasicTxnInfo__isset __isset; @@ -8543,6 +8617,8 @@ class BasicTxnInfo { void __set_partitionname(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const BasicTxnInfo & rhs) const { if (!(isnull == rhs.isnull)) @@ -8567,6 +8643,10 @@ class BasicTxnInfo { return false; else if (__isset.partitionname && !(partitionname == rhs.partitionname)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const BasicTxnInfo &rhs) const { diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index 1dcc8707b5..7a657bfb19 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField PARTITIONNAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionnames", 
org.apache.thrift.protocol.TType.LIST, (short)5); private static final org.apache.thrift.protocol.TField OPERATION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationType", org.apache.thrift.protocol.TType.I32, (short)6); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private String tablename; // required private List partitionnames; // required private DataOperationType operationType; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -69,7 +71,8 @@ * * @see DataOperationType */ - OPERATION_TYPE((short)6, "operationType"); + OPERATION_TYPE((short)6, "operationType"), + CAT_NAME((short)7, "catName"); private static final Map byName = new HashMap(); @@ -96,6 +99,8 @@ public static _Fields findByThriftId(int fieldId) { return PARTITIONNAMES; case 6: // OPERATION_TYPE return OPERATION_TYPE; + case 7: // CAT_NAME + return CAT_NAME; default: return null; } @@ -139,7 +144,7 @@ public String getFieldName() { private static final int __TXNID_ISSET_ID = 0; private static final int __WRITEID_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.OPERATION_TYPE}; + private static final _Fields optionals[] = {_Fields.OPERATION_TYPE,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -156,6 +161,8 @@ public String getFieldName() { new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.OPERATION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("operationType", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, DataOperationType.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddDynamicPartitions.class, metaDataMap); } @@ -202,6 +209,9 @@ public AddDynamicPartitions(AddDynamicPartitions other) { if (other.isSetOperationType()) { this.operationType = other.operationType; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public AddDynamicPartitions deepCopy() { @@ -219,6 +229,7 @@ public void clear() { this.partitionnames = null; this.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.UNSET; + this.catName = null; } public long getTxnid() { @@ -380,6 +391,29 @@ public void setOperationTypeIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TXNID: @@ -430,6 +464,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + 
setCatName((String)value); + } + break; + } } @@ -453,6 +495,9 @@ public Object getFieldValue(_Fields field) { case OPERATION_TYPE: return getOperationType(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -476,6 +521,8 @@ public boolean isSet(_Fields field) { return isSetPartitionnames(); case OPERATION_TYPE: return isSetOperationType(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -547,6 +594,15 @@ public boolean equals(AddDynamicPartitions that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -584,6 +640,11 @@ public int hashCode() { if (present_operationType) list.add(operationType.getValue()); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -655,6 +716,16 @@ public int compareTo(AddDynamicPartitions other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -716,6 +787,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -839,6 +920,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -887,6 +976,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -919,10 +1015,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition if (struct.isSetOperationType()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetOperationType()) { oprot.writeI32(struct.operationType.getValue()); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -947,11 +1049,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions } } struct.setPartitionnamesIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.findByValue(iprot.readI32()); struct.setOperationTypeIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java index 
fa33963799..9e6fe64177 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txnIds", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField REPL_POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("replPolicy", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("srcTxnToWriteIdList", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private List txnIds; // optional private String replPolicy; // optional private List srcTxnToWriteIdList; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ TABLE_NAME((short)2, "tableName"), TXN_IDS((short)3, "txnIds"), REPL_POLICY((short)4, "replPolicy"), - SRC_TXN_TO_WRITE_ID_LIST((short)5, "srcTxnToWriteIdList"); + SRC_TXN_TO_WRITE_ID_LIST((short)5, "srcTxnToWriteIdList"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return REPL_POLICY; case 5: // SRC_TXN_TO_WRITE_ID_LIST return SRC_TXN_TO_WRITE_ID_LIST; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -127,7 +132,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.TXN_IDS,_Fields.REPL_POLICY,_Fields.SRC_TXN_TO_WRITE_ID_LIST}; + private static final _Fields optionals[] = {_Fields.TXN_IDS,_Fields.REPL_POLICY,_Fields.SRC_TXN_TO_WRITE_ID_LIST,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -143,6 +148,8 @@ public String getFieldName() { tmpMap.put(_Fields.SRC_TXN_TO_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("srcTxnToWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT , "TxnToWriteId")))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AllocateTableWriteIdsRequest.class, metaDataMap); } 
@@ -183,6 +190,9 @@ public AllocateTableWriteIdsRequest(AllocateTableWriteIdsRequest other) { } this.srcTxnToWriteIdList = __this__srcTxnToWriteIdList; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public AllocateTableWriteIdsRequest deepCopy() { @@ -196,6 +206,7 @@ public void clear() { this.txnIds = null; this.replPolicy = null; this.srcTxnToWriteIdList = null; + this.catName = null; } public String getDbName() { @@ -343,6 +354,29 @@ public void setSrcTxnToWriteIdListIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -385,6 +419,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -405,6 +447,9 @@ public Object getFieldValue(_Fields field) { case SRC_TXN_TO_WRITE_ID_LIST: return getSrcTxnToWriteIdList(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -426,6 +471,8 @@ public boolean isSet(_Fields field) { return isSetReplPolicy(); case SRC_TXN_TO_WRITE_ID_LIST: return isSetSrcTxnToWriteIdList(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -488,6 +535,15 @@ public boolean equals(AllocateTableWriteIdsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if 
(!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -520,6 +576,11 @@ public int hashCode() { if (present_srcTxnToWriteIdList) list.add(srcTxnToWriteIdList); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -581,6 +642,16 @@ public int compareTo(AllocateTableWriteIdsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -646,6 +717,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -758,6 +839,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -816,6 +905,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } 
oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -845,7 +941,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI if (struct.isSetSrcTxnToWriteIdList()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetCatName()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetTxnIds()) { { oprot.writeI32(struct.txnIds.size()); @@ -867,6 +966,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI } } } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -876,7 +978,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId struct.setDbNameIsSet(true); struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list636 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); @@ -908,6 +1010,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId } struct.setSrcTxnToWriteIdListIsSet(true); } + if (incoming.get(3)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BasicTxnInfo.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BasicTxnInfo.java index da37d03e98..243e776d8b 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BasicTxnInfo.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BasicTxnInfo.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)4); private static final 
org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.protocol.TField PARTITIONNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionname", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private String dbname; // optional private String tablename; // optional private String partitionname; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ TXNID((short)3, "txnid"), DBNAME((short)4, "dbname"), TABLENAME((short)5, "tablename"), - PARTITIONNAME((short)6, "partitionname"); + PARTITIONNAME((short)6, "partitionname"), + CAT_NAME((short)7, "catName"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLENAME; case 6: // PARTITIONNAME return PARTITIONNAME; + case 7: // CAT_NAME + return CAT_NAME; default: return null; } @@ -136,7 +141,7 @@ public String getFieldName() { private static final int __TIME_ISSET_ID = 1; private static final int __TXNID_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.TIME,_Fields.TXNID,_Fields.DBNAME,_Fields.TABLENAME,_Fields.PARTITIONNAME}; + private static final _Fields optionals[] = {_Fields.TIME,_Fields.TXNID,_Fields.DBNAME,_Fields.TABLENAME,_Fields.PARTITIONNAME,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { 
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -152,6 +157,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.PARTITIONNAME, new org.apache.thrift.meta_data.FieldMetaData("partitionname", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BasicTxnInfo.class, metaDataMap); } @@ -184,6 +191,9 @@ public BasicTxnInfo(BasicTxnInfo other) { if (other.isSetPartitionname()) { this.partitionname = other.partitionname; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public BasicTxnInfo deepCopy() { @@ -201,6 +211,7 @@ public void clear() { this.dbname = null; this.tablename = null; this.partitionname = null; + this.catName = null; } public boolean isIsnull() { @@ -338,6 +349,29 @@ public void setPartitionnameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case ISNULL: @@ -388,6 +422,14 @@ public void setFieldValue(_Fields 
field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -411,6 +453,9 @@ public Object getFieldValue(_Fields field) { case PARTITIONNAME: return getPartitionname(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -434,6 +479,8 @@ public boolean isSet(_Fields field) { return isSetTablename(); case PARTITIONNAME: return isSetPartitionname(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -505,6 +552,15 @@ public boolean equals(BasicTxnInfo that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -542,6 +598,11 @@ public int hashCode() { if (present_partitionname) list.add(partitionname); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -613,6 +674,16 @@ public int compareTo(BasicTxnInfo other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -678,6 +749,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -775,6 +856,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, BasicTxnInfo struct org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -822,6 +911,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, BasicTxnInfo struc oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -856,7 +952,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, BasicTxnInfo struct if (struct.isSetPartitionname()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetTime()) { oprot.writeI64(struct.time); } @@ -872,6 +971,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, BasicTxnInfo struct if (struct.isSetPartitionname()) { oprot.writeString(struct.partitionname); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -879,7 +981,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, BasicTxnInfo struct) TTupleProtocol iprot = (TTupleProtocol) prot; struct.isnull = iprot.readBool(); struct.setIsnullIsSet(true); - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.time = iprot.readI64(); struct.setTimeIsSet(true); @@ -900,6 +1002,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, BasicTxnInfo struct) struct.partitionname = iprot.readString(); 
struct.setPartitionnameIsSet(true); } + if (incoming.get(5)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index 31f2e144a9..4d82379d61 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)4); private static final org.apache.thrift.protocol.TField RUNAS_FIELD_DESC = new org.apache.thrift.protocol.TField("runas", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.protocol.TField PROPERTIES_FIELD_DESC = new org.apache.thrift.protocol.TField("properties", org.apache.thrift.protocol.TType.MAP, (short)6); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private CompactionType type; // required private String runas; // optional private Map properties; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -69,7 +71,8 @@ */ TYPE((short)4, "type"), RUNAS((short)5, "runas"), - PROPERTIES((short)6, "properties"); + PROPERTIES((short)6, "properties"), + CAT_NAME((short)7, "catName"); private static final Map byName = new HashMap(); @@ -96,6 +99,8 @@ public static _Fields findByThriftId(int fieldId) { return RUNAS; case 6: // PROPERTIES return PROPERTIES; + case 7: // CAT_NAME + return CAT_NAME; default: return null; } @@ -136,7 +141,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -154,6 +159,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionRequest.class, metaDataMap); } @@ -195,6 +202,9 @@ public CompactionRequest(CompactionRequest other) { Map __this__properties = new HashMap(other.properties); this.properties = __this__properties; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public 
CompactionRequest deepCopy() { @@ -209,6 +219,7 @@ public void clear() { this.type = null; this.runas = null; this.properties = null; + this.catName = null; } public String getDbname() { @@ -368,6 +379,29 @@ public void setPropertiesIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -418,6 +452,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -441,6 +483,9 @@ public Object getFieldValue(_Fields field) { case PROPERTIES: return getProperties(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -464,6 +509,8 @@ public boolean isSet(_Fields field) { return isSetRunas(); case PROPERTIES: return isSetProperties(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -535,6 +582,15 @@ public boolean equals(CompactionRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -572,6 +628,11 @@ public int hashCode() { if (present_properties) list.add(properties); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if 
(present_catName) + list.add(catName); + return list.hashCode(); } @@ -643,6 +704,16 @@ public int compareTo(CompactionRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -716,6 +787,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -831,6 +912,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -888,6 +977,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -918,7 +1014,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetProperties()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetCatName()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetPartitionname()) { 
oprot.writeString(struct.partitionname); } @@ -935,6 +1034,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s } } } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -946,7 +1048,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st struct.setTablenameIsSet(true); struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32()); struct.setTypeIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.partitionname = iprot.readString(); struct.setPartitionnameIsSet(true); @@ -970,6 +1072,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st } struct.setPropertiesIsSet(true); } + if (incoming.get(3)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java index 77de5c9cf8..6fe7f24edb 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField OPERATION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationType", org.apache.thrift.protocol.TType.I32, (short)6); private static final org.apache.thrift.protocol.TField IS_TRANSACTIONAL_FIELD_DESC = new org.apache.thrift.protocol.TField("isTransactional", org.apache.thrift.protocol.TType.BOOL, (short)7); private static final org.apache.thrift.protocol.TField IS_DYNAMIC_PARTITION_WRITE_FIELD_DESC = new org.apache.thrift.protocol.TField("isDynamicPartitionWrite", org.apache.thrift.protocol.TType.BOOL, 
(short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private DataOperationType operationType; // optional private boolean isTransactional; // optional private boolean isDynamicPartitionWrite; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -83,7 +85,8 @@ */ OPERATION_TYPE((short)6, "operationType"), IS_TRANSACTIONAL((short)7, "isTransactional"), - IS_DYNAMIC_PARTITION_WRITE((short)8, "isDynamicPartitionWrite"); + IS_DYNAMIC_PARTITION_WRITE((short)8, "isDynamicPartitionWrite"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -114,6 +117,8 @@ public static _Fields findByThriftId(int fieldId) { return IS_TRANSACTIONAL; case 8: // IS_DYNAMIC_PARTITION_WRITE return IS_DYNAMIC_PARTITION_WRITE; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -157,7 +162,7 @@ public String getFieldName() { private static final int __ISTRANSACTIONAL_ISSET_ID = 0; private static final int __ISDYNAMICPARTITIONWRITE_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.TABLENAME,_Fields.PARTITIONNAME,_Fields.OPERATION_TYPE,_Fields.IS_TRANSACTIONAL,_Fields.IS_DYNAMIC_PARTITION_WRITE}; + private static final _Fields optionals[] = {_Fields.TABLENAME,_Fields.PARTITIONNAME,_Fields.OPERATION_TYPE,_Fields.IS_TRANSACTIONAL,_Fields.IS_DYNAMIC_PARTITION_WRITE,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, 
org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -177,6 +182,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.IS_DYNAMIC_PARTITION_WRITE, new org.apache.thrift.meta_data.FieldMetaData("isDynamicPartitionWrite", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LockComponent.class, metaDataMap); } @@ -226,6 +233,9 @@ public LockComponent(LockComponent other) { } this.isTransactional = other.isTransactional; this.isDynamicPartitionWrite = other.isDynamicPartitionWrite; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public LockComponent deepCopy() { @@ -245,6 +255,7 @@ public void clear() { this.isDynamicPartitionWrite = false; + this.catName = null; } /** @@ -453,6 +464,29 @@ public void setIsDynamicPartitionWriteIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISDYNAMICPARTITIONWRITE_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TYPE: @@ -519,6 +553,14 @@ public void 
setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -548,6 +590,9 @@ public Object getFieldValue(_Fields field) { case IS_DYNAMIC_PARTITION_WRITE: return isIsDynamicPartitionWrite(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -575,6 +620,8 @@ public boolean isSet(_Fields field) { return isSetIsTransactional(); case IS_DYNAMIC_PARTITION_WRITE: return isSetIsDynamicPartitionWrite(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -664,6 +711,15 @@ public boolean equals(LockComponent that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -711,6 +767,11 @@ public int hashCode() { if (present_isDynamicPartitionWrite) list.add(isDynamicPartitionWrite); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -802,6 +863,16 @@ public int compareTo(LockComponent other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -887,6 +958,16 @@ public String toString() { sb.append(this.isDynamicPartitionWrite); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + 
sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1008,6 +1089,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockComponent struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1067,6 +1156,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockComponent stru oprot.writeBool(struct.isDynamicPartitionWrite); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1103,7 +1199,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockComponent struc if (struct.isSetIsDynamicPartitionWrite()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetTablename()) { oprot.writeString(struct.tablename); } @@ -1119,6 +1218,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockComponent struc if (struct.isSetIsDynamicPartitionWrite()) { oprot.writeBool(struct.isDynamicPartitionWrite); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -1130,7 +1232,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, LockComponent struct struct.setLevelIsSet(true); struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.tablename = 
iprot.readString(); struct.setTablenameIsSet(true); @@ -1151,6 +1253,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, LockComponent struct struct.isDynamicPartitionWrite = iprot.readBool(); struct.setIsDynamicPartitionWriteIsSet(true); } + if (incoming.get(5)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index d0dc21c319..ef53fa37a3 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField USER_FIELD_DESC = new org.apache.thrift.protocol.TField("user", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField HOSTNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("hostname", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField AGENT_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("agentInfo", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private String user; // required private String hostname; // required private String agentInfo; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ TXNID((short)2, "txnid"), USER((short)3, "user"), HOSTNAME((short)4, "hostname"), - AGENT_INFO((short)5, "agentInfo"); + AGENT_INFO((short)5, "agentInfo"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return HOSTNAME; case 5: // AGENT_INFO return AGENT_INFO; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -129,7 +134,7 @@ public String getFieldName() { // isset id assignments private static final int __TXNID_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.TXNID,_Fields.AGENT_INFO}; + private static final _Fields optionals[] = {_Fields.TXNID,_Fields.AGENT_INFO,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -144,6 +149,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.AGENT_INFO, new org.apache.thrift.meta_data.FieldMetaData("agentInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LockRequest.class, metaDataMap); } @@ -186,6 +193,9 @@ public LockRequest(LockRequest other) { if (other.isSetAgentInfo()) { this.agentInfo = other.agentInfo; } + if 
(other.isSetCatName()) { + this.catName = other.catName; + } } public LockRequest deepCopy() { @@ -201,6 +211,7 @@ public void clear() { this.hostname = null; this.agentInfo = "Unknown"; + this.catName = null; } public int getComponentSize() { @@ -332,6 +343,29 @@ public void setAgentInfoIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case COMPONENT: @@ -374,6 +408,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -394,6 +436,9 @@ public Object getFieldValue(_Fields field) { case AGENT_INFO: return getAgentInfo(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -415,6 +460,8 @@ public boolean isSet(_Fields field) { return isSetHostname(); case AGENT_INFO: return isSetAgentInfo(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -477,6 +524,15 @@ public boolean equals(LockRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -509,6 +565,11 @@ public int hashCode() { if (present_agentInfo) list.add(agentInfo); + boolean present_catName = true && 
(isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -570,6 +631,16 @@ public int compareTo(LockRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -629,6 +700,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -737,6 +818,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -784,6 +873,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -817,13 +913,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) if (struct.isSetAgentInfo()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetCatName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 
3); if (struct.isSetTxnid()) { oprot.writeI64(struct.txnid); } if (struct.isSetAgentInfo()) { oprot.writeString(struct.agentInfo); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -845,7 +947,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) struct.setUserIsSet(true); struct.hostname = iprot.readString(); struct.setHostnameIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.txnid = iprot.readI64(); struct.setTxnidIsSet(true); @@ -854,6 +956,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) struct.agentInfo = iprot.readString(); struct.setAgentInfoIsSet(true); } + if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java index 97bb8a47ac..4bc0169af2 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)6); + private static final org.apache.thrift.protocol.TField 
CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private String dbName; // required private String tableName; // required private List partNames; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ HOST_NAME((short)3, "hostName"), DB_NAME((short)4, "dbName"), TABLE_NAME((short)5, "tableName"), - PART_NAMES((short)6, "partNames"); + PART_NAMES((short)6, "partNames"), + CAT_NAME((short)7, "catName"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_NAME; case 6: // PART_NAMES return PART_NAMES; + case 7: // CAT_NAME + return CAT_NAME; default: return null; } @@ -132,7 +137,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.PART_NAMES}; + private static final _Fields optionals[] = {_Fields.PART_NAMES,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -149,6 +154,8 @@ public String getFieldName() { tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ReplTblWriteIdStateRequest.class, metaDataMap); } @@ -194,6 +201,9 @@ public ReplTblWriteIdStateRequest(ReplTblWriteIdStateRequest other) { List __this__partNames = new ArrayList(other.partNames); this.partNames = __this__partNames; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ReplTblWriteIdStateRequest deepCopy() { @@ -208,6 +218,7 @@ public void clear() { this.dbName = null; this.tableName = null; this.partNames = null; + this.catName = null; } public String getValidWriteIdlist() { @@ -363,6 +374,29 @@ public void setPartNamesIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case VALID_WRITE_IDLIST: @@ -413,6 +447,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -436,6 +478,9 @@ public Object getFieldValue(_Fields field) { case PART_NAMES: return getPartNames(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -459,6 +504,8 @@ public boolean isSet(_Fields field) { return isSetTableName(); case PART_NAMES: return isSetPartNames(); + case CAT_NAME: + return isSetCatName(); } throw new 
IllegalStateException(); } @@ -530,6 +577,15 @@ public boolean equals(ReplTblWriteIdStateRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -567,6 +623,11 @@ public int hashCode() { if (present_partNames) list.add(partNames); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -638,6 +699,16 @@ public int compareTo(ReplTblWriteIdStateRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -707,6 +778,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -828,6 +909,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ReplTblWriteIdState org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -880,6 +969,13 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, ReplTblWriteIdStat oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -906,7 +1002,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdState if (struct.isSetPartNames()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); @@ -916,6 +1015,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdState } } } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -931,7 +1033,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdStateR struct.setDbNameIsSet(true); struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list599 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -945,6 +1047,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdStateR } struct.setPartNamesIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java index 8a5682a013..e6cad2205c 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java +++ 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java @@ -51,6 +51,7 @@ private static final org.apache.thrift.protocol.TField END_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("endTime", org.apache.thrift.protocol.TType.I64, (short)11); private static final org.apache.thrift.protocol.TField HADOOP_JOB_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("hadoopJobId", org.apache.thrift.protocol.TType.STRING, (short)12); private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short)13); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)14); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -71,6 +72,7 @@ private long endTime; // optional private String hadoopJobId; // optional private long id; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -90,7 +92,8 @@ META_INFO((short)10, "metaInfo"), END_TIME((short)11, "endTime"), HADOOP_JOB_ID((short)12, "hadoopJobId"), - ID((short)13, "id"); + ID((short)13, "id"), + CAT_NAME((short)14, "catName"); private static final Map byName = new HashMap(); @@ -131,6 +134,8 @@ public static _Fields findByThriftId(int fieldId) { return HADOOP_JOB_ID; case 13: // ID return ID; + case 14: // CAT_NAME + return CAT_NAME; default: return null; } @@ -176,7 +181,7 @@ public String getFieldName() { private static final int __ENDTIME_ISSET_ID = 2; private static final int __ID_ISSET_ID = 3; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -206,6 +211,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = 
Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ShowCompactResponseElement.class, metaDataMap); } @@ -264,6 +271,9 @@ public ShowCompactResponseElement(ShowCompactResponseElement other) { this.hadoopJobId = other.hadoopJobId; } this.id = other.id; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ShowCompactResponseElement deepCopy() { @@ -290,6 +300,7 @@ public void clear() { setIdIsSet(false); this.id = 0; + this.catName = null; } public String getDbname() { @@ -595,6 +606,29 @@ public void setIdIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -701,6 +735,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -745,6 +787,9 @@ public Object getFieldValue(_Fields field) { case ID: return getId(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -782,6 +827,8 @@ public boolean isSet(_Fields field) { return isSetHadoopJobId(); case ID: return isSetId(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -916,6 +963,15 @@ public boolean equals(ShowCompactResponseElement that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && 
that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -988,6 +1044,11 @@ public int hashCode() { if (present_id) list.add(id); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -1129,6 +1190,16 @@ public int compareTo(ShowCompactResponseElement other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1254,6 +1325,16 @@ public String toString() { sb.append(this.id); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1419,6 +1500,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 14: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1507,6 +1596,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeI64(struct.id); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + 
oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1556,7 +1652,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse if (struct.isSetId()) { optionals.set(8); } - oprot.writeBitSet(optionals, 9); + if (struct.isSetCatName()) { + optionals.set(9); + } + oprot.writeBitSet(optionals, 10); if (struct.isSetPartitionname()) { oprot.writeString(struct.partitionname); } @@ -1584,6 +1683,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse if (struct.isSetId()) { oprot.writeI64(struct.id); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -1597,7 +1699,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponseE struct.setTypeIsSet(true); struct.state = iprot.readString(); struct.setStateIsSet(true); - BitSet incoming = iprot.readBitSet(9); + BitSet incoming = iprot.readBitSet(10); if (incoming.get(0)) { struct.partitionname = iprot.readString(); struct.setPartitionnameIsSet(true); @@ -1634,6 +1736,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponseE struct.id = iprot.readI64(); struct.setIdIsSet(true); } + if (incoming.get(9)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java index 9fbab42e56..0abcceea09 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java @@ -42,6 +42,7 @@ private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PARTNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partname", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField IS_EXTENDED_FIELD_DESC = new org.apache.thrift.protocol.TField("isExtended", org.apache.thrift.protocol.TType.BOOL, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +54,15 @@ private String tablename; // optional private String partname; // optional private boolean isExtended; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), TABLENAME((short)2, "tablename"), PARTNAME((short)3, "partname"), - IS_EXTENDED((short)4, "isExtended"); + IS_EXTENDED((short)4, "isExtended"), + CAT_NAME((short)5, "catName"); private static final Map byName = new HashMap(); @@ -82,6 +85,8 @@ public static _Fields findByThriftId(int fieldId) { return PARTNAME; case 4: // IS_EXTENDED return IS_EXTENDED; + case 5: // CAT_NAME + return CAT_NAME; default: return null; } @@ -124,7 +129,7 @@ public String getFieldName() { // isset id assignments private static final int __ISEXTENDED_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DBNAME,_Fields.TABLENAME,_Fields.PARTNAME,_Fields.IS_EXTENDED}; + private static final _Fields optionals[] = {_Fields.DBNAME,_Fields.TABLENAME,_Fields.PARTNAME,_Fields.IS_EXTENDED,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -136,6 +141,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.IS_EXTENDED, new org.apache.thrift.meta_data.FieldMetaData("isExtended", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ShowLocksRequest.class, metaDataMap); } @@ -160,6 +167,9 @@ public 
ShowLocksRequest(ShowLocksRequest other) { this.partname = other.partname; } this.isExtended = other.isExtended; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ShowLocksRequest deepCopy() { @@ -173,6 +183,7 @@ public void clear() { this.partname = null; this.isExtended = false; + this.catName = null; } public String getDbname() { @@ -266,6 +277,29 @@ public void setIsExtendedIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISEXTENDED_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -300,6 +334,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -317,6 +359,9 @@ public Object getFieldValue(_Fields field) { case IS_EXTENDED: return isIsExtended(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -336,6 +381,8 @@ public boolean isSet(_Fields field) { return isSetPartname(); case IS_EXTENDED: return isSetIsExtended(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -389,6 +436,15 @@ public boolean equals(ShowLocksRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if 
(!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -416,6 +472,11 @@ public int hashCode() { if (present_isExtended) list.add(isExtended); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -467,6 +528,16 @@ public int compareTo(ShowLocksRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -522,6 +593,16 @@ public String toString() { sb.append(this.isExtended); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -599,6 +680,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksRequest st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -638,6 +727,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksRequest s oprot.writeBool(struct.isExtended); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -668,7 
+764,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksRequest st if (struct.isSetIsExtended()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetCatName()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDbname()) { oprot.writeString(struct.dbname); } @@ -681,12 +780,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksRequest st if (struct.isSetIsExtended()) { oprot.writeBool(struct.isExtended); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); @@ -703,6 +805,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksRequest str struct.isExtended = iprot.readBool(); struct.setIsExtendedIsSet(true); } + if (incoming.get(4)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java index 13df2bf82b..6127bce89a 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java @@ -54,6 +54,7 @@ private static final org.apache.thrift.protocol.TField BLOCKED_BY_EXT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("blockedByExtId", org.apache.thrift.protocol.TType.I64, (short)14); private static final org.apache.thrift.protocol.TField 
BLOCKED_BY_INT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("blockedByIntId", org.apache.thrift.protocol.TType.I64, (short)15); private static final org.apache.thrift.protocol.TField LOCK_ID_INTERNAL_FIELD_DESC = new org.apache.thrift.protocol.TField("lockIdInternal", org.apache.thrift.protocol.TType.I64, (short)16); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)17); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -77,6 +78,7 @@ private long blockedByExtId; // optional private long blockedByIntId; // optional private long lockIdInternal; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -103,7 +105,8 @@ AGENT_INFO((short)13, "agentInfo"), BLOCKED_BY_EXT_ID((short)14, "blockedByExtId"), BLOCKED_BY_INT_ID((short)15, "blockedByIntId"), - LOCK_ID_INTERNAL((short)16, "lockIdInternal"); + LOCK_ID_INTERNAL((short)16, "lockIdInternal"), + CAT_NAME((short)17, "catName"); private static final Map byName = new HashMap(); @@ -150,6 +153,8 @@ public static _Fields findByThriftId(int fieldId) { return BLOCKED_BY_INT_ID; case 16: // LOCK_ID_INTERNAL return LOCK_ID_INTERNAL; + case 17: // CAT_NAME + return CAT_NAME; default: return null; } @@ -199,7 +204,7 @@ public String getFieldName() { private static final int __BLOCKEDBYINTID_ISSET_ID = 6; private static final int __LOCKIDINTERNAL_ISSET_ID = 7; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.TABLENAME,_Fields.PARTNAME,_Fields.TXNID,_Fields.ACQUIREDAT,_Fields.HEARTBEAT_COUNT,_Fields.AGENT_INFO,_Fields.BLOCKED_BY_EXT_ID,_Fields.BLOCKED_BY_INT_ID,_Fields.LOCK_ID_INTERNAL}; + private static final _Fields optionals[] = 
{_Fields.TABLENAME,_Fields.PARTNAME,_Fields.TXNID,_Fields.ACQUIREDAT,_Fields.HEARTBEAT_COUNT,_Fields.AGENT_INFO,_Fields.BLOCKED_BY_EXT_ID,_Fields.BLOCKED_BY_INT_ID,_Fields.LOCK_ID_INTERNAL,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -235,6 +240,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.LOCK_ID_INTERNAL, new org.apache.thrift.meta_data.FieldMetaData("lockIdInternal", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ShowLocksResponseElement.class, metaDataMap); } @@ -302,6 +309,9 @@ public ShowLocksResponseElement(ShowLocksResponseElement other) { this.blockedByExtId = other.blockedByExtId; this.blockedByIntId = other.blockedByIntId; this.lockIdInternal = other.lockIdInternal; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ShowLocksResponseElement deepCopy() { @@ -334,6 +344,7 @@ public void clear() { this.blockedByIntId = 0; setLockIdInternalIsSet(false); this.lockIdInternal = 0; + this.catName = null; } public long getLockid() { @@ -712,6 +723,29 @@ public void setLockIdInternalIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOCKIDINTERNAL_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String 
catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case LOCKID: @@ -842,6 +876,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -895,6 +937,9 @@ public Object getFieldValue(_Fields field) { case LOCK_ID_INTERNAL: return getLockIdInternal(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -938,6 +983,8 @@ public boolean isSet(_Fields field) { return isSetBlockedByIntId(); case LOCK_ID_INTERNAL: return isSetLockIdInternal(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -1099,6 +1146,15 @@ public boolean equals(ShowLocksResponseElement that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -1186,6 +1242,11 @@ public int hashCode() { if (present_lockIdInternal) list.add(lockIdInternal); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -1357,6 +1418,16 @@ public int compareTo(ShowLocksResponseElement other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } 
+ if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1490,6 +1561,16 @@ public String toString() { sb.append(this.lockIdInternal); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1691,6 +1772,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponseEl org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 17: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1786,6 +1875,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponseE oprot.writeI64(struct.lockIdInternal); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1838,7 +1934,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponseEl if (struct.isSetLockIdInternal()) { optionals.set(8); } - oprot.writeBitSet(optionals, 9); + if (struct.isSetCatName()) { + optionals.set(9); + } + oprot.writeBitSet(optionals, 10); if (struct.isSetTablename()) { oprot.writeString(struct.tablename); } @@ -1866,6 +1965,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponseEl if (struct.isSetLockIdInternal()) { oprot.writeI64(struct.lockIdInternal); } + if 
(struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -1885,7 +1987,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponseEle struct.setUserIsSet(true); struct.hostname = iprot.readString(); struct.setHostnameIsSet(true); - BitSet incoming = iprot.readBitSet(9); + BitSet incoming = iprot.readBitSet(10); if (incoming.get(0)) { struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); @@ -1922,6 +2024,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponseEle struct.lockIdInternal = iprot.readI64(); struct.setLockIdInternalIsSet(true); } + if (incoming.get(9)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index fe545150d5..6b26391def 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -16569,6 +16569,10 @@ class ReplTblWriteIdStateRequest { * @var string[] */ public $partNames = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -16601,6 +16605,10 @@ class ReplTblWriteIdStateRequest { 'type' => TType::STRING, ), ), + 7 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -16622,6 +16630,9 @@ class ReplTblWriteIdStateRequest { if (isset($vals['partNames'])) { $this->partNames = $vals['partNames']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -16696,6 +16707,13 @@ class ReplTblWriteIdStateRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -16751,6 +16769,11 @@ 
class ReplTblWriteIdStateRequest { } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 7); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -17201,6 +17224,10 @@ class AllocateTableWriteIdsRequest { * @var \metastore\TxnToWriteId[] */ public $srcTxnToWriteIdList = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -17234,6 +17261,10 @@ class AllocateTableWriteIdsRequest { 'class' => '\metastore\TxnToWriteId', ), ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -17252,6 +17283,9 @@ class AllocateTableWriteIdsRequest { if (isset($vals['srcTxnToWriteIdList'])) { $this->srcTxnToWriteIdList = $vals['srcTxnToWriteIdList']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -17330,6 +17364,13 @@ class AllocateTableWriteIdsRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -17392,6 +17433,11 @@ class AllocateTableWriteIdsRequest { } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -17635,6 +17681,10 @@ class LockComponent { * @var bool */ public $isDynamicPartitionWrite = false; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -17671,6 +17721,10 @@ class LockComponent { 
'var' => 'isDynamicPartitionWrite', 'type' => TType::BOOL, ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -17698,6 +17752,9 @@ class LockComponent { if (isset($vals['isDynamicPartitionWrite'])) { $this->isDynamicPartitionWrite = $vals['isDynamicPartitionWrite']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -17776,6 +17833,13 @@ class LockComponent { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -17829,6 +17893,11 @@ class LockComponent { $xfer += $output->writeBool($this->isDynamicPartitionWrite); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -17859,6 +17928,10 @@ class LockRequest { * @var string */ public $agentInfo = "Unknown"; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -17888,6 +17961,10 @@ class LockRequest { 'var' => 'agentInfo', 'type' => TType::STRING, ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -17906,6 +17983,9 @@ class LockRequest { if (isset($vals['agentInfo'])) { $this->agentInfo = $vals['agentInfo']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -17974,6 +18054,13 @@ class LockRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18024,6 
+18111,11 @@ class LockRequest { $xfer += $output->writeString($this->agentInfo); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18344,6 +18436,10 @@ class ShowLocksRequest { * @var bool */ public $isExtended = false; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18364,6 +18460,10 @@ class ShowLocksRequest { 'var' => 'isExtended', 'type' => TType::BOOL, ), + 5 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18379,6 +18479,9 @@ class ShowLocksRequest { if (isset($vals['isExtended'])) { $this->isExtended = $vals['isExtended']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -18429,6 +18532,13 @@ class ShowLocksRequest { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18462,6 +18572,11 @@ class ShowLocksRequest { $xfer += $output->writeBool($this->isExtended); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 5); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18536,6 +18651,10 @@ class ShowLocksResponseElement { * @var int */ public $lockIdInternal = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18604,6 +18723,10 @@ class ShowLocksResponseElement { 'var' => 
'lockIdInternal', 'type' => TType::I64, ), + 17 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18655,6 +18778,9 @@ class ShowLocksResponseElement { if (isset($vals['lockIdInternal'])) { $this->lockIdInternal = $vals['lockIdInternal']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -18789,6 +18915,13 @@ class ShowLocksResponseElement { $xfer += $input->skip($ftype); } break; + case 17: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18882,6 +19015,11 @@ class ShowLocksResponseElement { $xfer += $output->writeI64($this->lockIdInternal); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 17); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19381,6 +19519,10 @@ class CompactionRequest { * @var array */ public $properties = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -19417,6 +19559,10 @@ class CompactionRequest { 'type' => TType::STRING, ), ), + 7 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -19438,6 +19584,9 @@ class CompactionRequest { if (isset($vals['properties'])) { $this->properties = $vals['properties']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -19515,6 +19664,13 @@ class CompactionRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -19571,6 +19727,11 @@ class 
CompactionRequest { } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 7); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19804,6 +19965,10 @@ class ShowCompactResponseElement { * @var int */ public $id = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -19860,6 +20025,10 @@ class ShowCompactResponseElement { 'var' => 'id', 'type' => TType::I64, ), + 14 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -19902,6 +20071,9 @@ class ShowCompactResponseElement { if (isset($vals['id'])) { $this->id = $vals['id']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -20015,6 +20187,13 @@ class ShowCompactResponseElement { $xfer += $input->skip($ftype); } break; + case 14: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -20093,6 +20272,11 @@ class ShowCompactResponseElement { $xfer += $output->writeI64($this->id); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 14); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -20230,6 +20414,10 @@ class AddDynamicPartitions { * @var int */ public $operationType = 5; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -20262,6 +20450,10 @@ class AddDynamicPartitions { 'var' => 'operationType', 'type' => TType::I32, ), + 7 => array( + 'var' => 
'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -20283,6 +20475,9 @@ class AddDynamicPartitions { if (isset($vals['operationType'])) { $this->operationType = $vals['operationType']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -20357,6 +20552,13 @@ class AddDynamicPartitions { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -20412,6 +20614,11 @@ class AddDynamicPartitions { $xfer += $output->writeI32($this->operationType); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 7); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -20446,6 +20653,10 @@ class BasicTxnInfo { * @var string */ public $partitionname = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -20474,6 +20685,10 @@ class BasicTxnInfo { 'var' => 'partitionname', 'type' => TType::STRING, ), + 7 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -20495,6 +20710,9 @@ class BasicTxnInfo { if (isset($vals['partitionname'])) { $this->partitionname = $vals['partitionname']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -20559,6 +20777,13 @@ class BasicTxnInfo { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -20602,6 +20827,11 @@ class BasicTxnInfo { $xfer += $output->writeString($this->partitionname); $xfer 
+= $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 7); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 786c8c531d..19793f9853 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -11567,6 +11567,7 @@ class ReplTblWriteIdStateRequest: - dbName - tableName - partNames + - catName """ thrift_spec = ( @@ -11577,15 +11578,17 @@ class ReplTblWriteIdStateRequest: (4, TType.STRING, 'dbName', None, None, ), # 4 (5, TType.STRING, 'tableName', None, None, ), # 5 (6, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 6 + (7, TType.STRING, 'catName', None, None, ), # 7 ) - def __init__(self, validWriteIdlist=None, user=None, hostName=None, dbName=None, tableName=None, partNames=None,): + def __init__(self, validWriteIdlist=None, user=None, hostName=None, dbName=None, tableName=None, partNames=None, catName=None,): self.validWriteIdlist = validWriteIdlist self.user = user self.hostName = hostName self.dbName = dbName self.tableName = tableName self.partNames = partNames + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -11631,6 +11634,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -11668,6 +11676,10 @@ def write(self, oprot): oprot.writeString(iter529) 
oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 7) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -11693,6 +11705,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tableName) value = (value * 31) ^ hash(self.partNames) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -12013,6 +12026,7 @@ class AllocateTableWriteIdsRequest: - txnIds - replPolicy - srcTxnToWriteIdList + - catName """ thrift_spec = ( @@ -12022,14 +12036,16 @@ class AllocateTableWriteIdsRequest: (3, TType.LIST, 'txnIds', (TType.I64,None), None, ), # 3 (4, TType.STRING, 'replPolicy', None, None, ), # 4 (5, TType.LIST, 'srcTxnToWriteIdList', (TType.STRUCT,(TxnToWriteId, TxnToWriteId.thrift_spec)), None, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, dbName=None, tableName=None, txnIds=None, replPolicy=None, srcTxnToWriteIdList=None,): + def __init__(self, dbName=None, tableName=None, txnIds=None, replPolicy=None, srcTxnToWriteIdList=None, catName=None,): self.dbName = dbName self.tableName = tableName self.txnIds = txnIds self.replPolicy = replPolicy self.srcTxnToWriteIdList = srcTxnToWriteIdList + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -12076,6 +12092,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -12112,6 +12133,10 @@ def write(self, oprot): iter564.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + 
oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -12130,6 +12155,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.txnIds) value = (value * 31) ^ hash(self.replPolicy) value = (value * 31) ^ hash(self.srcTxnToWriteIdList) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -12312,6 +12338,7 @@ class LockComponent: - operationType - isTransactional - isDynamicPartitionWrite + - catName """ thrift_spec = ( @@ -12324,9 +12351,10 @@ class LockComponent: (6, TType.I32, 'operationType', None, 5, ), # 6 (7, TType.BOOL, 'isTransactional', None, False, ), # 7 (8, TType.BOOL, 'isDynamicPartitionWrite', None, False, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, type=None, level=None, dbname=None, tablename=None, partitionname=None, operationType=thrift_spec[6][4], isTransactional=thrift_spec[7][4], isDynamicPartitionWrite=thrift_spec[8][4],): + def __init__(self, type=None, level=None, dbname=None, tablename=None, partitionname=None, operationType=thrift_spec[6][4], isTransactional=thrift_spec[7][4], isDynamicPartitionWrite=thrift_spec[8][4], catName=None,): self.type = type self.level = level self.dbname = dbname @@ -12335,6 +12363,7 @@ def __init__(self, type=None, level=None, dbname=None, tablename=None, partition self.operationType = operationType self.isTransactional = isTransactional self.isDynamicPartitionWrite = isDynamicPartitionWrite + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -12385,6 +12414,11 @@ def read(self, iprot): self.isDynamicPartitionWrite = iprot.readBool() else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -12427,6 
+12461,10 @@ def write(self, oprot): oprot.writeFieldBegin('isDynamicPartitionWrite', TType.BOOL, 8) oprot.writeBool(self.isDynamicPartitionWrite) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -12450,6 +12488,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.operationType) value = (value * 31) ^ hash(self.isTransactional) value = (value * 31) ^ hash(self.isDynamicPartitionWrite) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -12471,6 +12510,7 @@ class LockRequest: - user - hostname - agentInfo + - catName """ thrift_spec = ( @@ -12480,14 +12520,16 @@ class LockRequest: (3, TType.STRING, 'user', None, None, ), # 3 (4, TType.STRING, 'hostname', None, None, ), # 4 (5, TType.STRING, 'agentInfo', None, "Unknown", ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, component=None, txnid=None, user=None, hostname=None, agentInfo=thrift_spec[5][4],): + def __init__(self, component=None, txnid=None, user=None, hostname=None, agentInfo=thrift_spec[5][4], catName=None,): self.component = component self.txnid = txnid self.user = user self.hostname = hostname self.agentInfo = agentInfo + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -12529,6 +12571,11 @@ def read(self, iprot): self.agentInfo = iprot.readString() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -12562,6 +12609,10 @@ def write(self, oprot): oprot.writeFieldBegin('agentInfo', TType.STRING, 5) oprot.writeString(self.agentInfo) oprot.writeFieldEnd() + if self.catName is 
not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -12582,6 +12633,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.user) value = (value * 31) ^ hash(self.hostname) value = (value * 31) ^ hash(self.agentInfo) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -12844,6 +12896,7 @@ class ShowLocksRequest: - tablename - partname - isExtended + - catName """ thrift_spec = ( @@ -12852,13 +12905,15 @@ class ShowLocksRequest: (2, TType.STRING, 'tablename', None, None, ), # 2 (3, TType.STRING, 'partname', None, None, ), # 3 (4, TType.BOOL, 'isExtended', None, False, ), # 4 + (5, TType.STRING, 'catName', None, None, ), # 5 ) - def __init__(self, dbname=None, tablename=None, partname=None, isExtended=thrift_spec[4][4],): + def __init__(self, dbname=None, tablename=None, partname=None, isExtended=thrift_spec[4][4], catName=None,): self.dbname = dbname self.tablename = tablename self.partname = partname self.isExtended = isExtended + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -12889,6 +12944,11 @@ def read(self, iprot): self.isExtended = iprot.readBool() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -12915,6 +12975,10 @@ def write(self, oprot): oprot.writeFieldBegin('isExtended', TType.BOOL, 4) oprot.writeBool(self.isExtended) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 5) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -12928,6 +12992,7 @@ def __hash__(self): value = (value * 31) ^ 
hash(self.tablename) value = (value * 31) ^ hash(self.partname) value = (value * 31) ^ hash(self.isExtended) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -12960,6 +13025,7 @@ class ShowLocksResponseElement: - blockedByExtId - blockedByIntId - lockIdInternal + - catName """ thrift_spec = ( @@ -12980,9 +13046,10 @@ class ShowLocksResponseElement: (14, TType.I64, 'blockedByExtId', None, None, ), # 14 (15, TType.I64, 'blockedByIntId', None, None, ), # 15 (16, TType.I64, 'lockIdInternal', None, None, ), # 16 + (17, TType.STRING, 'catName', None, None, ), # 17 ) - def __init__(self, lockid=None, dbname=None, tablename=None, partname=None, state=None, type=None, txnid=None, lastheartbeat=None, acquiredat=None, user=None, hostname=None, heartbeatCount=thrift_spec[12][4], agentInfo=None, blockedByExtId=None, blockedByIntId=None, lockIdInternal=None,): + def __init__(self, lockid=None, dbname=None, tablename=None, partname=None, state=None, type=None, txnid=None, lastheartbeat=None, acquiredat=None, user=None, hostname=None, heartbeatCount=thrift_spec[12][4], agentInfo=None, blockedByExtId=None, blockedByIntId=None, lockIdInternal=None, catName=None,): self.lockid = lockid self.dbname = dbname self.tablename = tablename @@ -12999,6 +13066,7 @@ def __init__(self, lockid=None, dbname=None, tablename=None, partname=None, stat self.blockedByExtId = blockedByExtId self.blockedByIntId = blockedByIntId self.lockIdInternal = lockIdInternal + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13089,6 +13157,11 @@ def read(self, iprot): self.lockIdInternal = iprot.readI64() else: iprot.skip(ftype) + elif fid == 17: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -13163,6 
+13236,10 @@ def write(self, oprot): oprot.writeFieldBegin('lockIdInternal', TType.I64, 16) oprot.writeI64(self.lockIdInternal) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 17) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13202,6 +13279,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.blockedByExtId) value = (value * 31) ^ hash(self.blockedByIntId) value = (value * 31) ^ hash(self.lockIdInternal) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -13556,6 +13634,7 @@ class CompactionRequest: - type - runas - properties + - catName """ thrift_spec = ( @@ -13566,15 +13645,17 @@ class CompactionRequest: (4, TType.I32, 'type', None, None, ), # 4 (5, TType.STRING, 'runas', None, None, ), # 5 (6, TType.MAP, 'properties', (TType.STRING,None,TType.STRING,None), None, ), # 6 + (7, TType.STRING, 'catName', None, None, ), # 7 ) - def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None,): + def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, catName=None,): self.dbname = dbname self.tablename = tablename self.partitionname = partitionname self.type = type self.runas = runas self.properties = properties + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13621,6 +13702,11 @@ def read(self, iprot): iprot.readMapEnd() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -13659,6 +13745,10 @@ def write(self, oprot): oprot.writeString(viter608) oprot.writeMapEnd() oprot.writeFieldEnd() + if self.catName is not 
None: + oprot.writeFieldBegin('catName', TType.STRING, 7) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13680,6 +13770,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.type) value = (value * 31) ^ hash(self.runas) value = (value * 31) ^ hash(self.properties) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -13852,6 +13943,7 @@ class ShowCompactResponseElement: - endTime - hadoopJobId - id + - catName """ thrift_spec = ( @@ -13869,9 +13961,10 @@ class ShowCompactResponseElement: (11, TType.I64, 'endTime', None, None, ), # 11 (12, TType.STRING, 'hadoopJobId', None, "None", ), # 12 (13, TType.I64, 'id', None, None, ), # 13 + (14, TType.STRING, 'catName', None, None, ), # 14 ) - def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, state=None, workerid=None, start=None, runAs=None, hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId=thrift_spec[12][4], id=None,): + def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, state=None, workerid=None, start=None, runAs=None, hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId=thrift_spec[12][4], id=None, catName=None,): self.dbname = dbname self.tablename = tablename self.partitionname = partitionname @@ -13885,6 +13978,7 @@ def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, s self.endTime = endTime self.hadoopJobId = hadoopJobId self.id = id + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13960,6 +14054,11 @@ def read(self, iprot): self.id = iprot.readI64() else: iprot.skip(ftype) + elif fid == 14: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14022,6 
+14121,10 @@ def write(self, oprot): oprot.writeFieldBegin('id', TType.I64, 13) oprot.writeI64(self.id) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 14) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14052,6 +14155,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.endTime) value = (value * 31) ^ hash(self.hadoopJobId) value = (value * 31) ^ hash(self.id) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -14150,6 +14254,7 @@ class AddDynamicPartitions: - tablename - partitionnames - operationType + - catName """ thrift_spec = ( @@ -14160,15 +14265,17 @@ class AddDynamicPartitions: (4, TType.STRING, 'tablename', None, None, ), # 4 (5, TType.LIST, 'partitionnames', (TType.STRING,None), None, ), # 5 (6, TType.I32, 'operationType', None, 5, ), # 6 + (7, TType.STRING, 'catName', None, None, ), # 7 ) - def __init__(self, txnid=None, writeid=None, dbname=None, tablename=None, partitionnames=None, operationType=thrift_spec[6][4],): + def __init__(self, txnid=None, writeid=None, dbname=None, tablename=None, partitionnames=None, operationType=thrift_spec[6][4], catName=None,): self.txnid = txnid self.writeid = writeid self.dbname = dbname self.tablename = tablename self.partitionnames = partitionnames self.operationType = operationType + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14214,6 +14321,11 @@ def read(self, iprot): self.operationType = iprot.readI32() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14251,6 +14363,10 @@ def write(self, oprot): oprot.writeFieldBegin('operationType', TType.I32, 6) 
oprot.writeI32(self.operationType) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 7) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14276,6 +14392,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tablename) value = (value * 31) ^ hash(self.partitionnames) value = (value * 31) ^ hash(self.operationType) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -14298,6 +14415,7 @@ class BasicTxnInfo: - dbname - tablename - partitionname + - catName """ thrift_spec = ( @@ -14308,15 +14426,17 @@ class BasicTxnInfo: (4, TType.STRING, 'dbname', None, None, ), # 4 (5, TType.STRING, 'tablename', None, None, ), # 5 (6, TType.STRING, 'partitionname', None, None, ), # 6 + (7, TType.STRING, 'catName', None, None, ), # 7 ) - def __init__(self, isnull=None, time=None, txnid=None, dbname=None, tablename=None, partitionname=None,): + def __init__(self, isnull=None, time=None, txnid=None, dbname=None, tablename=None, partitionname=None, catName=None,): self.isnull = isnull self.time = time self.txnid = txnid self.dbname = dbname self.tablename = tablename self.partitionname = partitionname + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14357,6 +14477,11 @@ def read(self, iprot): self.partitionname = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14391,6 +14516,10 @@ def write(self, oprot): oprot.writeFieldBegin('partitionname', TType.STRING, 6) oprot.writeString(self.partitionname) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 7) + 
oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14408,6 +14537,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbname) value = (value * 31) ^ hash(self.tablename) value = (value * 31) ^ hash(self.partitionname) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): diff --git standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 91745967bc..dd2c7ba66e 100644 --- standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2587,6 +2587,7 @@ class ReplTblWriteIdStateRequest DBNAME = 4 TABLENAME = 5 PARTNAMES = 6 + CATNAME = 7 FIELDS = { VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdlist'}, @@ -2594,7 +2595,8 @@ class ReplTblWriteIdStateRequest HOSTNAME => {:type => ::Thrift::Types::STRING, :name => 'hostName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, - PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true} + PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2682,13 +2684,15 @@ class AllocateTableWriteIdsRequest TXNIDS = 3 REPLPOLICY = 4 SRCTXNTOWRITEIDLIST = 5 + CATNAME = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, TXNIDS => {:type => ::Thrift::Types::LIST, :name => 'txnIds', :element => {:type => ::Thrift::Types::I64}, :optional => true}, REPLPOLICY => {:type => ::Thrift::Types::STRING, :name => 
'replPolicy', :optional => true}, - SRCTXNTOWRITEIDLIST => {:type => ::Thrift::Types::LIST, :name => 'srcTxnToWriteIdList', :element => {:type => ::Thrift::Types::STRUCT, :class => ::TxnToWriteId}, :optional => true} + SRCTXNTOWRITEIDLIST => {:type => ::Thrift::Types::LIST, :name => 'srcTxnToWriteIdList', :element => {:type => ::Thrift::Types::STRUCT, :class => ::TxnToWriteId}, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2748,6 +2752,7 @@ class LockComponent OPERATIONTYPE = 6 ISTRANSACTIONAL = 7 ISDYNAMICPARTITIONWRITE = 8 + CATNAME = 9 FIELDS = { TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :enum_class => ::LockType}, @@ -2757,7 +2762,8 @@ class LockComponent PARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'partitionname', :optional => true}, OPERATIONTYPE => {:type => ::Thrift::Types::I32, :name => 'operationType', :default => 5, :optional => true, :enum_class => ::DataOperationType}, ISTRANSACTIONAL => {:type => ::Thrift::Types::BOOL, :name => 'isTransactional', :default => false, :optional => true}, - ISDYNAMICPARTITIONWRITE => {:type => ::Thrift::Types::BOOL, :name => 'isDynamicPartitionWrite', :default => false, :optional => true} + ISDYNAMICPARTITIONWRITE => {:type => ::Thrift::Types::BOOL, :name => 'isDynamicPartitionWrite', :default => false, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2787,13 +2793,15 @@ class LockRequest USER = 3 HOSTNAME = 4 AGENTINFO = 5 + CATNAME = 6 FIELDS = { COMPONENT => {:type => ::Thrift::Types::LIST, :name => 'component', :element => {:type => ::Thrift::Types::STRUCT, :class => ::LockComponent}}, TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid', :optional => true}, USER => {:type => ::Thrift::Types::STRING, :name => 'user'}, HOSTNAME => {:type => ::Thrift::Types::STRING, :name => 'hostname'}, - 
AGENTINFO => {:type => ::Thrift::Types::STRING, :name => 'agentInfo', :default => %q"Unknown", :optional => true} + AGENTINFO => {:type => ::Thrift::Types::STRING, :name => 'agentInfo', :default => %q"Unknown", :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2874,12 +2882,14 @@ class ShowLocksRequest TABLENAME = 2 PARTNAME = 3 ISEXTENDED = 4 + CATNAME = 5 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname', :optional => true}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename', :optional => true}, PARTNAME => {:type => ::Thrift::Types::STRING, :name => 'partname', :optional => true}, - ISEXTENDED => {:type => ::Thrift::Types::BOOL, :name => 'isExtended', :default => false, :optional => true} + ISEXTENDED => {:type => ::Thrift::Types::BOOL, :name => 'isExtended', :default => false, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2908,6 +2918,7 @@ class ShowLocksResponseElement BLOCKEDBYEXTID = 14 BLOCKEDBYINTID = 15 LOCKIDINTERNAL = 16 + CATNAME = 17 FIELDS = { LOCKID => {:type => ::Thrift::Types::I64, :name => 'lockid'}, @@ -2925,7 +2936,8 @@ class ShowLocksResponseElement AGENTINFO => {:type => ::Thrift::Types::STRING, :name => 'agentInfo', :optional => true}, BLOCKEDBYEXTID => {:type => ::Thrift::Types::I64, :name => 'blockedByExtId', :optional => true}, BLOCKEDBYINTID => {:type => ::Thrift::Types::I64, :name => 'blockedByIntId', :optional => true}, - LOCKIDINTERNAL => {:type => ::Thrift::Types::I64, :name => 'lockIdInternal', :optional => true} + LOCKIDINTERNAL => {:type => ::Thrift::Types::I64, :name => 'lockIdInternal', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3031,6 +3043,7 @@ class CompactionRequest TYPE = 4 RUNAS = 5 PROPERTIES 
= 6 + CATNAME = 7 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, @@ -3038,7 +3051,8 @@ class CompactionRequest PARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'partitionname', :optional => true}, TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :enum_class => ::CompactionType}, RUNAS => {:type => ::Thrift::Types::STRING, :name => 'runas', :optional => true}, - PROPERTIES => {:type => ::Thrift::Types::MAP, :name => 'properties', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}, :optional => true} + PROPERTIES => {:type => ::Thrift::Types::MAP, :name => 'properties', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3108,6 +3122,7 @@ class ShowCompactResponseElement ENDTIME = 11 HADOOPJOBID = 12 ID = 13 + CATNAME = 14 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, @@ -3122,7 +3137,8 @@ class ShowCompactResponseElement METAINFO => {:type => ::Thrift::Types::STRING, :name => 'metaInfo', :optional => true}, ENDTIME => {:type => ::Thrift::Types::I64, :name => 'endTime', :optional => true}, HADOOPJOBID => {:type => ::Thrift::Types::STRING, :name => 'hadoopJobId', :default => %q"None", :optional => true}, - ID => {:type => ::Thrift::Types::I64, :name => 'id', :optional => true} + ID => {:type => ::Thrift::Types::I64, :name => 'id', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3165,6 +3181,7 @@ class AddDynamicPartitions TABLENAME = 4 PARTITIONNAMES = 5 OPERATIONTYPE = 6 + CATNAME = 7 FIELDS = { TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid'}, @@ -3172,7 +3189,8 @@ class AddDynamicPartitions DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TABLENAME => 
{:type => ::Thrift::Types::STRING, :name => 'tablename'}, PARTITIONNAMES => {:type => ::Thrift::Types::LIST, :name => 'partitionnames', :element => {:type => ::Thrift::Types::STRING}}, - OPERATIONTYPE => {:type => ::Thrift::Types::I32, :name => 'operationType', :default => 5, :optional => true, :enum_class => ::DataOperationType} + OPERATIONTYPE => {:type => ::Thrift::Types::I32, :name => 'operationType', :default => 5, :optional => true, :enum_class => ::DataOperationType}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3199,6 +3217,7 @@ class BasicTxnInfo DBNAME = 4 TABLENAME = 5 PARTITIONNAME = 6 + CATNAME = 7 FIELDS = { ISNULL => {:type => ::Thrift::Types::BOOL, :name => 'isnull'}, @@ -3206,7 +3225,8 @@ class BasicTxnInfo TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid', :optional => true}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname', :optional => true}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename', :optional => true}, - PARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'partitionname', :optional => true} + PARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'partitionname', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java index 99c5abcf59..0185938dec 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; 
import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -230,8 +231,9 @@ private void addMaterializedView(String dbName, String tableName, Set ta // check if the MV is still valid. try { String[] names = qNameTableUsed.split("\\."); + // TODO - need to fix this to get the catalog name of the table too when HIVE-18960 is done BasicTxnInfo e = handler.getTxnHandler().getFirstCompletedTransactionForTableAfterCommit( - names[0], names[1], tableTxnList); + MetaStoreUtils.getDefaultCatalog(conf), names[0], names[1], tableTxnList); if (!e.isIsnull()) { modificationsTree.put(e.getTxnid(), e.getTime()); // We do not need to do anything more for current table, as we detected diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java index e31935ebf5..6dab47b1a2 100755 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -65,7 +65,7 @@ public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database"; public static final String DEFAULT_SERIALIZATION_FORMAT = "1"; public static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; - private static final String CAT_DB_TABLE_SEPARATOR = "."; + public static final String CAT_DB_TABLE_SEPARATOR = "."; private Path whRoot; private final Configuration conf; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java index b34b7d70de..984e242ac7 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java +++ 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java @@ -30,6 +30,7 @@ */ public class CompactionInfo implements Comparable { public long id; + public String catName; public String dbname; public String tableName; public String partName; @@ -53,14 +54,16 @@ private String fullPartitionName = null; private String fullTableName = null; - public CompactionInfo(String dbname, String tableName, String partName, CompactionType type) { + public CompactionInfo(String catName, String dbname, String tableName, String partName, + CompactionType type) { + this.catName = catName; this.dbname = dbname; this.tableName = tableName; this.partName = partName; this.type = type; } - CompactionInfo(long id, String dbname, String tableName, String partName, char state) { - this(dbname, tableName, partName, null); + CompactionInfo(long id, String catName, String dbname, String tableName, String partName, char state) { + this(catName, dbname, tableName, partName, null); this.id = id; this.state = state; } @@ -137,6 +140,7 @@ public boolean equals(Object obj) { static CompactionInfo loadFullFromCompactionQueue(ResultSet rs) throws SQLException { CompactionInfo fullCi = new CompactionInfo(); fullCi.id = rs.getLong(1); + fullCi.catName = rs.getString("cq_catalog"); fullCi.dbname = rs.getString(2); fullCi.tableName = rs.getString(3); fullCi.partName = rs.getString(4); @@ -166,5 +170,6 @@ static void insertIntoCompletedCompactions(PreparedStatement pStmt, CompactionIn pStmt.setLong(12, ci.highestWriteId); pStmt.setBytes(13, ci.metaInfo); pStmt.setString(14, ci.hadoopJobId); + pStmt.setString(15, ci.catName); } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index 4e3068d7eb..87272e1b9d 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -70,7 +70,7 @@ public CompactionTxnHandler() { stmt = dbConn.createStatement(); // Check for completed transactions String s = "select distinct ctc_database, ctc_table, " + - "ctc_partition from COMPLETED_TXN_COMPONENTS"; + "ctc_partition, ctc_catalog from COMPLETED_TXN_COMPONENTS"; LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); while (rs.next()) { @@ -78,15 +78,16 @@ public CompactionTxnHandler() { info.dbname = rs.getString(1); info.tableName = rs.getString(2); info.partName = rs.getString(3); + info.catName = rs.getString(4); response.add(info); } rs.close(); // Check for aborted txns - s = "select tc_database, tc_table, tc_partition " + + s = "select tc_database, tc_table, tc_partition, tc_catalog " + "from TXNS, TXN_COMPONENTS " + "where txn_id = tc_txnid and txn_state = '" + TXN_ABORTED + "' " + - "group by tc_database, tc_table, tc_partition " + + "group by tc_catalog, tc_database, tc_table, tc_partition " + "having count(*) > " + maxAborted; LOG.debug("Going to execute query <" + s + ">"); @@ -96,6 +97,7 @@ public CompactionTxnHandler() { info.dbname = rs.getString(1); info.tableName = rs.getString(2); info.partName = rs.getString(3); + info.catName = rs.getString(4); info.tooManyAborts = true; response.add(info); } @@ -173,7 +175,8 @@ public CompactionInfo findNextToCompact(String workerId) throws MetaException { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); String s = "select cq_id, cq_database, cq_table, cq_partition, " + - "cq_type, cq_tblproperties from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE + "'"; + "cq_type, cq_tblproperties, cq_catalog from COMPACTION_QUEUE where cq_state = '" + + INITIATED_STATE + "'"; LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); if (!rs.next()) { @@ -185,6 +188,7 @@ public CompactionInfo findNextToCompact(String 
workerId) throws MetaException { do { CompactionInfo info = new CompactionInfo(); info.id = rs.getLong(1); + info.catName = rs.getString("cq_catalog"); info.dbname = rs.getString(2); info.tableName = rs.getString(3); info.partName = rs.getString(4); @@ -286,14 +290,15 @@ public void markCompacted(CompactionInfo info) throws MetaException { try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); - String s = "select cq_id, cq_database, cq_table, cq_partition, " - + "cq_type, cq_run_as, cq_highest_write_id from COMPACTION_QUEUE where cq_state = '" - + READY_FOR_CLEANING + "'"; + String s = "select cq_id, cq_database, cq_table, cq_partition, cq_type, cq_run_as, " + + "cq_highest_write_id, cq_catalog from COMPACTION_QUEUE where cq_state = '" + + READY_FOR_CLEANING + "'"; LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); while (rs.next()) { CompactionInfo info = new CompactionInfo(); info.id = rs.getLong(1); + info.catName = rs.getString("cq_catalog"); info.dbname = rs.getString(2); info.tableName = rs.getString(3); info.partName = rs.getString(4); @@ -339,7 +344,11 @@ public void markCleaned(CompactionInfo info) throws MetaException { ResultSet rs = null; try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); + pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, " + + "CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, " + + "CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID, cq_catalog " + + "from COMPACTION_QUEUE " + + "WHERE CQ_ID = ?"); pStmt.setLong(1, info.id); rs = pStmt.executeQuery(); if(rs.next()) { @@ -359,7 +368,11 @@ public void markCleaned(CompactionInfo info) throws 
MetaException { LOG.debug("Going to rollback"); dbConn.rollback(); } - pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); + pStmt = dbConn.prepareStatement( + "insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, " + + "CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, " + + "CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID, cc_catalog) " + + "VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?,?)"); info.state = SUCCEEDED_STATE; CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn)); updCount = pStmt.executeUpdate(); @@ -367,7 +380,7 @@ public void markCleaned(CompactionInfo info) throws MetaException { // Remove entries from completed_txn_components as well, so we don't start looking there // again but only up to the highest write ID include in this compaction job. //highestWriteId will be NULL in upgrade scenarios - s = "delete from COMPLETED_TXN_COMPONENTS where ctc_database = ? and " + + s = "delete from COMPLETED_TXN_COMPONENTS where ctc_catalog = ? and ctc_database = ? and " + "ctc_table = ?"; if (info.partName != null) { s += " and ctc_partition = ?"; @@ -377,6 +390,7 @@ public void markCleaned(CompactionInfo info) throws MetaException { } pStmt = dbConn.prepareStatement(s); int paramCount = 1; + pStmt.setString(paramCount++, info.catName); pStmt.setString(paramCount++, info.dbname); pStmt.setString(paramCount++, info.tableName); if (info.partName != null) { @@ -392,12 +406,13 @@ public void markCleaned(CompactionInfo info) throws MetaException { } s = "select distinct txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '" + - TXN_ABORTED + "' and tc_database = ? and tc_table = ?"; + TXN_ABORTED + "' and tc_catalog = ? and tc_database = ? 
and tc_table = ?"; if (info.highestWriteId != 0) s += " and tc_writeid <= ?"; if (info.partName != null) s += " and tc_partition = ?"; pStmt = dbConn.prepareStatement(s); paramCount = 1; + pStmt.setString(paramCount++, info.catName); pStmt.setString(paramCount++, info.dbname); pStmt.setString(paramCount++, info.tableName); if(info.highestWriteId != 0) { @@ -427,6 +442,7 @@ public void markCleaned(CompactionInfo info) throws MetaException { prefix.append("delete from TXN_COMPONENTS where "); //because 1 txn may include different partitions/tables even in auto commit mode + suffix.append(" and tc_catalog = ?"); suffix.append(" and tc_database = ?"); suffix.append(" and tc_table = ?"); if (info.partName != null) { @@ -449,6 +465,7 @@ public void markCleaned(CompactionInfo info) throws MetaException { } totalCount += insertCount; paramCount = insertCount + 1; + pStmt.setString(paramCount++, info.catName); pStmt.setString(paramCount++, info.dbname); pStmt.setString(paramCount++, info.tableName); if (info.partName != null) { @@ -858,14 +875,16 @@ public void purgeCompactionHistory() throws MetaException { stmt = dbConn.createStatement(); /*cc_id is monotonically increasing so for any entity sorts in order of compaction history, thus this query groups by entity and withing group sorts most recent first*/ - rs = stmt.executeQuery("select cc_id, cc_database, cc_table, cc_partition, cc_state from " + - "COMPLETED_COMPACTIONS order by cc_database, cc_table, cc_partition, cc_id desc"); + rs = stmt.executeQuery("select cc_id, cc_database, cc_table, cc_partition, cc_state, cc_catalog" + + " from COMPLETED_COMPACTIONS " + + "order by cc_catalog, cc_database, cc_table, cc_partition, cc_id desc"); String lastCompactedEntity = null; /*In each group, walk from most recent and count occurences of each state type. 
Once you * have counted enough (for each state) to satisfy retention policy, delete all other * instances of this status.*/ while(rs.next()) { - CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0)); + CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString("cc_catalog"), + rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0)); if(!ci.getFullPartitionName().equals(lastCompactedEntity)) { lastCompactedEntity = ci.getFullPartitionName(); rc = new RetentionCounters(MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), @@ -955,14 +974,14 @@ public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException { try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); pStmt = dbConn.prepareStatement("select CC_STATE from COMPLETED_COMPACTIONS where " + - "CC_DATABASE = ? and " + - "CC_TABLE = ? " + - (ci.partName != null ? "and CC_PARTITION = ?" : "") + + "cc_catalog = ? and CC_DATABASE = ? and CC_TABLE = ? " + + (ci.partName != null ? "and CC_PARTITION = ?" 
: "") + " and CC_STATE != " + quoteChar(ATTEMPTED_STATE) + " order by CC_ID desc"); - pStmt.setString(1, ci.dbname); - pStmt.setString(2, ci.tableName); + pStmt.setString(1, ci.catName); + pStmt.setString(2, ci.dbname); + pStmt.setString(3, ci.tableName); if (ci.partName != null) { - pStmt.setString(3, ci.partName); + pStmt.setString(4, ci.partName); } rs = pStmt.executeQuery(); int numFailed = 0; @@ -1010,7 +1029,10 @@ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this sho try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); - pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); + pStmt = dbConn.prepareStatement( + "select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, " + + "CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, " + + "CQ_META_INFO, CQ_HADOOP_JOB_ID, cq_catalog from COMPACTION_QUEUE WHERE CQ_ID = ?"); pStmt.setLong(1, ci.id); rs = pStmt.executeQuery(); if(rs.next()) { @@ -1044,7 +1066,11 @@ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this sho close(rs, stmt, null); closeStmt(pStmt); - pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); + pStmt = dbConn.prepareStatement( + "insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, " + + "CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, " + + "CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID, cc_catalog) " + + "VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?,?)"); 
CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn)); int updCount = pStmt.executeUpdate(); LOG.debug("Going to commit"); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index 7b58cbe3e0..3914030570 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -84,6 +84,7 @@ public static void prepDb(Configuration conf) throws Exception { stmt.execute("CREATE TABLE TXN_COMPONENTS (" + " TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID)," + + " TC_CATALOG varchar(256) NOT NULL," + " TC_DATABASE varchar(128) NOT NULL," + " TC_TABLE varchar(128)," + " TC_PARTITION varchar(767)," + @@ -91,6 +92,7 @@ public static void prepDb(Configuration conf) throws Exception { " TC_WRITEID bigint)"); stmt.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" + " CTC_TXNID bigint NOT NULL," + + " CTC_CATALOG varchar(256) NOT NULL," + " CTC_DATABASE varchar(128) NOT NULL," + " CTC_TABLE varchar(128)," + " CTC_PARTITION varchar(767)," + @@ -102,10 +104,12 @@ public static void prepDb(Configuration conf) throws Exception { stmt.execute("CREATE TABLE TXN_TO_WRITE_ID (" + " T2W_TXNID bigint NOT NULL," + + " T2W_CATALOG varchar(256) NOT NULL," + " T2W_DATABASE varchar(128) NOT NULL," + " T2W_TABLE varchar(256) NOT NULL," + " T2W_WRITEID bigint NOT NULL)"); stmt.execute("CREATE TABLE NEXT_WRITE_ID (" + + " NWI_CATALOG varchar(256) NOT NULL," + " NWI_DATABASE varchar(128) NOT NULL," + " NWI_TABLE varchar(256) NOT NULL," + " NWI_NEXT bigint NOT NULL)"); @@ -119,6 +123,7 @@ public static void prepDb(Configuration conf) throws Exception { " HL_LOCK_EXT_ID bigint NOT NULL," + " HL_LOCK_INT_ID bigint NOT NULL," + " HL_TXNID bigint NOT NULL," + + " HL_CATALOG varchar(256) NOT NULL," + " HL_DB varchar(128) NOT NULL," 
+ " HL_TABLE varchar(128)," + " HL_PARTITION varchar(767)," + @@ -140,6 +145,7 @@ public static void prepDb(Configuration conf) throws Exception { stmt.execute("CREATE TABLE COMPACTION_QUEUE (" + " CQ_ID bigint PRIMARY KEY," + + " CQ_CATALOG varchar(256) NOT NULL," + " CQ_DATABASE varchar(128) NOT NULL," + " CQ_TABLE varchar(128) NOT NULL," + " CQ_PARTITION varchar(767)," + @@ -158,6 +164,7 @@ public static void prepDb(Configuration conf) throws Exception { stmt.execute("CREATE TABLE COMPLETED_COMPACTIONS (" + " CC_ID bigint PRIMARY KEY," + + " CC_CATALOG varchar(256) NOT NULL," + " CC_DATABASE varchar(128) NOT NULL," + " CC_TABLE varchar(128) NOT NULL," + " CC_PARTITION varchar(767)," + @@ -179,6 +186,7 @@ public static void prepDb(Configuration conf) throws Exception { " PRIMARY KEY(MT_KEY1, MT_KEY2))"); stmt.execute("CREATE TABLE WRITE_SET (" + + " WS_CATALOG varchar(128) NOT NULL," + " WS_DATABASE varchar(128) NOT NULL," + " WS_TABLE varchar(128) NOT NULL," + " WS_PARTITION varchar(767)," + diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 565fb89d16..0df21c46fe 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -127,6 +127,7 @@ import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.tools.SQLGenerator; +import org.apache.hadoop.hive.metastore.utils.FullTableName; import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.StringableMap; @@ -886,6 +887,7 @@ public void commitTxn(CommitTxnRequest rqst) MaterializationsRebuildLockHandler materializationsRebuildLockHandler = 
MaterializationsRebuildLockHandler.get(); String fullyQualifiedName = null; + String catName = null; String dbName = null; String tblName = null; long writeId = 0L; @@ -972,8 +974,8 @@ public void commitTxn(CommitTxnRequest rqst) * even if it includes all of it's columns */ int numCompsWritten = stmt.executeUpdate( - "insert into WRITE_SET (ws_database, ws_table, ws_partition, ws_txnid, ws_commit_id, ws_operation_type)" + - " select distinct tc_database, tc_table, tc_partition, tc_txnid, " + commitId + ", tc_operation_type " + conflictSQLSuffix); + "insert into WRITE_SET (ws_catalog, ws_database, ws_table, ws_partition, ws_txnid, ws_commit_id, ws_operation_type)" + + " select distinct tc_catalog, tc_database, tc_table, tc_partition, tc_txnid, " + commitId + ", tc_operation_type " + conflictSQLSuffix); /** * see if there are any overlapping txns wrote the same element, i.e. have a conflict * Since entire commit operation is mutexed wrt other start/commit ops, @@ -986,9 +988,11 @@ public void commitTxn(CommitTxnRequest rqst) rs = stmt.executeQuery (sqlGenerator.addLimitClause(1, "committed.ws_txnid, committed.ws_commit_id, committed.ws_database," + "committed.ws_table, committed.ws_partition, cur.ws_commit_id cur_ws_commit_id, " + - "cur.ws_operation_type cur_op, committed.ws_operation_type committed_op " + + "cur.ws_operation_type cur_op, committed.ws_operation_type committed_op, committed.ws_catalog " + "from WRITE_SET committed INNER JOIN WRITE_SET cur " + - "ON committed.ws_database=cur.ws_database and committed.ws_table=cur.ws_table " + + "ON committed.ws_catalog = cur.ws_catalog and " + + "committed.ws_database=cur.ws_database and " + + "committed.ws_table=cur.ws_table " + //For partitioned table we always track writes at partition level (never at table) //and for non partitioned - always at table level, thus the same table should never //have entries with partition key and w/o @@ -1004,7 +1008,11 @@ public void commitTxn(CommitTxnRequest rqst) if (rs.next()) { 
//found a conflict String committedTxn = "[" + JavaUtils.txnIdToString(rs.getLong(1)) + "," + rs.getLong(2) + "]"; - StringBuilder resource = new StringBuilder(rs.getString(3)).append("/").append(rs.getString(4)); + StringBuilder resource = new StringBuilder(rs.getString(9)) + .append("/") + .append(rs.getString(3)) + .append("/") + .append(rs.getString(4)); String partitionName = rs.getString(5); if (partitionName != null) { resource.append('/').append(partitionName); @@ -1039,9 +1047,9 @@ public void commitTxn(CommitTxnRequest rqst) } // Move the record from txn_components into completed_txn_components so that the compactor // knows where to look to compact. - String s = "insert into COMPLETED_TXN_COMPONENTS (ctc_txnid, ctc_database, " + - "ctc_table, ctc_partition, ctc_writeid) select tc_txnid, tc_database, tc_table, " + - "tc_partition, tc_writeid from TXN_COMPONENTS where tc_txnid = " + txnid; + String s = "insert into COMPLETED_TXN_COMPONENTS (ctc_txnid, ctc_catalog, ctc_database, " + + "ctc_table, ctc_partition, ctc_writeid) select tc_txnid, tc_catalog, tc_database, " + + "tc_table, tc_partition, tc_writeid from TXN_COMPONENTS where tc_txnid = " + txnid; LOG.debug("Going to execute insert <" + s + ">"); int modCount = 0; if ((modCount = stmt.executeUpdate(s)) < 1) { @@ -1051,13 +1059,14 @@ public void commitTxn(CommitTxnRequest rqst) "completed_txn_components when committing txn! 
" + JavaUtils.txnIdToString(txnid)); } // Obtain information that we need to update registry - s = "select ctc_database, ctc_table, ctc_writeid, ctc_timestamp from COMPLETED_TXN_COMPONENTS where ctc_txnid = " + txnid; + s = "select ctc_database, ctc_table, ctc_writeid, ctc_timestamp, ctc_catalog from COMPLETED_TXN_COMPONENTS where ctc_txnid = " + txnid; LOG.debug("Going to extract table modification information for invalidation cache <" + s + ">"); rs = stmt.executeQuery(s); if (rs.next()) { + catName = rs.getString(5); dbName = rs.getString(1); tblName = rs.getString(2); - fullyQualifiedName = Warehouse.getQualifiedName(dbName, tblName); + fullyQualifiedName = Warehouse.getCatalogQualifiedTableName(catName, dbName, tblName); writeId = rs.getLong(3); timestamp = rs.getTimestamp(4, Calendar.getInstance(TimeZone.getTimeZone("UTC"))).getTime(); } @@ -1128,6 +1137,7 @@ public void commitTxn(CommitTxnRequest rqst) @Override @RetrySemantics.Idempotent("No-op if already replicated the writeid state") public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException { + String catName = rqst.isSetCatName() ? rqst.getCatName() : MetaStoreUtils.getDefaultCatalog(conf); String dbName = rqst.getDbName().toLowerCase(); String tblName = rqst.getTableName().toLowerCase(); ValidWriteIdList validWriteIdList = new ValidReaderWriteIdList(rqst.getValidWriteIdlist()); @@ -1147,7 +1157,8 @@ public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaEx // Check if this txn state is already replicated for this given table. If yes, then it is // idempotent case and just return. 
- String sql = "select nwi_next from NEXT_WRITE_ID where nwi_database = " + quoteString(dbName) + String sql = "select nwi_next from NEXT_WRITE_ID where nwi_catalog = " + + quoteString(catName) + " and nwi_database = " + quoteString(dbName) + " and nwi_table = " + quoteString(tblName); LOG.debug("Going to execute query <" + sql + ">"); @@ -1170,13 +1181,15 @@ public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaEx int i = 0; for (long txn : txnIds) { long writeId = abortedWriteIds.get(i++); - rows.add(txn + ", " + quoteString(dbName) + ", " + quoteString(tblName) + ", " + writeId); + rows.add(txn + ", " + quoteString(catName) + ", " + quoteString(dbName) + ", " + + quoteString(tblName) + ", " + writeId); LOG.info("Allocated writeID: " + writeId + " for txnId: " + txn); } // Insert entries to TXN_TO_WRITE_ID for aborted write ids List inserts = sqlGenerator.createInsertValuesStmt( - "TXN_TO_WRITE_ID (t2w_txnid, t2w_database, t2w_table, t2w_writeid)", rows); + "TXN_TO_WRITE_ID (t2w_txnid, t2w_catalog, t2w_database, t2w_table, t2w_writeid)", + rows); for (String insert : inserts) { LOG.debug("Going to execute insert <" + insert + ">"); stmt.execute(insert); @@ -1297,9 +1310,10 @@ public GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst) // Method to get the Valid write ids list for the given table // Input fullTableName is expected to be of format . private TableValidWriteIds getValidWriteIdsForTable(Statement stmt, String fullTableName, - ValidTxnList validTxnList) throws SQLException { + ValidTxnList validTxnList) + throws SQLException, MetaException { ResultSet rs = null; - String[] names = TxnUtils.getDbTableName(fullTableName); + FullTableName tableName = new FullTableName(fullTableName, conf); try { // Need to initialize to 0 to make sure if nobody modified this table, then current txn // shouldn't read any data. 
@@ -1315,8 +1329,9 @@ private TableValidWriteIds getValidWriteIdsForTable(Statement stmt, String fullT // traverse through all write Ids less than writeId HWM to make exceptions list. // The writeHWM = min(NEXT_WRITE_ID.nwi_next-1, max(TXN_TO_WRITE_ID.t2w_writeid under txnHwm)) String s = "select max(t2w_writeid) from TXN_TO_WRITE_ID where t2w_txnid <= " + txnHwm - + " and t2w_database = " + quoteString(names[0]) - + " and t2w_table = " + quoteString(names[1]); + + " and t2w_catalog = " + quoteString(tableName.catName) + + " and t2w_database = " + quoteString(tableName.dbName) + + " and t2w_table = " + quoteString(tableName.tableName); LOG.debug("Going to execute query<" + s + ">"); rs = stmt.executeQuery(s); if (rs.next()) { @@ -1327,8 +1342,9 @@ private TableValidWriteIds getValidWriteIdsForTable(Statement stmt, String fullT if (writeIdHwm <= 0) { // Need to subtract 1 as nwi_next would be the next write id to be allocated but we need highest // allocated write id. - s = "select nwi_next-1 from NEXT_WRITE_ID where nwi_database = " + quoteString(names[0]) - + " and nwi_table = " + quoteString(names[1]); + s = "select nwi_next-1 from NEXT_WRITE_ID where nwi_catalog = " + quoteString(tableName.catName) + + " and nwi_database = " + quoteString(tableName.dbName) + + " and nwi_table = " + quoteString(tableName.tableName); LOG.debug("Going to execute query<" + s + ">"); rs = stmt.executeQuery(s); if (rs.next()) { @@ -1345,8 +1361,9 @@ private TableValidWriteIds getValidWriteIdsForTable(Statement stmt, String fullT // on write id. The sorting is needed as exceptions list in ValidWriteIdList would be looked-up // using binary search. 
s = "select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where t2w_writeid <= " + writeIdHwm - + " and t2w_database = " + quoteString(names[0]) - + " and t2w_table = " + quoteString(names[1]) + + " and t2w_catalog = " + quoteString(tableName.catName) + + " and t2w_database = " + quoteString(tableName.dbName) + + " and t2w_table = " + quoteString(tableName.tableName) + " order by t2w_writeid asc"; LOG.debug("Going to execute query<" + s + ">"); @@ -1385,6 +1402,7 @@ private TableValidWriteIds getValidWriteIdsForTable(Statement stmt, String fullT public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException { List txnIds; + String catName = rqst.isSetCatName() ? rqst.getCatName() : MetaStoreUtils.getDefaultCatalog(conf); String dbName = rqst.getDbName().toLowerCase(); String tblName = rqst.getTableName().toLowerCase(); try { @@ -1442,7 +1460,8 @@ public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIds // The write id would have been already allocated in case of multi-statement txns where // first write on a table will allocate write id and rest of the writes should re-use it. prefix.append("select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where" - + " t2w_database = " + quoteString(dbName) + + " t2w_catalog = " + quoteString(catName) + + " and t2w_database = " + quoteString(dbName) + " and t2w_table = " + quoteString(tblName) + " and "); suffix.append(""); TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, @@ -1475,23 +1494,26 @@ public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIds // Get the next write id for the given table and update it with new next write id. 
// This is select for update query which takes a lock if the table entry is already there in NEXT_WRITE_ID s = sqlGenerator.addForUpdateClause( - "select nwi_next from NEXT_WRITE_ID where nwi_database = " + quoteString(dbName) - + " and nwi_table = " + quoteString(tblName)); + "select nwi_next from NEXT_WRITE_ID where nwi_catalog = " + quoteString(catName) + + " and nwi_database = " + quoteString(dbName) + + " and nwi_table = " + quoteString(tblName)); LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); if (!rs.next()) { // First allocation of write id should add the table to the next_write_id meta table // The initial value for write id should be 1 and hence we add 1 with number of write ids allocated here writeId = 1; - s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values (" - + quoteString(dbName) + "," + quoteString(tblName) + "," + Long.toString(numOfWriteIds + 1) + ")"; + s = "insert into NEXT_WRITE_ID (nwi_catalog, nwi_database, nwi_table, nwi_next) values (" + + quoteString(catName) + ", " + quoteString(dbName) + "," + quoteString(tblName) + + ", " + Long.toString(numOfWriteIds + 1) + ")"; LOG.debug("Going to execute insert <" + s + ">"); stmt.execute(s); } else { writeId = rs.getLong(1); // Update the NEXT_WRITE_ID for the given table after incrementing by number of write ids allocated s = "update NEXT_WRITE_ID set nwi_next = " + (writeId + numOfWriteIds) - + " where nwi_database = " + quoteString(dbName) + + " where nwi_catalog = " + quoteString(catName) + + " and nwi_database = " + quoteString(dbName) + " and nwi_table = " + quoteString(tblName); LOG.debug("Going to execute update <" + s + ">"); stmt.executeUpdate(s); @@ -1501,7 +1523,8 @@ public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIds // write ids List rows = new ArrayList<>(); for (long txn : txnIds) { - rows.add(txn + ", " + quoteString(dbName) + ", " + quoteString(tblName) + ", " + writeId); + rows.add(txn + ", " + 
quoteString(catName) + ", " + quoteString(dbName) + ", " + + quoteString(tblName) + ", " + writeId); txnToWriteIds.add(new TxnToWriteId(txn, writeId)); LOG.info("Allocated writeID: " + writeId + " for txnId: " + txn); writeId++; @@ -1519,7 +1542,7 @@ public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIds // Insert entries to TXN_TO_WRITE_ID for newly allocated write ids List inserts = sqlGenerator.createInsertValuesStmt( - "TXN_TO_WRITE_ID (t2w_txnid, t2w_database, t2w_table, t2w_writeid)", rows); + "TXN_TO_WRITE_ID (t2w_txnid, t2w_catalog, t2w_database, t2w_table, t2w_writeid)", rows); for (String insert : inserts) { LOG.debug("Going to execute insert <" + insert + ">"); stmt.execute(insert); @@ -1606,7 +1629,7 @@ public void performWriteSetGC() { @Override @RetrySemantics.ReadOnly public BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit( - String inputDbName, String inputTableName, ValidWriteIdList txnList) + String inputCatName, String inputDbName, String inputTableName, ValidWriteIdList txnList) throws MetaException { final List openTxns = Arrays.asList(ArrayUtils.toObject(txnList.getInvalidWriteIds())); @@ -1617,12 +1640,14 @@ public BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit( dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); stmt.setMaxRows(1); - String s = "select ctc_timestamp, ctc_writeid, ctc_database, ctc_table " + String s = "select ctc_timestamp, ctc_writeid, ctc_database, ctc_table, ctc_catalog " + "from COMPLETED_TXN_COMPONENTS " - + "where ctc_database=" + quoteString(inputDbName) + " and ctc_table=" + quoteString(inputTableName) - + " and ctc_writeid > " + txnList.getHighWatermark() - + (txnList.getInvalidWriteIds().length == 0 ? 
- " " : " or ctc_writeid IN(" + StringUtils.join(",", openTxns) + ") ") + + "where ctc_catalog = " + quoteString(inputCatName) + + " and ctc_database=" + quoteString(inputDbName) + + " and ctc_table=" + quoteString(inputTableName) + + " and ctc_writeid > " + txnList.getHighWatermark() + + (txnList.getInvalidWriteIds().length == 0 ? + " " : " or ctc_writeid IN(" + StringUtils.join(",", openTxns) + ") ") + "order by ctc_timestamp asc"; if (LOG.isDebugEnabled()) { LOG.debug("Going to execute query <" + s + ">"); @@ -1804,6 +1829,7 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc if(!updateTxnComponents) { continue; } + String catName = lc.isSetCatName() ? normalizeCase(lc.getCatName()) : MetaStoreUtils.getDefaultCatalog(conf); String dbName = normalizeCase(lc.getDbname()); String tblName = normalizeCase(lc.getTablename()); String partName = normalizeCase(lc.getPartitionname()); @@ -1814,7 +1840,8 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc // may return empty result sets. // Get the write id allocated by this txn for the given table writes s = "select t2w_writeid from TXN_TO_WRITE_ID where" - + " t2w_database = " + quoteString(dbName) + + " t2w_catalog = " + quoteString(catName) + + " and t2w_database = " + quoteString(dbName) + " and t2w_table = " + quoteString(tblName) + " and t2w_txnid = " + txnid; LOG.debug("Going to execute query <" + s + ">"); @@ -1823,14 +1850,15 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc writeId = rs.getLong(1); } } - rows.add(txnid + ", '" + dbName + "', " + + rows.add(txnid + ", '" + catName + "', '" + dbName + "', " + (tblName == null ? "null" : "'" + tblName + "'") + ", " + (partName == null ? "null" : "'" + partName + "'")+ "," + quoteString(OpertaionType.fromDataOperationType(lc.getOperationType()).toString())+ "," + (writeId == null ? 
"null" : writeId)); } List queries = sqlGenerator.createInsertValuesStmt( - "TXN_COMPONENTS (tc_txnid, tc_database, tc_table, tc_partition, tc_operation_type, tc_writeid)", rows); + "TXN_COMPONENTS (tc_txnid, tc_catalog, tc_database, tc_table, tc_partition, tc_operation_type," + + " tc_writeid)", rows); for(String query : queries) { LOG.debug("Going to execute update <" + query + ">"); int modCount = stmt.executeUpdate(query); @@ -1852,6 +1880,7 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc + lc + " agentInfo=" + rqst.getAgentInfo()); } intLockId++; + String catName = lc.isSetCatName() ? normalizeCase(lc.getCatName()) : MetaStoreUtils.getDefaultCatalog(conf); String dbName = normalizeCase(lc.getDbname()); String tblName = normalizeCase(lc.getTablename()); String partName = normalizeCase(lc.getPartitionname()); @@ -1870,6 +1899,7 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc } long now = getDbTime(dbConn); rows.add(extLockId + ", " + intLockId + "," + txnid + ", " + + quoteString(catName) + ", " + quoteString(dbName) + ", " + valueOrNullLiteral(tblName) + ", " + valueOrNullLiteral(partName) + ", " + @@ -1881,7 +1911,7 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc valueOrNullLiteral(rqst.getAgentInfo()));// + ")"; } List queries = sqlGenerator.createInsertValuesStmt( - "HIVE_LOCKS (hl_lock_ext_id, hl_lock_int_id, hl_txnid, hl_db, " + + "HIVE_LOCKS (hl_lock_ext_id, hl_lock_int_id, hl_txnid, hl_catalog, hl_db, " + "hl_table, hl_partition,hl_lock_state, hl_lock_type, " + "hl_last_heartbeat, hl_user, hl_host, hl_agent_info)", rows); for(String query : queries) { @@ -2083,7 +2113,7 @@ public void unlock(UnlockRequest rqst) /** * used to sort entries in {@link org.apache.hadoop.hive.metastore.api.ShowLocksResponse} */ - private static class LockInfoExt extends LockInfo { + private class LockInfoExt extends LockInfo { private final ShowLocksResponseElement e; 
LockInfoExt(ShowLocksResponseElement e) { super(e); @@ -2104,15 +2134,22 @@ public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException { String s = "select hl_lock_ext_id, hl_txnid, hl_db, hl_table, hl_partition, hl_lock_state, " + "hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host, hl_lock_int_id," + - "hl_blockedby_ext_id, hl_blockedby_int_id, hl_agent_info from HIVE_LOCKS"; + "hl_blockedby_ext_id, hl_blockedby_int_id, hl_agent_info, hl_catalog from HIVE_LOCKS"; // Some filters may have been specified in the SHOW LOCKS statement. Add them to the query. + String catName = rqst.getCatName(); String dbName = rqst.getDbname(); String tableName = rqst.getTablename(); String partName = rqst.getPartname(); StringBuilder filter = new StringBuilder(); + if (catName != null && !catName.isEmpty()) { + filter.append("hl_catalog=").append(quoteString(catName)); + } if (dbName != null && !dbName.isEmpty()) { + if (filter.length() > 0) { + filter.append(" and "); + } filter.append("hl_db=").append(quoteString(dbName)); } if (tableName != null && !tableName.isEmpty()) { @@ -2141,6 +2178,7 @@ public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException { long txnid = rs.getLong(2); if (!rs.wasNull()) e.setTxnid(txnid); e.setDbname(rs.getString(3)); + e.setCatName(rs.getString("hl_catalog")); e.setTablename(rs.getString(4)); String partition = rs.getString(5); if (partition != null) e.setPartname(partition); @@ -2327,10 +2365,12 @@ public CompactionResponse compact(CompactionRequest rqst) throws MetaException { long id = generateCompactionQueueId(stmt); + String catName = rqst.isSetCatName() ? rqst.getCatName() : MetaStoreUtils.getDefaultCatalog(conf); StringBuilder sb = new StringBuilder("select cq_id, cq_state from COMPACTION_QUEUE where"). append(" cq_state IN(").append(quoteChar(INITIATED_STATE)). append(",").append(quoteChar(WORKING_STATE)). - append(") AND cq_database=").append(quoteString(rqst.getDbname())). 
+ append(") AND cq_catalog=").append(quoteString(catName)). + append(" AND cq_database=").append(quoteString(rqst.getDbname())). append(" AND cq_table=").append(quoteString(rqst.getTablename())).append(" AND "); if(rqst.getPartitionname() == null) { sb.append("cq_partition is null"); @@ -2350,8 +2390,8 @@ public CompactionResponse compact(CompactionRequest rqst) throws MetaException { return new CompactionResponse(enqueuedId, state, false); } close(rs); - StringBuilder buf = new StringBuilder("insert into COMPACTION_QUEUE (cq_id, cq_database, " + - "cq_table, "); + StringBuilder buf = new StringBuilder("insert into COMPACTION_QUEUE (cq_id, " + + "cq_catalog, cq_database, cq_table, "); String partName = rqst.getPartitionname(); if (partName != null) buf.append("cq_partition, "); buf.append("cq_state, cq_type"); @@ -2362,6 +2402,8 @@ public CompactionResponse compact(CompactionRequest rqst) throws MetaException { buf.append(") values ("); buf.append(id); buf.append(", '"); + buf.append(catName); + buf.append("', '"); buf.append(rqst.getDbname()); buf.append("', '"); buf.append(rqst.getTablename()); @@ -2443,9 +2485,10 @@ public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaExcep stmt = dbConn.createStatement(); String s = "select cq_database, cq_table, cq_partition, cq_state, cq_type, cq_worker_id, " + //-1 because 'null' literal doesn't work for all DBs... 
- "cq_start, -1 cc_end, cq_run_as, cq_hadoop_job_id, cq_id from COMPACTION_QUEUE union all " + + "cq_start, -1 cc_end, cq_run_as, cq_hadoop_job_id, cq_id, cq_catalog from COMPACTION_QUEUE " + + "union all " + "select cc_database, cc_table, cc_partition, cc_state, cc_type, cc_worker_id, " + - "cc_start, cc_end, cc_run_as, cc_hadoop_job_id, cc_id from COMPLETED_COMPACTIONS"; + "cc_start, cc_end, cc_run_as, cc_hadoop_job_id, cc_id, cc_catalog from COMPLETED_COMPACTIONS"; //what I want is order by cc_end desc, cc_start asc (but derby has a bug https://issues.apache.org/jira/browse/DERBY-6013) //to sort so that currently running jobs are at the end of the list (bottom of screen) //and currently running ones are in sorted by start time @@ -2476,6 +2519,7 @@ public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaExcep e.setRunAs(rs.getString(9)); e.setHadoopJobId(rs.getString(10)); e.setId(rs.getLong(11)); + e.setCatName(rs.getString(12)); response.addToCompacts(e); } LOG.debug("Going to rollback"); @@ -2534,16 +2578,18 @@ public void addDynamicPartitions(AddDynamicPartitions rqst) } Long writeId = rqst.getWriteid(); + String catName = rqst.isSetCatName() ? 
normalizeCase(rqst.getCatName()) : MetaStoreUtils.getDefaultCatalog(conf); List rows = new ArrayList<>(); for (String partName : rqst.getPartitionnames()) { - rows.add(rqst.getTxnid() + "," + quoteString(normalizeCase(rqst.getDbname())) + rows.add(rqst.getTxnid() + "," + quoteString(catName) + "," + + quoteString(normalizeCase(rqst.getDbname())) + "," + quoteString(normalizeCase(rqst.getTablename())) + "," + quoteString(partName) + "," + quoteChar(ot.sqlConst) + "," + writeId); } int modCount = 0; //record partitions that were written to List queries = sqlGenerator.createInsertValuesStmt( - "TXN_COMPONENTS (tc_txnid, tc_database, tc_table, tc_partition, tc_operation_type, tc_writeid)", rows); + "TXN_COMPONENTS (tc_txnid, tc_catalog, tc_database, tc_table, tc_partition, tc_operation_type, tc_writeid)", rows); for(String query : queries) { LOG.debug("Going to execute update <" + query + ">"); modCount = stmt.executeUpdate(query); @@ -2594,39 +2640,51 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, case DATABASE: { dbName = db.getName(); - buff.append("delete from TXN_COMPONENTS where tc_database='"); - buff.append(dbName); - buff.append("'"); + buff.append("delete from TXN_COMPONENTS where tc_catalog = '") + .append(db.getCatalogName()) + .append("' and tc_database='") + .append(dbName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_database='"); - buff.append(dbName); - buff.append("'"); + buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_catalog = '") + .append(db.getCatalogName()) + .append("' and ctc_database='") + .append(dbName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from COMPACTION_QUEUE where cq_database='"); - buff.append(dbName); - buff.append("'"); + buff.append("delete from COMPACTION_QUEUE where cq_catalog = '") + .append(db.getCatalogName()) + .append("' and cq_database='") + 
.append(dbName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from COMPLETED_COMPACTIONS where cc_database='"); - buff.append(dbName); - buff.append("'"); + buff.append("delete from COMPLETED_COMPACTIONS where cc_catalog ='") + .append(db.getCatalogName()) + .append("' and cc_database='") + .append(dbName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from TXN_TO_WRITE_ID where t2w_database='"); - buff.append(dbName.toLowerCase()); - buff.append("'"); + buff.append("delete from TXN_TO_WRITE_ID where t2w_catalog = '") + .append(db.getCatalogName().toLowerCase()) + .append("' and t2w_database='") + .append(dbName.toLowerCase()) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from NEXT_WRITE_ID where nwi_database='"); - buff.append(dbName.toLowerCase()); - buff.append("'"); + buff.append("delete from NEXT_WRITE_ID where nwi_catalog = '") + .append(db.getCatalogName().toLowerCase()) + .append("' and nwi_database='") + .append(dbName.toLowerCase()) + .append("'"); queries.add(buff.toString()); break; @@ -2635,51 +2693,63 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, dbName = table.getDbName(); tblName = table.getTableName(); - buff.append("delete from TXN_COMPONENTS where tc_database='"); - buff.append(dbName); - buff.append("' and tc_table='"); - buff.append(tblName); - buff.append("'"); + buff.append("delete from TXN_COMPONENTS where tc_catalog = '") + .append(table.getCatName()) + .append("' and tc_database='") + .append(dbName) + .append("' and tc_table='") + .append(tblName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_database='"); - buff.append(dbName); - buff.append("' and ctc_table='"); - buff.append(tblName); - buff.append("'"); + buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_catalog = '") + 
.append(table.getCatName()) + .append("' and ctc_database='") + .append(dbName) + .append("' and ctc_table='") + .append(tblName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from COMPACTION_QUEUE where cq_database='"); - buff.append(dbName); - buff.append("' and cq_table='"); - buff.append(tblName); - buff.append("'"); + buff.append("delete from COMPACTION_QUEUE where cq_catalog = '") + .append(table.getCatName()) + .append("' and cq_database='") + .append(dbName) + .append("' and cq_table='") + .append(tblName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from COMPLETED_COMPACTIONS where cc_database='"); - buff.append(dbName); - buff.append("' and cc_table='"); - buff.append(tblName); - buff.append("'"); + buff.append("delete from COMPLETED_COMPACTIONS where cc_catalog = '") + .append(table.getCatName()) + .append("' and cc_database='") + .append(dbName) + .append("' and cc_table='") + .append(tblName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from TXN_TO_WRITE_ID where t2w_database='"); - buff.append(dbName.toLowerCase()); - buff.append("' and t2w_table='"); - buff.append(tblName.toLowerCase()); - buff.append("'"); + buff.append("delete from TXN_TO_WRITE_ID where t2w_catalog = '") + .append(table.getCatName().toLowerCase()) + .append("' and t2w_database='") + .append(dbName.toLowerCase()) + .append("' and t2w_table='") + .append(tblName.toLowerCase()) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from NEXT_WRITE_ID where nwi_database='"); - buff.append(dbName.toLowerCase()); - buff.append("' and nwi_table='"); - buff.append(tblName.toLowerCase()); - buff.append("'"); + buff.append("delete from NEXT_WRITE_ID where nwi_catalog = '") + .append(table.getCatName().toLowerCase()) + .append("' and nwi_database='") + .append(dbName.toLowerCase()) + .append("' and nwi_table='") + 
.append(tblName.toLowerCase()) + .append("'"); queries.add(buff.toString()); break; @@ -2696,43 +2766,51 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, partVals = p.getValues(); partName = Warehouse.makePartName(partCols, partVals); - buff.append("delete from TXN_COMPONENTS where tc_database='"); - buff.append(dbName); - buff.append("' and tc_table='"); - buff.append(tblName); - buff.append("' and tc_partition='"); - buff.append(partName); - buff.append("'"); + buff.append("delete from TXN_COMPONENTS where tc_catalog = '") + .append(p.getCatName()) + .append("' and tc_database='") + .append(dbName) + .append("' and tc_table='") + .append(tblName) + .append("' and tc_partition='") + .append(partName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_database='"); - buff.append(dbName); - buff.append("' and ctc_table='"); - buff.append(tblName); - buff.append("' and ctc_partition='"); - buff.append(partName); - buff.append("'"); + buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_catalog = '") + .append(p.getCatName()) + .append("' and ctc_database='") + .append(dbName) + .append("' and ctc_table='") + .append(tblName) + .append("' and ctc_partition='") + .append(partName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from COMPACTION_QUEUE where cq_database='"); - buff.append(dbName); - buff.append("' and cq_table='"); - buff.append(tblName); - buff.append("' and cq_partition='"); - buff.append(partName); - buff.append("'"); + buff.append("delete from COMPACTION_QUEUE where cq_catalog='") + .append(p.getCatName()) + .append("' and cq_database='") + .append(dbName) + .append("' and cq_table='") + .append(tblName) + .append("' and cq_partition='") + .append(partName) + .append("'"); queries.add(buff.toString()); buff.setLength(0); - buff.append("delete from COMPLETED_COMPACTIONS where cc_database='"); - 
buff.append(dbName); - buff.append("' and cc_table='"); - buff.append(tblName); - buff.append("' and cc_partition='"); - buff.append(partName); - buff.append("'"); + buff.append("delete from COMPLETED_COMPACTIONS where cc_catalog = '") + .append(p.getCatName()) + .append("' and cc_database='") + .append(dbName) + .append("' and cc_table='") + .append(tblName) + .append("' and cc_partition='") + .append(partName) + .append("'"); queries.add(buff.toString()); } @@ -3201,12 +3279,13 @@ private void determineDatabaseProduct(Connection conn) { } } - private static class LockInfo { + private class LockInfo { private final long extLockId; private final long intLockId; //0 means there is no transaction, i.e. it a select statement which is not part of //explicit transaction or a IUD statement that is not writing to ACID table private final long txnId; + private final String catName; private final String db; private final String table; private final String partition; @@ -3217,6 +3296,7 @@ private void determineDatabaseProduct(Connection conn) { LockInfo(ResultSet rs) throws SQLException, MetaException { extLockId = rs.getLong("hl_lock_ext_id"); // can't be null intLockId = rs.getLong("hl_lock_int_id"); // can't be null + catName = rs.getString("hl_catalog"); // can't be null db = rs.getString("hl_db"); // can't be null String t = rs.getString("hl_table"); table = (rs.wasNull() ? null : t); @@ -3241,6 +3321,7 @@ private void determineDatabaseProduct(Connection conn) { extLockId = e.getLockid(); intLockId = e.getLockIdInternal(); txnId = e.getTxnid(); + catName = e.isSetCatName() ? 
e.getCatName() : MetaStoreUtils.getDefaultCatalog(conf); db = e.getDbname(); table = e.getTablename(); partition = e.getPartname(); @@ -3258,8 +3339,8 @@ public boolean equals(Object other) { @Override public String toString() { return JavaUtils.lockIdToString(extLockId) + " intLockId:" + - intLockId + " " + JavaUtils.txnIdToString(txnId) - + " db:" + db + " table:" + table + " partition:" + + intLockId + " " + JavaUtils.txnIdToString(txnId) + + "cat:" + catName + " db:" + db + " table:" + table + " partition:" + partition + " state:" + (state == null ? "null" : state.toString()) + " type:" + (type == null ? "null" : type.toString()); } @@ -3531,15 +3612,18 @@ private LockResponse checkLock(Connection dbConn, long extLockId) Savepoint save = dbConn.setSavepoint(); StringBuilder query = new StringBuilder("select hl_lock_ext_id, " + "hl_lock_int_id, hl_db, hl_table, hl_partition, hl_lock_state, " + - "hl_lock_type, hl_txnid from HIVE_LOCKS where hl_db in ("); + "hl_lock_type, hl_txnid, hl_catalog from HIVE_LOCKS where hl_db in ("); Set strings = new HashSet<>(locksBeingChecked.size()); //This the set of entities that the statement represented by extLockId wants to update List writeSet = new ArrayList<>(); + String catName = null; for (LockInfo info : locksBeingChecked) { strings.add(info.db); + if (catName == null) catName = info.catName; + else assert catName.equals(info.catName) : "Cross catalog locking operations are not allowed"; if(!isPartOfDynamicPartitionInsert && info.type == LockType.SHARED_WRITE) { writeSet.add(info); } @@ -3551,12 +3635,18 @@ private LockResponse checkLock(Connection dbConn, long extLockId) } stmt = dbConn.createStatement(); StringBuilder sb = new StringBuilder(" ws_database, ws_table, ws_partition, " + - "ws_txnid, ws_commit_id " + + "ws_txnid, ws_commit_id, ws_catalog " + "from WRITE_SET where ws_commit_id >= " + writeSet.get(0).txnId + " and (");//see commitTxn() for more info on this inequality for(LockInfo info : writeSet) { - 
sb.append("(ws_database = ").append(quoteString(info.db)).append(" and ws_table = ") - .append(quoteString(info.table)).append(" and ws_partition ") - .append(info.partition == null ? "is null" : "= " + quoteString(info.partition)).append(") or "); + sb.append("(ws_catalog = ") + .append(quoteString(info.catName)) + .append(" and ws_database = ") + .append(quoteString(info.db)) + .append(" and ws_table = ") + .append(quoteString(info.table)) + .append(" and ws_partition ") + .append(info.partition == null ? "is null" : "= " + quoteString(info.partition)) + .append(") or "); } sb.setLength(sb.length() - 4);//nuke trailing " or " sb.append(")"); @@ -3578,7 +3668,8 @@ private LockResponse checkLock(Connection dbConn, long extLockId) * the WW conflict but it will be caught in commitTxn() where actual partitions written are known. * This is OK since we want 2 concurrent updates that update different sets of partitions to both commit. */ - String resourceName = rs.getString(1) + '/' + rs.getString(2); + String resourceName = rs.getString("ws_catalog") + "/" + rs.getString(1) + '/' + + rs.getString(2); String partName = rs.getString(3); if(partName != null) { resourceName += '/' + partName; @@ -3605,7 +3696,9 @@ private LockResponse checkLock(Connection dbConn, long extLockId) query.append(s); query.append('\''); } - query.append(")"); + query.append(") and hl_catalog = '") + .append(catName) + .append("'"); // If any of the table requests are null, then I need to pull all the // table locks for this db. 
@@ -4104,7 +4197,7 @@ private LockInfo getTxnIdFromLockId(Connection dbConn, long extLockId) try { stmt = dbConn.createStatement(); String s = "select hl_lock_ext_id, hl_lock_int_id, hl_db, hl_table, " + - "hl_partition, hl_lock_state, hl_lock_type, hl_txnid from HIVE_LOCKS where " + + "hl_partition, hl_lock_state, hl_lock_type, hl_txnid, hl_catalog from HIVE_LOCKS where " + "hl_lock_ext_id = " + extLockId; LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); @@ -4127,7 +4220,7 @@ private LockInfo getTxnIdFromLockId(Connection dbConn, long extLockId) try { stmt = dbConn.createStatement(); String s = "select hl_lock_ext_id, hl_lock_int_id, hl_db, hl_table, " + - "hl_partition, hl_lock_state, hl_lock_type, hl_txnid from HIVE_LOCKS where " + + "hl_partition, hl_lock_state, hl_lock_type, hl_txnid, hl_catalog from HIVE_LOCKS where " + "hl_lock_ext_id = " + extLockId; LOG.debug("Going to execute query <" + s + ">"); ResultSet rs = stmt.executeQuery(s); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index 4695f0deef..b1db9e4cac 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -131,7 +131,7 @@ void commitTxn(CommitTxnRequest rqst) */ @RetrySemantics.Idempotent BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit( - String inputDbName, String inputTableName, ValidWriteIdList txnList) + String inputCatName, String inputDbName, String inputTableName, ValidWriteIdList txnList) throws MetaException; /** * Gets the list of valid write ids for the given table wrt to current txn @@ -264,7 +264,7 @@ void addDynamicPartitions(AddDynamicPartitions rqst) */ @RetrySemantics.Idempotent void cleanupRecords(HiveObjectType type, Database db, Table table, - Iterator partitionIterator) 
throws MetaException; + Iterator partitionIterator) throws MetaException; @RetrySemantics.Idempotent void onRename(String oldCatName, String oldDbName, String oldTabName, String oldPartName, diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index fa291d5f20..9551286e8b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -234,19 +234,6 @@ public static boolean isAcidTable(Table table) { } /** - * Should produce the result as .. - */ - public static String getFullTableName(String dbName, String tableName) { - return dbName.toLowerCase() + "." + tableName.toLowerCase(); - } - - public static String[] getDbTableName(String fullTableName) { - return fullTableName.split("\\."); - } - - - - /** * Build a query (or queries if one query is too big but only for the case of 'IN' * composite clause. For the case of 'NOT IN' clauses, multiple queries change * the semantics of the intended query. diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FullTableName.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FullTableName.java new file mode 100644 index 0000000000..abf9a7e84b --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FullTableName.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.utils; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.MetaException; + +/** + * A simple class to contain the full name of a table, catalog, database, and partition. + */ +public class FullTableName { + public final String catName; + public final String dbName; + public final String tableName; + + /** + * + * @param concatenatedName Name in cat.db.table or db.table format + * @param conf configuration object + */ + public FullTableName(String concatenatedName, Configuration conf) throws MetaException { + String[] components = concatenatedName.split("\\" + Warehouse.CAT_DB_TABLE_SEPARATOR); + int index = 0; + if (components.length == 2) { + catName = MetaStoreUtils.getDefaultCatalog(conf); + } else if (components.length == 3) { + catName = components[index++]; + } else { + throw new MetaException("Bad format for full table name, expected cat.db.table or db.table: " + + concatenatedName); + } + dbName = components[index++]; + tableName = components[index]; + } + + @Override + public String toString() { + return catName + Warehouse.CAT_DB_TABLE_SEPARATOR + dbName + Warehouse.CAT_DB_TABLE_SEPARATOR + + tableName; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 742b6bf76b..b7694e2dc0 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -1797,4 +1797,5 @@ public static String getDefaultCatalog(Configuration conf) { if (catName == null || "".equals(catName)) catName = Warehouse.DEFAULT_CATALOG_NAME; return catName; } + } diff --git standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql index d679658272..691e1db8b1 100644 --- standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql +++ standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql @@ -508,6 +508,7 @@ CREATE TABLE TXNS ( CREATE TABLE TXN_COMPONENTS ( TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + TC_CATALOG varchar(256) NOT NULL, TC_DATABASE varchar(128) NOT NULL, TC_TABLE varchar(128), TC_PARTITION varchar(767), @@ -519,6 +520,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID bigint NOT NULL, + CTC_CATALOG varchar(256) NOT NULL, CTC_DATABASE varchar(128) NOT NULL, CTC_TABLE varchar(256), CTC_PARTITION varchar(767), @@ -526,7 +528,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_WRITEID bigint ); -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE NEXT_TXN_ID ( NTXN_NEXT bigint NOT NULL @@ -537,6 +539,7 @@ CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, HL_TXNID bigint NOT NULL, + HL_CATALOG varchar(256) NOT NULL, HL_DB varchar(128) NOT NULL, HL_TABLE varchar(128), HL_PARTITION varchar(767), @@ -562,6 +565,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, + CQ_CATALOG varchar(256) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(128) NOT NULL, CQ_PARTITION varchar(767), @@ 
-583,6 +587,7 @@ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint PRIMARY KEY, + CC_CATALOG varchar(256) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(128) NOT NULL, CC_PARTITION varchar(767), @@ -608,6 +613,7 @@ CREATE TABLE AUX_TABLE ( --1st 4 cols make up a PK but since WS_PARTITION is nullable we can't declare such PK --This is a good candidate for Index orgainzed table CREATE TABLE WRITE_SET ( + WS_CATALOG varchar(128) NOT NULL, WS_DATABASE varchar(128) NOT NULL, WS_TABLE varchar(128) NOT NULL, WS_PARTITION varchar(767), @@ -618,21 +624,23 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG varchar(256) NOT NULL, T2W_DATABASE varchar(128) NOT NULL, T2W_TABLE varchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG varchar(256) NOT NULL, NWI_DATABASE varchar(128) NOT NULL, NWI_TABLE varchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, diff --git standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql index 24740f9e4a..296e3aafda 100644 --- standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -508,6 
+508,7 @@ CREATE TABLE TXNS ( CREATE TABLE TXN_COMPONENTS ( TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + TC_CATALOG varchar(256) NOT NULL, TC_DATABASE varchar(128) NOT NULL, TC_TABLE varchar(128), TC_PARTITION varchar(767), @@ -519,6 +520,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID bigint NOT NULL, + CTC_CATALOG varchar(256) NOT NULL, CTC_DATABASE varchar(128) NOT NULL, CTC_TABLE varchar(256), CTC_PARTITION varchar(767), @@ -526,7 +528,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_WRITEID bigint ); -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE NEXT_TXN_ID ( NTXN_NEXT bigint NOT NULL @@ -537,6 +539,7 @@ CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, HL_TXNID bigint NOT NULL, + HL_CATALOG varchar(256) NOT NULL, HL_DB varchar(128) NOT NULL, HL_TABLE varchar(128), HL_PARTITION varchar(767), @@ -562,6 +565,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, + CQ_CATALOG varchar(256) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(128) NOT NULL, CQ_PARTITION varchar(767), @@ -583,6 +587,7 @@ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint PRIMARY KEY, + CC_CATALOG varchar(256) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(128) NOT NULL, CC_PARTITION varchar(767), @@ -608,6 +613,7 @@ CREATE TABLE AUX_TABLE ( --1st 4 cols make up a PK but since WS_PARTITION is nullable we can't declare such PK --This is a good candidate for Index orgainzed table CREATE TABLE WRITE_SET ( + WS_CATALOG varchar(128) NOT NULL, WS_DATABASE varchar(128) NOT NULL, WS_TABLE varchar(128) NOT NULL, WS_PARTITION varchar(767), @@ -618,21 +624,23 @@ CREATE 
TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG varchar(256) NOT NULL, T2W_DATABASE varchar(128) NOT NULL, T2W_TABLE varchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG varchar(256) NOT NULL, NWI_DATABASE varchar(128) NOT NULL, NWI_TABLE varchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, diff --git standalone-metastore/src/main/sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql index 047abdbf4b..9b84839a5c 100644 --- standalone-metastore/src/main/sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql +++ standalone-metastore/src/main/sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql @@ -24,5 +24,65 @@ ALTER TABLE "APP"."PART_COL_PRIVS" ADD "AUTHORIZER" VARCHAR(128); DROP INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX"; CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); +-- HIVE-18973, add catalogs to TXN system +-- Add to TXN_COMPONENTS +ALTER TABLE TXN_COMPONENTS ADD COLUMN TC_CATALOG VARCHAR(256); +UPDATE TXN_COMPONENTS + SET TC_CATALOG = 'hive'; +ALTER TABLE TXN_COMPONENTS ALTER COLUMN TC_CATALOG NOT NULL; + +-- Add to COMPLETED_TXN_COMPONENTS 
+ALTER TABLE COMPLETED_TXN_COMPONENTS ADD COLUMN CTC_CATALOG VARCHAR(256); +UPDATE COMPLETED_TXN_COMPONENTS + SET CTC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_CATALOG NOT NULL; + +DROP INDEX COMPLETED_TXN_COMPONENTS_IDX; +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +-- Add to HIVE_LOCKS +ALTER TABLE HIVE_LOCKS ADD COLUMN HL_CATALOG VARCHAR(256); +UPDATE HIVE_LOCKS + SET HL_CATALOG = 'hive'; +ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_CATALOG NOT NULL; + +-- Add to COMPACTION_QUEUE +ALTER TABLE COMPACTION_QUEUE ADD COLUMN CQ_CATALOG VARCHAR(256); +UPDATE COMPACTION_QUEUE + SET CQ_CATALOG = 'hive'; +ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_CATALOG NOT NULL; + +-- Add to COMPLETED_COMPACTIONS +ALTER TABLE COMPLETED_COMPACTIONS ADD COLUMN CC_CATALOG VARCHAR(256); +UPDATE COMPLETED_COMPACTIONS + SET CC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_COMPACTIONS ALTER COLUMN CC_CATALOG NOT NULL; + +-- Add to WRITE_SET +ALTER TABLE WRITE_SET ADD COLUMN WS_CATALOG VARCHAR(256); +UPDATE WRITE_SET + SET WS_CATALOG = 'hive'; +ALTER TABLE WRITE_SET ALTER COLUMN WS_CATALOG NOT NULL; + +-- Add to TXN_TO_WRITE_ID +ALTER TABLE TXN_TO_WRITE_ID ADD COLUMN T2W_CATALOG VARCHAR(256); +UPDATE TXN_TO_WRITE_ID + SET T2W_CATALOG = 'hive'; +ALTER TABLE TXN_TO_WRITE_ID ALTER COLUMN T2W_CATALOG NOT NULL; + +DROP INDEX TBL_TO_TXN_ID_IDX; +DROP INDEX TBL_TO_WRITE_ID_IDX; +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +-- Add to NEXT_WRITE_ID +ALTER TABLE NEXT_WRITE_ID ADD COLUMN NWI_CATALOG VARCHAR(256); +UPDATE NEXT_WRITE_ID + SET NWI_CATALOG = 'hive'; +ALTER TABLE NEXT_WRITE_ID ALTER COLUMN NWI_CATALOG NOT NULL; + +DROP INDEX NEXT_WRITE_ID_IDX; +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, 
NWI_DATABASE, NWI_TABLE); + -- This needs to be the last thing done. Insert any changes above this line. UPDATE "APP".VERSION SET SCHEMA_VERSION='3.1.0', VERSION_COMMENT='Hive release version 3.1.0' where VER_ID=1; diff --git standalone-metastore/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql index 1bb3c1acb3..9b08de6542 100644 --- standalone-metastore/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql @@ -988,6 +988,7 @@ ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CT -- ----------------------------------------------------------------------------------------------------------------------------------------------- CREATE TABLE COMPACTION_QUEUE( CQ_ID bigint NOT NULL, + CQ_CATALOG nvarchar(256) NOT NULL, CQ_DATABASE nvarchar(128) NOT NULL, CQ_TABLE nvarchar(128) NOT NULL, CQ_PARTITION nvarchar(767) NULL, @@ -1008,6 +1009,7 @@ PRIMARY KEY CLUSTERED CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint NOT NULL, + CC_CATALOG nvarchar(256) NOT NULL, CC_DATABASE nvarchar(128) NOT NULL, CC_TABLE nvarchar(128) NOT NULL, CC_PARTITION nvarchar(767) NULL, @@ -1029,6 +1031,7 @@ PRIMARY KEY CLUSTERED CREATE TABLE COMPLETED_TXN_COMPONENTS( CTC_TXNID bigint NOT NULL, + CTC_CATALOG nvarchar(256) NOT NULL, CTC_DATABASE nvarchar(128) NOT NULL, CTC_TABLE nvarchar(128) NULL, CTC_PARTITION nvarchar(767) NULL, @@ -1036,12 +1039,13 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS( CTC_WRITEID bigint ); -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE HIVE_LOCKS( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, HL_TXNID bigint NOT NULL, + HL_CATALOG nvarchar(256) NOT NULL, HL_DB nvarchar(128) NOT NULL, HL_TABLE 
nvarchar(128) NULL, HL_PARTITION nvarchar(767) NULL, @@ -1098,6 +1102,7 @@ PRIMARY KEY CLUSTERED CREATE TABLE TXN_COMPONENTS( TC_TXNID bigint NOT NULL, + TC_CATALOG nvarchar(256) NOT NULL, TC_DATABASE nvarchar(128) NOT NULL, TC_TABLE nvarchar(128) NULL, TC_PARTITION nvarchar(767) NULL, @@ -1144,6 +1149,7 @@ CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE); CREATE TABLE WRITE_SET ( + WS_CATALOG nvarchar(256) NOT NULL, WS_DATABASE nvarchar(128) NOT NULL, WS_TABLE nvarchar(128) NOT NULL, WS_PARTITION nvarchar(767), @@ -1162,21 +1168,23 @@ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG nvarchar(256) NOT NULL, T2W_DATABASE nvarchar(128) NOT NULL, T2W_TABLE nvarchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG nvarchar(256) NOT NULL, NWI_DATABASE nvarchar(128) NOT NULL, NWI_TABLE nvarchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, diff --git standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql index 7a5cec8aff..fa333ff889 100644 --- 
standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -988,6 +988,7 @@ ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CT -- ----------------------------------------------------------------------------------------------------------------------------------------------- CREATE TABLE COMPACTION_QUEUE( CQ_ID bigint NOT NULL, + CQ_CATALOG nvarchar(256) NOT NULL, CQ_DATABASE nvarchar(128) NOT NULL, CQ_TABLE nvarchar(128) NOT NULL, CQ_PARTITION nvarchar(767) NULL, @@ -1008,6 +1009,7 @@ PRIMARY KEY CLUSTERED CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint NOT NULL, + CC_CATALOG nvarchar(256) NOT NULL, CC_DATABASE nvarchar(128) NOT NULL, CC_TABLE nvarchar(128) NOT NULL, CC_PARTITION nvarchar(767) NULL, @@ -1029,6 +1031,7 @@ PRIMARY KEY CLUSTERED CREATE TABLE COMPLETED_TXN_COMPONENTS( CTC_TXNID bigint NOT NULL, + CTC_CATALOG nvarchar(256) NOT NULL, CTC_DATABASE nvarchar(128) NOT NULL, CTC_TABLE nvarchar(128) NULL, CTC_PARTITION nvarchar(767) NULL, @@ -1036,12 +1039,13 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS( CTC_WRITEID bigint ); -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE HIVE_LOCKS( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, HL_TXNID bigint NOT NULL, + HL_CATALOG nvarchar(256) NOT NULL, HL_DB nvarchar(128) NOT NULL, HL_TABLE nvarchar(128) NULL, HL_PARTITION nvarchar(767) NULL, @@ -1098,6 +1102,7 @@ PRIMARY KEY CLUSTERED CREATE TABLE TXN_COMPONENTS( TC_TXNID bigint NOT NULL, + TC_CATALOG nvarchar(256) NOT NULL, TC_DATABASE nvarchar(128) NOT NULL, TC_TABLE nvarchar(128) NULL, TC_PARTITION nvarchar(767) NULL, @@ -1144,6 +1149,7 @@ CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); CREATE INDEX 
CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE); CREATE TABLE WRITE_SET ( + WS_CATALOG nvarchar(256) NOT NULL, WS_DATABASE nvarchar(128) NOT NULL, WS_TABLE nvarchar(128) NOT NULL, WS_PARTITION nvarchar(767), @@ -1162,21 +1168,23 @@ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG nvarchar(256) NOT NULL, T2W_DATABASE nvarchar(128) NOT NULL, T2W_TABLE nvarchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG nvarchar(256) NOT NULL, NWI_DATABASE nvarchar(128) NOT NULL, NWI_TABLE nvarchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, diff --git standalone-metastore/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql index d3f2794d72..15fc7c0a6b 100644 --- standalone-metastore/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql @@ -25,6 +25,66 @@ ALTER TABLE PART_COL_PRIVS ADD AUTHORIZER nvarchar(128) NULL; DROP INDEX PART_COL_PRIVS.PARTITIONCOLUMNPRIVILEGEINDEX; CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS 
(AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE); +-- HIVE-18973 +-- Add to TXN_COMPONENTS +ALTER TABLE TXN_COMPONENTS ADD TC_CATALOG nvarchar(256); +UPDATE TXN_COMPONENTS + SET TC_CATALOG = 'hive'; +ALTER TABLE TXN_COMPONENTS ALTER COLUMN TC_CATALOG nvarchar(256) NOT NULL; + +-- Add to COMPLETED_TXN_COMPONENTS +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_CATALOG nvarchar(256); +UPDATE COMPLETED_TXN_COMPONENTS + SET CTC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_CATALOG nvarchar(256) NOT NULL; + +DROP INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS; +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +-- Add to HIVE_LOCKS +ALTER TABLE HIVE_LOCKS ADD HL_CATALOG nvarchar(256); +UPDATE HIVE_LOCKS + SET HL_CATALOG = 'hive'; +ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_CATALOG nvarchar(256) NOT NULL; + +-- Add to COMPACTION_QUEUE +ALTER TABLE COMPACTION_QUEUE ADD CQ_CATALOG nvarchar(256); +UPDATE COMPACTION_QUEUE + SET CQ_CATALOG = 'hive'; +ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_CATALOG nvarchar(256) NOT NULL; + +-- Add to COMPLETED_COMPACTIONS +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_CATALOG nvarchar(256); +UPDATE COMPLETED_COMPACTIONS + SET CC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_COMPACTIONS ALTER COLUMN CC_CATALOG nvarchar(256) NOT NULL; + +-- Add to WRITE_SET +ALTER TABLE WRITE_SET ADD WS_CATALOG nvarchar(256); +UPDATE WRITE_SET + SET WS_CATALOG = 'hive'; +ALTER TABLE WRITE_SET ALTER COLUMN WS_CATALOG nvarchar(256) NOT NULL; + +-- Add to TXN_TO_WRITE_ID +ALTER TABLE TXN_TO_WRITE_ID ADD T2W_CATALOG nvarchar(256); +UPDATE TXN_TO_WRITE_ID + SET T2W_CATALOG = 'hive'; +ALTER TABLE TXN_TO_WRITE_ID ALTER COLUMN T2W_CATALOG nvarchar(256) NOT NULL; + +DROP INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID; +DROP INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID; +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON 
TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +-- Add to NEXT_WRITE_ID +ALTER TABLE NEXT_WRITE_ID ADD NWI_CATALOG nvarchar(256); +UPDATE NEXT_WRITE_ID + SET NWI_CATALOG = 'hive'; +ALTER TABLE NEXT_WRITE_ID ALTER COLUMN NWI_CATALOG nvarchar(256) NOT NULL; + +DROP INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID; +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); + -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='3.1.0', VERSION_COMMENT='Hive release version 3.1.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.0.0 to 3.1.0' AS MESSAGE; diff --git standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql index 1f0450330b..490b359a58 100644 --- standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql @@ -988,6 +988,7 @@ CREATE TABLE TXNS ( CREATE TABLE TXN_COMPONENTS ( TC_TXNID bigint NOT NULL, + TC_CATALOG varchar(256) NOT NULL, TC_DATABASE varchar(128) NOT NULL, TC_TABLE varchar(128), TC_PARTITION varchar(767), @@ -1000,6 +1001,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID bigint NOT NULL, + CTC_CATALOG varchar(256) NOT NULL, CTC_DATABASE varchar(128) NOT NULL, CTC_TABLE varchar(256), CTC_PARTITION varchar(767), @@ -1007,7 +1009,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_WRITEID bigint ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; CREATE TABLE 
NEXT_TXN_ID ( NTXN_NEXT bigint NOT NULL @@ -1018,6 +1020,7 @@ CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, HL_TXNID bigint NOT NULL, + HL_CATALOG varchar(256) NOT NULL, HL_DB varchar(128) NOT NULL, HL_TABLE varchar(128), HL_PARTITION varchar(767), @@ -1044,6 +1047,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, + CQ_CATALOG varchar(256) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(128) NOT NULL, CQ_PARTITION varchar(767), @@ -1060,6 +1064,7 @@ CREATE TABLE COMPACTION_QUEUE ( CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint PRIMARY KEY, + CC_CATALOG varchar(256) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(128) NOT NULL, CC_PARTITION varchar(767), @@ -1088,6 +1093,7 @@ CREATE TABLE AUX_TABLE ( ) ENGINE=InnoDB DEFAULT CHARSET=latin1; CREATE TABLE WRITE_SET ( + WS_CATALOG varchar(256) NOT NULL, WS_DATABASE varchar(128) NOT NULL, WS_TABLE varchar(128) NOT NULL, WS_PARTITION varchar(767), @@ -1098,21 +1104,23 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG varchar(256) NOT NULL, T2W_DATABASE varchar(128) NOT NULL, T2W_TABLE varchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG varchar(256) NOT NULL, NWI_DATABASE varchar(128) NOT NULL, NWI_TABLE varchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, 
NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, diff --git standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index f0d2fa1db9..04068e7ef8 100644 --- standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -988,6 +988,7 @@ CREATE TABLE TXNS ( CREATE TABLE TXN_COMPONENTS ( TC_TXNID bigint NOT NULL, + TC_CATALOG varchar(256) NOT NULL, TC_DATABASE varchar(128) NOT NULL, TC_TABLE varchar(128), TC_PARTITION varchar(767), @@ -1000,6 +1001,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID bigint NOT NULL, + CTC_CATALOG varchar(256) NOT NULL, CTC_DATABASE varchar(128) NOT NULL, CTC_TABLE varchar(256), CTC_PARTITION varchar(767), @@ -1007,7 +1009,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_WRITEID bigint ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; CREATE TABLE NEXT_TXN_ID ( NTXN_NEXT bigint NOT NULL @@ -1018,6 +1020,7 @@ CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, HL_TXNID bigint NOT NULL, + HL_CATALOG varchar(256) NOT NULL, HL_DB varchar(128) NOT NULL, HL_TABLE varchar(128), HL_PARTITION varchar(767), @@ -1044,6 +1047,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, + CQ_CATALOG varchar(256) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(128) NOT NULL, CQ_PARTITION varchar(767), @@ -1060,6 +1064,7 @@ CREATE TABLE COMPACTION_QUEUE ( CREATE TABLE 
COMPLETED_COMPACTIONS ( CC_ID bigint PRIMARY KEY, + CC_CATALOG varchar(256) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(128) NOT NULL, CC_PARTITION varchar(767), @@ -1088,6 +1093,7 @@ CREATE TABLE AUX_TABLE ( ) ENGINE=InnoDB DEFAULT CHARSET=latin1; CREATE TABLE WRITE_SET ( + WS_CATALOG varchar(256) NOT NULL, WS_DATABASE varchar(128) NOT NULL, WS_TABLE varchar(128) NOT NULL, WS_PARTITION varchar(767), @@ -1098,21 +1104,23 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG varchar(256) NOT NULL, T2W_DATABASE varchar(128) NOT NULL, T2W_TABLE varchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG varchar(256) NOT NULL, NWI_DATABASE varchar(128) NOT NULL, NWI_TABLE varchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, diff --git standalone-metastore/src/main/sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql index df5485edcf..daa5814e35 100644 --- standalone-metastore/src/main/sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql @@ -25,6 +25,66 @@ ALTER TABLE `PART_COL_PRIVS` ADD `AUTHORIZER` varchar(128) CHARACTER SET 
latin1 ALTER TABLE `PART_COL_PRIVS` DROP INDEX `PARTITIONCOLUMNPRIVILEGEINDEX`; ALTER TABLE `PART_COL_PRIVS` ADD INDEX `PARTITIONCOLUMNPRIVILEGEINDEX` (`AUTHORIZER`,`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`); +-- HIVE-19323 +-- Update TXN_COMPONENTS +ALTER TABLE TXN_COMPONENTS ADD COLUMN TC_CATALOG VARCHAR(256); +UPDATE TXN_COMPONENTS + SET TC_CATALOG = 'hive'; +ALTER TABLE TXN_COMPONENTS CHANGE COLUMN TC_CATALOG TC_CATALOG varchar(256) NOT NULL; + +-- Update COMPLETED_TXN_COMPONENTS +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD COLUMN CTC_CATALOG VARCHAR(256); +UPDATE COMPLETED_TXN_COMPONENTS + SET CTC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_TXN_COMPONENTS CHANGE COLUMN CTC_CATALOG CTC_CATALOG varchar(256) NOT NULL; + +DROP INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS; +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; + +-- Update HIVE_LOCKS +ALTER TABLE HIVE_LOCKS ADD COLUMN HL_CATALOG VARCHAR(256); +UPDATE HIVE_LOCKS + SET HL_CATALOG = 'hive'; +ALTER TABLE HIVE_LOCKS CHANGE COLUMN HL_CATALOG HL_CATALOG varchar(256) NOT NULL; + +-- Update COMPACTION_QUEUE +ALTER TABLE COMPACTION_QUEUE ADD COLUMN CQ_CATALOG VARCHAR(256); +UPDATE COMPACTION_QUEUE + SET CQ_CATALOG = 'hive'; +ALTER TABLE COMPACTION_QUEUE CHANGE COLUMN CQ_CATALOG CQ_CATALOG varchar(256) NOT NULL; + +-- Update COMPLETED_COMPACTIONS +ALTER TABLE COMPLETED_COMPACTIONS ADD COLUMN CC_CATALOG VARCHAR(256); +UPDATE COMPLETED_COMPACTIONS + SET CC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_COMPACTIONS CHANGE COLUMN CC_CATALOG CC_CATALOG varchar(256) NOT NULL; + +-- Update WRITE_SET +ALTER TABLE WRITE_SET ADD COLUMN WS_CATALOG VARCHAR(256); +UPDATE WRITE_SET + SET WS_CATALOG = 'hive'; +ALTER TABLE WRITE_SET CHANGE COLUMN WS_CATALOG WS_CATALOG varchar(256) NOT NULL; + +-- Update TXN_TO_WRITE_ID +ALTER TABLE TXN_TO_WRITE_ID ADD COLUMN T2W_CATALOG 
VARCHAR(256); +UPDATE TXN_TO_WRITE_ID + SET T2W_CATALOG = 'hive'; +ALTER TABLE TXN_TO_WRITE_ID CHANGE COLUMN T2W_CATALOG T2W_CATALOG varchar(256) NOT NULL; + +DROP INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID; +DROP INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID; +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +-- Update NEXT_WRITE_ID +ALTER TABLE NEXT_WRITE_ID ADD COLUMN NWI_CATALOG VARCHAR(256); +UPDATE NEXT_WRITE_ID + SET NWI_CATALOG = 'hive'; +ALTER TABLE NEXT_WRITE_ID CHANGE COLUMN NWI_CATALOG NWI_CATALOG varchar(256) NOT NULL; + +DROP INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID; +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); + -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='3.1.0', VERSION_COMMENT='Hive release version 3.1.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.0.0 to 3.1.0' AS ' '; diff --git standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql index 2d9b2b7844..8e8e933f3c 100644 --- standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql @@ -965,6 +965,7 @@ CREATE TABLE TXNS ( CREATE TABLE TXN_COMPONENTS ( TC_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID), + TC_CATALOG VARCHAR2(256) NOT NULL, TC_DATABASE VARCHAR2(128) NOT NULL, TC_TABLE VARCHAR2(128), TC_PARTITION VARCHAR2(767) NULL, @@ -976,6 +977,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID NUMBER(19) NOT NULL, + CTC_CATALOG VARCHAR2(256) NOT NULL, CTC_DATABASE VARCHAR2(128) NOT NULL, CTC_TABLE VARCHAR2(256), CTC_PARTITION VARCHAR2(767), @@ -983,7 +985,7 @@ CREATE TABLE 
COMPLETED_TXN_COMPONENTS ( CTC_WRITEID NUMBER(19) ) ROWDEPENDENCIES; -CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE NEXT_TXN_ID ( NTXN_NEXT NUMBER(19) NOT NULL @@ -994,6 +996,7 @@ CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID NUMBER(19) NOT NULL, HL_LOCK_INT_ID NUMBER(19) NOT NULL, HL_TXNID NUMBER(19) NOT NULL, + HL_CATALOG VARCHAR2(256) NOT NULL, HL_DB VARCHAR2(128) NOT NULL, HL_TABLE VARCHAR2(128), HL_PARTITION VARCHAR2(767), @@ -1019,6 +1022,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID NUMBER(19) PRIMARY KEY, + CQ_CATALOG varchar2(256) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(128) NOT NULL, CQ_PARTITION varchar(767), @@ -1040,6 +1044,7 @@ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID NUMBER(19) PRIMARY KEY, + CC_CATALOG varchar2(256) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(128) NOT NULL, CC_PARTITION varchar(767), @@ -1063,6 +1068,7 @@ CREATE TABLE AUX_TABLE ( ); CREATE TABLE WRITE_SET ( + WS_CATALOG varchar2(256) NOT NULL, WS_DATABASE varchar2(128) NOT NULL, WS_TABLE varchar2(128) NOT NULL, WS_PARTITION varchar2(767), @@ -1073,21 +1079,23 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID NUMBER(19) NOT NULL, + T2W_CATALOG VARCHAR2(256) NOT NULL, T2W_DATABASE VARCHAR2(128) NOT NULL, T2W_TABLE VARCHAR2(256) NOT NULL, T2W_WRITEID NUMBER(19) NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, 
T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG VARCHAR2(256) NOT NULL, NWI_DATABASE VARCHAR2(128) NOT NULL, NWI_TABLE VARCHAR2(256) NOT NULL, NWI_NEXT NUMBER(19) NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID NUMBER(19) NOT NULL, diff --git standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql index 2877c79842..4e0c05abdd 100644 --- standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql @@ -965,6 +965,7 @@ CREATE TABLE TXNS ( CREATE TABLE TXN_COMPONENTS ( TC_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID), + TC_CATALOG VARCHAR2(256) NOT NULL, TC_DATABASE VARCHAR2(128) NOT NULL, TC_TABLE VARCHAR2(128), TC_PARTITION VARCHAR2(767) NULL, @@ -976,6 +977,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID NUMBER(19) NOT NULL, + CTC_CATALOG VARCHAR2(256) NOT NULL, CTC_DATABASE VARCHAR2(128) NOT NULL, CTC_TABLE VARCHAR2(256), CTC_PARTITION VARCHAR2(767), @@ -983,7 +985,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_WRITEID NUMBER(19) ) ROWDEPENDENCIES; -CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE NEXT_TXN_ID ( NTXN_NEXT NUMBER(19) NOT NULL @@ -994,6 +996,7 @@ CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID NUMBER(19) NOT NULL, HL_LOCK_INT_ID NUMBER(19) NOT NULL, HL_TXNID NUMBER(19) NOT NULL, + HL_CATALOG VARCHAR2(256) NOT NULL, HL_DB VARCHAR2(128) NOT NULL, HL_TABLE VARCHAR2(128), HL_PARTITION VARCHAR2(767), @@ -1019,6 
+1022,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID NUMBER(19) PRIMARY KEY, + CQ_CATALOG varchar2(256) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(128) NOT NULL, CQ_PARTITION varchar(767), @@ -1040,6 +1044,7 @@ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID NUMBER(19) PRIMARY KEY, + CC_CATALOG varchar2(256) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(128) NOT NULL, CC_PARTITION varchar(767), @@ -1063,6 +1068,7 @@ CREATE TABLE AUX_TABLE ( ); CREATE TABLE WRITE_SET ( + WS_CATALOG varchar2(256) NOT NULL, WS_DATABASE varchar2(128) NOT NULL, WS_TABLE varchar2(128) NOT NULL, WS_PARTITION varchar2(767), @@ -1073,21 +1079,23 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID NUMBER(19) NOT NULL, + T2W_CATALOG VARCHAR2(256) NOT NULL, T2W_DATABASE VARCHAR2(128) NOT NULL, T2W_TABLE VARCHAR2(256) NOT NULL, T2W_WRITEID NUMBER(19) NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG VARCHAR2(256) NOT NULL, NWI_DATABASE VARCHAR2(128) NOT NULL, NWI_TABLE VARCHAR2(256) NOT NULL, NWI_NEXT NUMBER(19) NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID NUMBER(19) NOT NULL, diff --git standalone-metastore/src/main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql index 6c4c5be180..8203077cb6 100644 
--- standalone-metastore/src/main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql @@ -25,6 +25,66 @@ ALTER TABLE PART_COL_PRIVS ADD AUTHORIZER VARCHAR2(128) NULL; DROP INDEX PARTITIONCOLUMNPRIVILEGEINDEX; CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE); +--HIVE-19323 +-- Update TXN_COMPONENTS +ALTER TABLE TXN_COMPONENTS ADD TC_CATALOG VARCHAR2(256); +UPDATE TXN_COMPONENTS + SET TC_CATALOG = 'hive'; +ALTER TABLE TXN_COMPONENTS MODIFY TC_CATALOG NOT NULL; + +-- Update COMPLETED_TXN_COMPONENTS +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_CATALOG VARCHAR2(256); +UPDATE COMPLETED_TXN_COMPONENTS + SET CTC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY CTC_CATALOG NOT NULL; + +DROP INDEX COMPLETED_TXN_COMPONENTS_INDEX; +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +-- Update HIVE_LOCKS +ALTER TABLE HIVE_LOCKS ADD HL_CATALOG VARCHAR2(256); +UPDATE HIVE_LOCKS + SET HL_CATALOG = 'hive'; +ALTER TABLE HIVE_LOCKS MODIFY HL_CATALOG NOT NULL; + +-- Update COMPACTION_QUEUE +ALTER TABLE COMPACTION_QUEUE ADD CQ_CATALOG VARCHAR2(256); +UPDATE COMPACTION_QUEUE + SET CQ_CATALOG = 'hive'; +ALTER TABLE COMPACTION_QUEUE MODIFY CQ_CATALOG NOT NULL; + +-- Update COMPLETED_COMPACTIONS +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_CATALOG VARCHAR2(256); +UPDATE COMPLETED_COMPACTIONS + SET CC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_COMPACTIONS MODIFY CC_CATALOG NOT NULL; + +-- Update WRITE_SET +ALTER TABLE WRITE_SET ADD WS_CATALOG VARCHAR2(256); +UPDATE WRITE_SET + SET WS_CATALOG = 'hive'; +ALTER TABLE WRITE_SET MODIFY WS_CATALOG NOT NULL; + +-- Update TXN_TO_WRITE_ID +ALTER TABLE TXN_TO_WRITE_ID ADD T2W_CATALOG VARCHAR2(256); +UPDATE TXN_TO_WRITE_ID + SET T2W_CATALOG = 'hive'; +ALTER TABLE TXN_TO_WRITE_ID 
MODIFY T2W_CATALOG NOT NULL; + +DROP INDEX TBL_TO_TXN_ID_IDX; +DROP INDEX TBL_TO_WRITE_ID_IDX; +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +-- Update NEXT_WRITE_ID +ALTER TABLE NEXT_WRITE_ID ADD NWI_CATALOG VARCHAR2(256); +UPDATE NEXT_WRITE_ID + SET NWI_CATALOG = 'hive'; +ALTER TABLE NEXT_WRITE_ID MODIFY NWI_CATALOG NOT NULL; + +DROP INDEX NEXT_WRITE_ID_IDX; +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); + -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='3.1.0', VERSION_COMMENT='Hive release version 3.1.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.0.0 to 3.1.0' AS Status from dual; diff --git standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql index f8a073a57d..b3d4a5ed44 100644 --- standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql +++ standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql @@ -1652,6 +1652,7 @@ CREATE TABLE TXNS ( CREATE TABLE TXN_COMPONENTS ( TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + TC_CATALOG varchar(256) NOT NULL, TC_DATABASE varchar(128) NOT NULL, TC_TABLE varchar(128), TC_PARTITION varchar(767) DEFAULT NULL, @@ -1663,6 +1664,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID bigint NOT NULL, + CTC_CATALOG varchar(256) NOT NULL, CTC_DATABASE varchar(128) NOT NULL, CTC_TABLE varchar(256), CTC_PARTITION varchar(767), @@ -1670,7 +1672,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_WRITEID bigint ); -CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE 
INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE NEXT_TXN_ID ( NTXN_NEXT bigint NOT NULL @@ -1681,6 +1683,7 @@ CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, HL_TXNID bigint NOT NULL, + HL_CATALOG varchar(256) NOT NULL, HL_DB varchar(128) NOT NULL, HL_TABLE varchar(128), HL_PARTITION varchar(767) DEFAULT NULL, @@ -1706,6 +1709,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, + CQ_CATALOG varchar(256) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(128) NOT NULL, CQ_PARTITION varchar(767), @@ -1727,6 +1731,7 @@ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint PRIMARY KEY, + CC_CATALOG varchar(256) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(128) NOT NULL, CC_PARTITION varchar(767), @@ -1750,6 +1755,7 @@ CREATE TABLE AUX_TABLE ( ); CREATE TABLE WRITE_SET ( + WS_CATALOG varchar(256) NOT NULL, WS_DATABASE varchar(128) NOT NULL, WS_TABLE varchar(128) NOT NULL, WS_PARTITION varchar(767), @@ -1760,21 +1766,23 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG varchar(256) NOT NULL, T2W_DATABASE varchar(128) NOT NULL, T2W_TABLE varchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG varchar(256) NOT NULL, NWI_DATABASE varchar(128) NOT NULL, NWI_TABLE varchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ); -CREATE 
UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, diff --git standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql index 5f93ae07ce..895bc892c1 100644 --- standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql +++ standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql @@ -1652,6 +1652,7 @@ CREATE TABLE TXNS ( CREATE TABLE TXN_COMPONENTS ( TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + TC_CATALOG varchar(256) NOT NULL, TC_DATABASE varchar(128) NOT NULL, TC_TABLE varchar(128), TC_PARTITION varchar(767) DEFAULT NULL, @@ -1663,6 +1664,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID bigint NOT NULL, + CTC_CATALOG varchar(256) NOT NULL, CTC_DATABASE varchar(128) NOT NULL, CTC_TABLE varchar(256), CTC_PARTITION varchar(767), @@ -1670,7 +1672,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_WRITEID bigint ); -CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE NEXT_TXN_ID ( NTXN_NEXT bigint NOT NULL @@ -1681,6 +1683,7 @@ CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, HL_TXNID bigint NOT NULL, + HL_CATALOG varchar(256) NOT NULL, HL_DB varchar(128) NOT NULL, HL_TABLE varchar(128), HL_PARTITION varchar(767) DEFAULT NULL, @@ -1706,6 +1709,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, + CQ_CATALOG varchar(256) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE 
varchar(128) NOT NULL, CQ_PARTITION varchar(767), @@ -1727,6 +1731,7 @@ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint PRIMARY KEY, + CC_CATALOG varchar(256) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(128) NOT NULL, CC_PARTITION varchar(767), @@ -1750,6 +1755,7 @@ CREATE TABLE AUX_TABLE ( ); CREATE TABLE WRITE_SET ( + WS_CATALOG varchar(256) NOT NULL, WS_DATABASE varchar(128) NOT NULL, WS_TABLE varchar(128) NOT NULL, WS_PARTITION varchar(767), @@ -1760,21 +1766,23 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG varchar(256) NOT NULL, T2W_DATABASE varchar(128) NOT NULL, T2W_TABLE varchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG varchar(256) NOT NULL, NWI_DATABASE varchar(128) NOT NULL, NWI_TABLE varchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, diff --git standalone-metastore/src/main/sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql index 81f695c8d4..46d9f32d75 100644 --- standalone-metastore/src/main/sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql +++ standalone-metastore/src/main/sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql @@ -27,6 +27,66 @@ ALTER TABLE 
"PART_COL_PRIVS" ADD COLUMN "AUTHORIZER" character varying(128) DEFA DROP INDEX "PARTITIONCOLUMNPRIVILEGEINDEX"; CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); +-- HIVE-19323 +-- Update TXN_COMPONENTS +ALTER TABLE TXN_COMPONENTS ADD TC_CATALOG VARCHAR(256); +UPDATE TXN_COMPONENTS + SET TC_CATALOG = 'hive'; +ALTER TABLE TXN_COMPONENTS ALTER COLUMN TC_CATALOG SET NOT NULL; + +-- Update COMPLETED_TXN_COMPONENTS +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_CATALOG VARCHAR(256); +UPDATE COMPLETED_TXN_COMPONENTS + SET CTC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_CATALOG SET NOT NULL; + +DROP INDEX COMPLETED_TXN_COMPONENTS_INDEX; +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +-- Update HIVE_LOCKS +ALTER TABLE HIVE_LOCKS ADD HL_CATALOG VARCHAR(256); +UPDATE HIVE_LOCKS + SET HL_CATALOG = 'hive'; +ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_CATALOG SET NOT NULL; + +-- Update COMPACTION_QUEUE +ALTER TABLE COMPACTION_QUEUE ADD CQ_CATALOG VARCHAR(256); +UPDATE COMPACTION_QUEUE + SET CQ_CATALOG = 'hive'; +ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_CATALOG SET NOT NULL; + +-- Update COMPLETED_COMPACTIONS +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_CATALOG VARCHAR(256); +UPDATE COMPLETED_COMPACTIONS + SET CC_CATALOG = 'hive'; +ALTER TABLE COMPLETED_COMPACTIONS ALTER COLUMN CC_CATALOG SET NOT NULL; + +-- Update WRITE_SET +ALTER TABLE WRITE_SET ADD WS_CATALOG VARCHAR(256); +UPDATE WRITE_SET + SET WS_CATALOG = 'hive'; +ALTER TABLE WRITE_SET ALTER COLUMN WS_CATALOG SET NOT NULL; + +-- Update TXN_TO_WRITE_ID +ALTER TABLE TXN_TO_WRITE_ID ADD T2W_CATALOG VARCHAR(256); +UPDATE TXN_TO_WRITE_ID + SET T2W_CATALOG = 'hive'; +ALTER TABLE TXN_TO_WRITE_ID ALTER COLUMN T2W_CATALOG SET NOT NULL; + +DROP INDEX 
TBL_TO_TXN_ID_IDX; +DROP INDEX TBL_TO_WRITE_ID_IDX; +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +-- Update NEXT_WRITE_ID +ALTER TABLE NEXT_WRITE_ID ADD NWI_CATALOG VARCHAR(256); +UPDATE NEXT_WRITE_ID + SET NWI_CATALOG = 'hive'; +ALTER TABLE NEXT_WRITE_ID ALTER COLUMN NWI_CATALOG SET NOT NULL; + +DROP INDEX NEXT_WRITE_ID_IDX; +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); + -- These lines need to be last. Insert any changes above. UPDATE "VERSION" SET "SCHEMA_VERSION"='3.1.0', "VERSION_COMMENT"='Hive release version 3.1.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 3.0.0 to 3.1.0'; diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift index 3d85acfab3..c93519d5ec 100644 --- standalone-metastore/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -871,6 +871,7 @@ struct ReplTblWriteIdStateRequest { 4: required string dbName, 5: required string tableName, 6: optional list partNames, + 7: optional string catName, } // Request msg to get the valid write ids list for the given list of tables wrt to input validTxnList @@ -903,6 +904,7 @@ struct AllocateTableWriteIdsRequest { 4: optional string replPolicy, // The list is assumed to be sorted by both txnids and write ids. The write id list is assumed to be contiguous. 
5: optional list srcTxnToWriteIdList, + 6: optional string catName, } // Map for allocated write id against the txn for which it is allocated @@ -923,7 +925,8 @@ struct LockComponent { 5: optional string partitionname, 6: optional DataOperationType operationType = DataOperationType.UNSET, 7: optional bool isTransactional = false, - 8: optional bool isDynamicPartitionWrite = false + 8: optional bool isDynamicPartitionWrite = false, + 9: optional string catName } struct LockRequest { @@ -932,6 +935,7 @@ struct LockRequest { 3: required string user, // used in 'show locks' to help admins find who has open locks 4: required string hostname, // used in 'show locks' to help admins find who has open locks 5: optional string agentInfo = "Unknown", + 6: optional string catName } struct LockResponse { @@ -954,6 +958,7 @@ struct ShowLocksRequest { 2: optional string tablename, 3: optional string partname, 4: optional bool isExtended=false, + 5: optional string catName } struct ShowLocksResponseElement { @@ -973,6 +978,7 @@ struct ShowLocksResponseElement { 14: optional i64 blockedByExtId, 15: optional i64 blockedByIntId, 16: optional i64 lockIdInternal, + 17: optional string catName } struct ShowLocksResponse { @@ -1000,7 +1006,8 @@ struct CompactionRequest { 3: optional string partitionname, 4: required CompactionType type, 5: optional string runas, - 6: optional map properties + 6: optional map properties, + 7: optional string catName } struct CompactionResponse { @@ -1026,6 +1033,7 @@ struct ShowCompactResponseElement { 11: optional i64 endTime, 12: optional string hadoopJobId = "None", 13: optional i64 id, + 14: optional string catName } struct ShowCompactResponse { @@ -1038,7 +1046,8 @@ struct AddDynamicPartitions { 3: required string dbname, 4: required string tablename, 5: required list partitionnames, - 6: optional DataOperationType operationType = DataOperationType.UNSET + 6: optional DataOperationType operationType = DataOperationType.UNSET, + 7: optional string 
catName } struct BasicTxnInfo { @@ -1047,7 +1056,8 @@ struct BasicTxnInfo { 3: optional i64 txnid, 4: optional string dbname, 5: optional string tablename, - 6: optional string partitionname + 6: optional string partitionname, + 7: optional string catName } struct CreationMetadata {