diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 462580179c3484a645aaccc0ad37105b36f17a5b..7a9a75802b4059c0de49bd35b4b0548da84c5534 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -809,6 +809,9 @@ service ThriftHiveMetastore extends fb303.FacebookService
   void alter_table_with_environment_context(1:string dbname, 2:string tbl_name, 3:Table new_tbl,
       4:EnvironmentContext environment_context)
       throws (1:InvalidOperationException o1, 2:MetaException o2)
+  // alter table applies not only to future partitions but also cascades to existing partitions
+  void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade)
+      throws (1:InvalidOperationException o1, 2:MetaException o2)
   // the following applies to only tables that have partitions
   // * See notes on DDL_TIME
   Partition add_partition(1:Partition new_part)
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 2f9c6e0b0580f0a866fca52d699adffe65f0bd45..68cc668f272ca7c163fe7f55fc7dd320aef27502 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -5592,6 +5592,236 @@ uint32_t ThriftHiveMetastore_alter_table_with_environment_context_presult::read(
   return xfer;
 }
 
+uint32_t ThriftHiveMetastore_alter_table_with_cascade_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->dbname);
+          this->__isset.dbname = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->tbl_name);
+          this->__isset.tbl_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->new_tbl.read(iprot);
+          this->__isset.new_tbl = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->cascade);
+          this->__isset.cascade = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_alter_table_with_cascade_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_table_with_cascade_args");
+
+  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->dbname);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->tbl_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("new_tbl", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += this->new_tbl.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 4);
oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 4); + xfer += oprot->writeBool(this->cascade); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_table_with_cascade_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_table_with_cascade_pargs"); + + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->dbname))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("new_tbl", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += (*(this->new_tbl)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 4); + xfer += oprot->writeBool((*(this->cascade))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_table_with_cascade_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_table_with_cascade_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_table_with_cascade_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_table_with_cascade_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += 
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
 uint32_t ThriftHiveMetastore_add_partition_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   uint32_t xfer = 0;
@@ -28447,6 +28677,68 @@ void ThriftHiveMetastoreClient::recv_alter_table_with_environment_context()
   return;
 }
 
+void ThriftHiveMetastoreClient::alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade)
+{
+  send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade);
+  recv_alter_table_with_cascade();
+}
+
+void ThriftHiveMetastoreClient::send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_alter_table_with_cascade_pargs args;
+  args.dbname = &dbname;
+  args.tbl_name = &tbl_name;
+  args.new_tbl = &new_tbl;
+  args.cascade = &cascade;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void ThriftHiveMetastoreClient::recv_alter_table_with_cascade()
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("alter_table_with_cascade") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  ThriftHiveMetastore_alter_table_with_cascade_presult result;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.o1) {
+    throw result.o1;
+  }
+  if (result.__isset.o2) {
+    throw result.o2;
+  }
+  return;
+}
+
 void ThriftHiveMetastoreClient::add_partition(Partition& _return, const Partition& new_part)
 {
   send_add_partition(new_part);
@@ -35825,6 +36117,65 @@ void ThriftHiveMetastoreProcessor::process_alter_table_with_environment_context(
   }
 }
 
+void ThriftHiveMetastoreProcessor::process_alter_table_with_cascade(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_table_with_cascade", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_table_with_cascade");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_table_with_cascade");
+  }
+
+  ThriftHiveMetastore_alter_table_with_cascade_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_table_with_cascade", bytes);
+  }
+
+  ThriftHiveMetastore_alter_table_with_cascade_result result;
+  try {
+    iface_->alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade);
+  } catch (InvalidOperationException &o1) {
+    result.o1 = o1;
+    result.__isset.o1 = true;
+  } catch (MetaException &o2) {
+    result.o2 = o2;
+    result.__isset.o2 = true;
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_table_with_cascade");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_table_with_cascade");
+  }
+
+  oprot->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_table_with_cascade", bytes);
+  }
+}
+
 void ThriftHiveMetastoreProcessor::process_add_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
 {
   void* ctx = NULL;
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index c09353463a9a0f72d3eaae70678773490245a68a..7ebb423617fc07b1bbea9fe23c160f57154318bf 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -41,6 +41,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService
   virtual void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) = 0;
   virtual void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) = 0;
   virtual void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) = 0;
+  virtual void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) = 0;
   virtual void add_partition(Partition& _return, const Partition& new_part) = 0;
   virtual void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) = 0;
   virtual int32_t add_partitions(const std::vector<Partition> & new_parts) = 0;
@@ -238,6 +239,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
   void alter_table_with_environment_context(const std::string& /* dbname */, const std::string& /* tbl_name */, const Table& /* new_tbl */, const EnvironmentContext& /* environment_context */) {
     return;
   }
+  void alter_table_with_cascade(const std::string& /* dbname */, const std::string& /* tbl_name */, const Table& /* new_tbl */, const bool /* cascade */) {
+    return;
+  }
   void add_partition(Partition& /* _return */, const Partition& /* new_part */) {
     return;
   }
@@ -3858,6 +3862,151 @@ class ThriftHiveMetastore_alter_table_with_environment_context_presult {
 
 };
 
+typedef struct _ThriftHiveMetastore_alter_table_with_cascade_args__isset {
+  _ThriftHiveMetastore_alter_table_with_cascade_args__isset() : dbname(false), tbl_name(false), new_tbl(false), cascade(false) {}
+  bool dbname;
+  bool tbl_name;
+  bool new_tbl;
+  bool cascade;
+} _ThriftHiveMetastore_alter_table_with_cascade_args__isset;
+
+class ThriftHiveMetastore_alter_table_with_cascade_args {
+ public:
+
+  ThriftHiveMetastore_alter_table_with_cascade_args() : dbname(), tbl_name(), cascade(0) {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_table_with_cascade_args() throw() {}
+
+  std::string dbname;
+  std::string tbl_name;
+  Table new_tbl;
+  bool cascade;
+
+  _ThriftHiveMetastore_alter_table_with_cascade_args__isset __isset;
+
+  void __set_dbname(const std::string& val) {
+    dbname = val;
+  }
+
+  void __set_tbl_name(const std::string& val) {
+    tbl_name = val;
+  }
+
+  void __set_new_tbl(const Table& val) {
+    new_tbl = val;
+  }
+
+  void __set_cascade(const bool val) {
+    cascade = val;
+  }
+
+  bool operator == (const ThriftHiveMetastore_alter_table_with_cascade_args & rhs) const
+  {
+    if (!(dbname == rhs.dbname))
+      return false;
+    if (!(tbl_name == rhs.tbl_name))
+      return false;
+    if (!(new_tbl == rhs.new_tbl))
+      return false;
+    if (!(cascade == rhs.cascade))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_table_with_cascade_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_table_with_cascade_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_alter_table_with_cascade_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_table_with_cascade_pargs() throw() {}
+
+  const std::string* dbname;
+  const std::string* tbl_name;
+  const Table* new_tbl;
+  const bool* cascade;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_table_with_cascade_result__isset {
+  _ThriftHiveMetastore_alter_table_with_cascade_result__isset() : o1(false), o2(false) {}
+  bool o1;
+  bool o2;
+} _ThriftHiveMetastore_alter_table_with_cascade_result__isset;
+
+class ThriftHiveMetastore_alter_table_with_cascade_result {
+ public:
+
+  ThriftHiveMetastore_alter_table_with_cascade_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_table_with_cascade_result() throw() {}
+
+  InvalidOperationException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_alter_table_with_cascade_result__isset __isset;
+
+  void __set_o1(const InvalidOperationException& val) {
+    o1 = val;
+  }
+
+  void __set_o2(const MetaException& val) {
+    o2 = val;
+  }
+
+  bool operator == (const ThriftHiveMetastore_alter_table_with_cascade_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_table_with_cascade_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_table_with_cascade_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_table_with_cascade_presult__isset {
+  _ThriftHiveMetastore_alter_table_with_cascade_presult__isset() : o1(false), o2(false) {}
+  bool o1;
+  bool o2;
+} _ThriftHiveMetastore_alter_table_with_cascade_presult__isset;
+
+class ThriftHiveMetastore_alter_table_with_cascade_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_table_with_cascade_presult() throw() {}
+
+  InvalidOperationException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_alter_table_with_cascade_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
 typedef struct _ThriftHiveMetastore_add_partition_args__isset {
   _ThriftHiveMetastore_add_partition_args__isset() : new_part(false) {}
   bool new_part;
@@ -16505,6 +16654,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
   void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
   void send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
   void recv_alter_table_with_environment_context();
+  void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade);
+  void send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade);
+  void recv_alter_table_with_cascade();
   void add_partition(Partition& _return, const Partition& new_part);
   void send_add_partition(const Partition& new_part);
   void recv_add_partition(Partition& _return);
@@ -16813,6 +16965,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
   void process_get_table_names_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_alter_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_alter_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_alter_table_with_cascade(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_add_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_add_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_add_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@@ -16933,6 +17086,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
     processMap_["get_table_names_by_filter"] = &ThriftHiveMetastoreProcessor::process_get_table_names_by_filter;
     processMap_["alter_table"] = &ThriftHiveMetastoreProcessor::process_alter_table;
     processMap_["alter_table_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_alter_table_with_environment_context;
+    processMap_["alter_table_with_cascade"] = &ThriftHiveMetastoreProcessor::process_alter_table_with_cascade;
    processMap_["add_partition"] = &ThriftHiveMetastoreProcessor::process_add_partition;
    processMap_["add_partition_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_add_partition_with_environment_context;
processMap_["add_partitions"] = &ThriftHiveMetastoreProcessor::process_add_partitions; @@ -17295,6 +17449,15 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi ifaces_[i]->alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); } + void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); + } + ifaces_[i]->alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); + } + void add_partition(Partition& _return, const Partition& new_part) { size_t sz = ifaces_.size(); size_t i = 0; diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 0973e844f09f1b53123d60d039ba451961eddc04..e5c1fcbf91775ec9a47907988635a8ee7c3daa66 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -147,6 +147,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("alter_table_with_environment_context\n"); } + void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) { + // Your implementation goes here + printf("alter_table_with_cascade\n"); + } + void add_partition(Partition& _return, const Partition& new_part) { // Your implementation goes here printf("add_partition\n"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index bf6291672f31e476bba90667f527eebc842699d6..09fd71b7dded483d0bbba9f4786e439c99cab57d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -88,6 +88,8 @@ public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public Partition add_partition(Partition new_part) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; public Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; @@ -324,6 +326,8 @@ public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void add_partition(Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
 org.apache.thrift.TException;
 
     public void add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.add_partition_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
@@ -1256,6 +1260,35 @@ public void recv_alter_table_with_environment_context() throws InvalidOperationE
       return;
     }
 
+    public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade);
+      recv_alter_table_with_cascade();
+    }
+
+    public void send_alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade) throws org.apache.thrift.TException
+    {
+      alter_table_with_cascade_args args = new alter_table_with_cascade_args();
+      args.setDbname(dbname);
+      args.setTbl_name(tbl_name);
+      args.setNew_tbl(new_tbl);
+      args.setCascade(cascade);
+      sendBase("alter_table_with_cascade", args);
+    }
+
+    public void recv_alter_table_with_cascade() throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      alter_table_with_cascade_result result = new alter_table_with_cascade_result();
+      receiveBase(result, "alter_table_with_cascade");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
     public Partition add_partition(Partition new_part) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException
     {
       send_add_partition(new_part);
@@ -4848,6 +4881,47 @@ public void getResult() throws InvalidOperationException, MetaException, org.apa
       }
     }
 
+    public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback<alter_table_with_cascade_call> resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      alter_table_with_cascade_call method_call = new alter_table_with_cascade_call(dbname, tbl_name, new_tbl, cascade, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class alter_table_with_cascade_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String dbname;
+      private String tbl_name;
+      private Table new_tbl;
+      private boolean cascade;
+      public alter_table_with_cascade_call(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback<alter_table_with_cascade_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.dbname = dbname;
+        this.tbl_name = tbl_name;
+        this.new_tbl = new_tbl;
+        this.cascade = cascade;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_table_with_cascade", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        alter_table_with_cascade_args args = new alter_table_with_cascade_args();
+        args.setDbname(dbname);
+        args.setTbl_name(tbl_name);
+        args.setNew_tbl(new_tbl);
+        args.setCascade(cascade);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_alter_table_with_cascade();
+      }
+    }
+
     public void add_partition(Partition new_part, org.apache.thrift.async.AsyncMethodCallback<add_partition_call> resultHandler) throws org.apache.thrift.TException {
       checkReady();
       add_partition_call method_call = new add_partition_call(new_part, resultHandler, this, ___protocolFactory, ___transport);
@@ -8167,6 +8241,7 @@ protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ?
      processMap.put("get_table_names_by_filter", new get_table_names_by_filter());
      processMap.put("alter_table", new alter_table());
      processMap.put("alter_table_with_environment_context", new alter_table_with_environment_context());
+      processMap.put("alter_table_with_cascade", new alter_table_with_cascade());
      processMap.put("add_partition", new add_partition());
      processMap.put("add_partition_with_environment_context", new add_partition_with_environment_context());
      processMap.put("add_partitions", new add_partitions());
 
+    public static class alter_table_with_cascade<I extends Iface> extends org.apache.thrift.ProcessFunction<I, alter_table_with_cascade_args> {
+      public alter_table_with_cascade() {
+        super("alter_table_with_cascade");
+      }
+
+      public alter_table_with_cascade_args getEmptyArgsInstance() {
+        return new alter_table_with_cascade_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public alter_table_with_cascade_result getResult(I iface, alter_table_with_cascade_args args) throws org.apache.thrift.TException {
+        alter_table_with_cascade_result result = new alter_table_with_cascade_result();
+        try {
+          iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade);
+        } catch (InvalidOperationException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
     public static class add_partition<I extends Iface> extends org.apache.thrift.ProcessFunction<I, add_partition_args> {
       public add_partition() {
         super("add_partition");
@@ -34211,17 +34312,1142 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_table_names_by_filter_result(");
+      StringBuilder sb = new StringBuilder("get_table_names_by_filter_result(");
+      boolean first = true;
+
+      sb.append("success:");
+      if (this.success == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.success);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o1:");
+      if (this.o1 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o1);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o2:");
+      if (this.o2 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o2);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o3:");
+      if (this.o3 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o3);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class get_table_names_by_filter_resultStandardSchemeFactory implements SchemeFactory {
+      public get_table_names_by_filter_resultStandardScheme getScheme() {
+        return new get_table_names_by_filter_resultStandardScheme();
+      }
+    }
+
+    private static class get_table_names_by_filter_resultStandardScheme extends StandardScheme<get_table_names_by_filter_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_filter_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+            break;
+          }
+          switch (schemeField.id) {
+            case 0: // SUCCESS
+              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+                {
+                  org.apache.thrift.protocol.TList _list566 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list566.size);
+                  for (int _i567 = 0; _i567 < _list566.size; ++_i567)
+                  {
+                    String _elem568; // required
+                    _elem568 = iprot.readString();
+                    struct.success.add(_elem568);
+                  }
+                  iprot.readListEnd();
+                }
+                struct.setSuccessIsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 1: // O1
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o1 = new MetaException();
+                struct.o1.read(iprot);
+                struct.setO1IsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // O2
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o2 = new InvalidOperationException();
+                struct.o2.read(iprot);
+                struct.setO2IsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 3: // O3
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o3 = new UnknownDBException();
+                struct.o3.read(iprot);
+                struct.setO3IsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by_filter_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.success != null) {
+          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
+            for (String _iter569 : struct.success)
+            {
+              oprot.writeString(_iter569);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+        if (struct.o1 != null) {
+          oprot.writeFieldBegin(O1_FIELD_DESC);
+          struct.o1.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        if (struct.o2 != null) {
+          oprot.writeFieldBegin(O2_FIELD_DESC);
+          struct.o2.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        if (struct.o3 != null) {
+          oprot.writeFieldBegin(O3_FIELD_DESC);
+          struct.o3.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class get_table_names_by_filter_resultTupleSchemeFactory implements SchemeFactory {
+      public get_table_names_by_filter_resultTupleScheme getScheme() {
+        return new get_table_names_by_filter_resultTupleScheme();
+      }
+    }
+
+    private static class get_table_names_by_filter_resultTupleScheme extends TupleScheme<get_table_names_by_filter_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_filter_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetSuccess()) {
+          optionals.set(0);
+        }
+        if (struct.isSetO1()) {
+          optionals.set(1);
+        }
+        if (struct.isSetO2()) {
+          optionals.set(2);
+        }
+        if (struct.isSetO3()) {
+          optionals.set(3);
+        }
+        oprot.writeBitSet(optionals, 4);
+        if (struct.isSetSuccess()) {
+          {
+            oprot.writeI32(struct.success.size());
+            for (String _iter570 : struct.success)
+            {
+              oprot.writeString(_iter570);
+            }
+          }
+        }
+        if (struct.isSetO1()) {
+          struct.o1.write(oprot);
+        }
+        if (struct.isSetO2()) {
+          struct.o2.write(oprot);
+        }
+        if (struct.isSetO3()) {
+          struct.o3.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_filter_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(4);
+        if (incoming.get(0)) {
+          {
+            org.apache.thrift.protocol.TList _list571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list571.size);
+            for (int _i572 = 0; _i572 < _list571.size; ++_i572)
+            {
+              String _elem573; // required
+              _elem573 = iprot.readString();
+              struct.success.add(_elem573);
+            }
+          }
+          struct.setSuccessIsSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.o1 = new MetaException();
+          struct.o1.read(iprot);
+          struct.setO1IsSet(true);
+        }
+        if (incoming.get(2)) {
+          struct.o2 = new InvalidOperationException();
+          struct.o2.read(iprot);
+          struct.setO2IsSet(true);
+        }
+        if (incoming.get(3)) {
+          struct.o3 = new UnknownDBException();
+          struct.o3.read(iprot);
+          struct.setO3IsSet(true);
+        }
+      }
+    }
+
+  }
+
+  public static class alter_table_args implements org.apache.thrift.TBase<alter_table_args, alter_table_args._Fields>, java.io.Serializable, Cloneable {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_args");
+
+    private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);
+    private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2);
+    private static final org.apache.thrift.protocol.TField NEW_TBL_FIELD_DESC = new org.apache.thrift.protocol.TField("new_tbl", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new alter_table_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new alter_table_argsTupleSchemeFactory());
+    }
+
+    private String dbname; // required
+    private String tbl_name; // required
+    private Table new_tbl; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      DBNAME((short)1, "dbname"),
+      TBL_NAME((short)2, "tbl_name"),
+      NEW_TBL((short)3, "new_tbl");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // DBNAME
+            return DBNAME;
+          case 2: // TBL_NAME
+            return TBL_NAME;
+          case 3: // NEW_TBL
+            return NEW_TBL;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.NEW_TBL, new org.apache.thrift.meta_data.FieldMetaData("new_tbl", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_args.class, metaDataMap);
+    }
+
+    public alter_table_args() {
+    }
+
+    public alter_table_args(
+      String dbname,
+      String tbl_name,
+      Table new_tbl)
+    {
+      this();
+      this.dbname = dbname;
+      this.tbl_name = tbl_name;
+      this.new_tbl = new_tbl;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public alter_table_args(alter_table_args other) {
+      if (other.isSetDbname()) {
+        this.dbname = other.dbname;
+      }
+      if (other.isSetTbl_name()) {
+        this.tbl_name = other.tbl_name;
+      }
+      if (other.isSetNew_tbl()) {
+        this.new_tbl = new Table(other.new_tbl);
+      }
+    }
+
+    public alter_table_args deepCopy() {
+      return new alter_table_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.dbname = null;
+      this.tbl_name = null;
+      this.new_tbl = null;
+    }
+
+    public String getDbname() {
+      return this.dbname;
+    }
+
+    public void setDbname(String dbname) {
+      this.dbname = dbname;
+    }
+
+    public void unsetDbname() {
+      this.dbname = null;
+    }
+
+    /** Returns true if field dbname is set (has been assigned a value) and false otherwise */
+    public boolean isSetDbname() {
+      return this.dbname != null;
+    }
+
+    public void setDbnameIsSet(boolean value) {
+      if (!value) {
+        this.dbname = null;
+      }
+    }
+
+    public String getTbl_name() {
+      return this.tbl_name;
+    }
+
+    public void setTbl_name(String tbl_name) {
+      this.tbl_name = tbl_name;
+    }
+
+    public void unsetTbl_name() {
+      this.tbl_name = null;
+    }
+
+    /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */
+    public boolean isSetTbl_name() {
+      return this.tbl_name != null;
+    }
+
+    public void setTbl_nameIsSet(boolean value) {
+      if (!value) {
+        this.tbl_name = null;
+      }
+    }
+
+    public Table getNew_tbl() {
+      return this.new_tbl;
+    }
+
+    public void setNew_tbl(Table new_tbl) {
+      this.new_tbl = new_tbl;
+    }
+
+    public void unsetNew_tbl() {
+      this.new_tbl = null;
+    }
+
+    /** Returns true if field new_tbl is set (has been assigned a value) and false otherwise */
+    public boolean isSetNew_tbl() {
+      return this.new_tbl != null;
+    }
+
+    public void setNew_tblIsSet(boolean value) {
+      if (!value) {
+        this.new_tbl = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case DBNAME:
+        if (value == null) {
+          unsetDbname();
+        } else {
+          setDbname((String)value);
+        }
+        break;
+
+      case TBL_NAME:
+        if (value == null) {
+          unsetTbl_name();
+        } else {
+          setTbl_name((String)value);
+        }
+        break;
+
+      case NEW_TBL:
+        if (value == null) {
+          unsetNew_tbl();
+        } else {
+          setNew_tbl((Table)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case DBNAME:
+        return getDbname();
+
+      case TBL_NAME:
+        return getTbl_name();
+
+      case NEW_TBL:
+        return getNew_tbl();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case DBNAME:
+        return isSetDbname();
+      case TBL_NAME:
+        return isSetTbl_name();
+      case NEW_TBL:
+        return isSetNew_tbl();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof alter_table_args)
+        return this.equals((alter_table_args)that);
+      return false;
+    }
+
+    public boolean equals(alter_table_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_dbname = true && this.isSetDbname();
+      boolean that_present_dbname = true && that.isSetDbname();
+      if (this_present_dbname || that_present_dbname) {
+        if (!(this_present_dbname && that_present_dbname))
+          return false;
+        if (!this.dbname.equals(that.dbname))
+          return false;
+      }
+
+      boolean this_present_tbl_name = true && this.isSetTbl_name();
+      boolean that_present_tbl_name = true && that.isSetTbl_name();
+      if (this_present_tbl_name || that_present_tbl_name) {
+        if (!(this_present_tbl_name && that_present_tbl_name))
+          return false;
+        if (!this.tbl_name.equals(that.tbl_name))
+          return false;
+      }
+
+      boolean this_present_new_tbl = true && this.isSetNew_tbl();
+      boolean that_present_new_tbl = true && that.isSetNew_tbl();
+      if (this_present_new_tbl || that_present_new_tbl) {
+        if (!(this_present_new_tbl && that_present_new_tbl))
+          return false;
+        if (!this.new_tbl.equals(that.new_tbl))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      HashCodeBuilder builder = new HashCodeBuilder();
+
+      boolean present_dbname = true && (isSetDbname());
+      builder.append(present_dbname);
+      if (present_dbname)
+        builder.append(dbname);
+
+      boolean present_tbl_name = true && (isSetTbl_name());
+      builder.append(present_tbl_name);
+      if (present_tbl_name)
+        builder.append(tbl_name);
+
+      boolean present_new_tbl = true && (isSetNew_tbl());
+      builder.append(present_new_tbl);
+      if (present_new_tbl)
+        builder.append(new_tbl);
+
+      return builder.toHashCode();
+    }
+
+    public int compareTo(alter_table_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+      alter_table_args typedOther = (alter_table_args)other;
+
+      lastComparison = Boolean.valueOf(isSetDbname()).compareTo(typedOther.isSetDbname());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetDbname()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, typedOther.dbname);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(typedOther.isSetTbl_name());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetTbl_name()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, typedOther.tbl_name);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetNew_tbl()).compareTo(typedOther.isSetNew_tbl());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetNew_tbl()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.new_tbl, typedOther.new_tbl);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("alter_table_args(");
+      boolean first = true;
+
+      sb.append("dbname:");
+      if (this.dbname == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.dbname);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("tbl_name:");
+      if (this.tbl_name == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.tbl_name);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("new_tbl:");
+      if (this.new_tbl == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.new_tbl);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+      if (new_tbl != null) {
+        new_tbl.validate();
+      }
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class alter_table_argsStandardSchemeFactory implements SchemeFactory {
+      public alter_table_argsStandardScheme getScheme() {
+        return new alter_table_argsStandardScheme();
+      }
+    }
+
+    private static class alter_table_argsStandardScheme extends StandardScheme<alter_table_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // DBNAME
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.dbname = iprot.readString();
+                struct.setDbnameIsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // TBL_NAME
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.tbl_name = iprot.readString();
+                struct.setTbl_nameIsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 3: // NEW_TBL
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.new_tbl = new Table();
+                struct.new_tbl.read(iprot);
+                struct.setNew_tblIsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.dbname != null) {
+          oprot.writeFieldBegin(DBNAME_FIELD_DESC);
+          oprot.writeString(struct.dbname);
+          oprot.writeFieldEnd();
+        }
+        if (struct.tbl_name != null) {
+          oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+          oprot.writeString(struct.tbl_name);
+          oprot.writeFieldEnd();
+        }
+        if (struct.new_tbl != null) {
+          oprot.writeFieldBegin(NEW_TBL_FIELD_DESC);
+          struct.new_tbl.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class alter_table_argsTupleSchemeFactory implements SchemeFactory {
+      public alter_table_argsTupleScheme getScheme() {
+        return new alter_table_argsTupleScheme();
+      }
+    }
+
+    private static class alter_table_argsTupleScheme extends TupleScheme<alter_table_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetDbname()) {
+          optionals.set(0);
+        }
+        if (struct.isSetTbl_name()) {
+          optionals.set(1);
+        }
+        if (struct.isSetNew_tbl()) {
+          optionals.set(2);
+        }
+        oprot.writeBitSet(optionals, 3);
+        if (struct.isSetDbname()) {
+          oprot.writeString(struct.dbname);
+        }
+        if (struct.isSetTbl_name()) {
+          oprot.writeString(struct.tbl_name);
+        }
+        if (struct.isSetNew_tbl()) {
+          struct.new_tbl.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(3);
+        if (incoming.get(0)) {
+          struct.dbname = iprot.readString();
+          struct.setDbnameIsSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.tbl_name = iprot.readString();
+          struct.setTbl_nameIsSet(true);
+        }
+        if (incoming.get(2)) {
+          struct.new_tbl = new Table();
+          struct.new_tbl.read(iprot);
+          struct.setNew_tblIsSet(true);
+        }
+      }
+    }
+
+  }
+
+  public static class alter_table_result implements org.apache.thrift.TBase<alter_table_result, alter_table_result._Fields>, java.io.Serializable, Cloneable {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_result");
+
+    private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+    private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new alter_table_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new alter_table_resultTupleSchemeFactory());
+    }
+
+    private InvalidOperationException o1; // required
+    private MetaException o2; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      O1((short)1, "o1"),
+      O2((short)2, "o2");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // O1
+            return O1;
+          case 2: // O2
+            return O2;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_result.class, metaDataMap);
+    }
+
+    public alter_table_result() {
+    }
+
+    public alter_table_result(
+      InvalidOperationException o1,
+      MetaException o2)
+    {
+      this();
+      this.o1 = o1;
+      this.o2 = o2;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public alter_table_result(alter_table_result other) {
+      if (other.isSetO1()) {
+        this.o1 = new InvalidOperationException(other.o1);
+      }
+      if (other.isSetO2()) {
+        this.o2 = new MetaException(other.o2);
+      }
+    }
+
+    public alter_table_result deepCopy() {
+      return new alter_table_result(this);
+    }
+
+    @Override
+    public void clear() {
+      this.o1 = null;
+      this.o2 = null;
+    }
+
+    public InvalidOperationException getO1() {
+      return this.o1;
+    }
+
+    public void setO1(InvalidOperationException o1) {
+      this.o1 = o1;
+    }
+
+    public void unsetO1() {
+      this.o1 = null;
+    }
+
+    /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO1() {
+      return this.o1 != null;
+    }
+
+    public void setO1IsSet(boolean value) {
+      if (!value) {
+        this.o1 = null;
+      }
+    }
+
+    public MetaException getO2() {
+      return this.o2;
+    }
+
+    public void setO2(MetaException o2) {
+      this.o2 = o2;
+    }
+
+    public void unsetO2() {
+      this.o2 = null;
+    }
+
+    /** Returns true if field o2 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO2() {
+      return this.o2 != null;
+    }
+
+    public void setO2IsSet(boolean value) {
+      if (!value) {
+        this.o2 = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case O1:
+        if (value == null) {
+          unsetO1();
+        } else {
+          setO1((InvalidOperationException)value);
+        }
+        break;
+
+      case O2:
+        if (value == null) {
+          unsetO2();
+        } else {
+          setO2((MetaException)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case O1:
+        return getO1();
+
+      case O2:
+        return getO2();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case O1:
+        return isSetO1();
+      case O2:
+        return isSetO2();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_table_result) + return this.equals((alter_table_result)that); + return false; + } + + public boolean equals(alter_table_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_o1 = true && (isSetO1()); + builder.append(present_o1); + if (present_o1) + builder.append(o1); + + boolean present_o2 = true && (isSetO2()); + builder.append(present_o2); + if (present_o2) + builder.append(o2); + + return builder.toHashCode(); + } + + public int compareTo(alter_table_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + alter_table_result typedOther = (alter_table_result)other; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(typedOther.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, typedOther.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_table_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -34237,14 +35463,6 @@ public String toString() { sb.append(this.o2); } first = false; - if (!first) sb.append(", "); - sb.append("o3:"); - if (this.o3 == null) { - sb.append("null"); - } else { - sb.append(this.o3); - } - first = false; sb.append(")"); return sb.toString(); } @@ -34270,15 +35488,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_table_names_by_filter_resultStandardSchemeFactory implements SchemeFactory { - public get_table_names_by_filter_resultStandardScheme getScheme() { - return new get_table_names_by_filter_resultStandardScheme(); + private static class alter_table_resultStandardSchemeFactory implements 
SchemeFactory { + public alter_table_resultStandardScheme getScheme() { + return new alter_table_resultStandardScheme(); } } - private static class get_table_names_by_filter_resultStandardScheme extends StandardScheme { + private static class alter_table_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -34288,27 +35506,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ break; } switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list566 = iprot.readListBegin(); - struct.success = new ArrayList(_list566.size); - for (int _i567 = 0; _i567 < _list566.size; ++_i567) - { - String _elem568; // required - _elem568 = iprot.readString(); - struct.success.add(_elem568); - } - iprot.readListEnd(); - } - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new MetaException(); + struct.o1 = new InvalidOperationException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -34317,22 +35517,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new InvalidOperationException(); + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // O3 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o3 = new UnknownDBException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -34342,22 +35533,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter569 : struct.success) - { - oprot.writeString(_iter569); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -34368,121 +35547,83 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by struct.o2.write(oprot); oprot.writeFieldEnd(); } - if (struct.o3 != null) { - oprot.writeFieldBegin(O3_FIELD_DESC); - struct.o3.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static 
class get_table_names_by_filter_resultTupleSchemeFactory implements SchemeFactory { - public get_table_names_by_filter_resultTupleScheme getScheme() { - return new get_table_names_by_filter_resultTupleScheme(); + private static class alter_table_resultTupleSchemeFactory implements SchemeFactory { + public alter_table_resultTupleScheme getScheme() { + return new alter_table_resultTupleScheme(); } } - private static class get_table_names_by_filter_resultTupleScheme extends TupleScheme { + private static class alter_table_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } if (struct.isSetO1()) { - optionals.set(1); + optionals.set(0); } if (struct.isSetO2()) { - optionals.set(2); - } - if (struct.isSetO3()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (String _iter570 : struct.success) - { - oprot.writeString(_iter570); - } - } + optionals.set(1); } + oprot.writeBitSet(optionals, 2); if (struct.isSetO1()) { struct.o1.write(oprot); } if (struct.isSetO2()) { struct.o2.write(oprot); } - if (struct.isSetO3()) { - struct.o3.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list571.size); - for (int _i572 = 0; _i572 < _list571.size; ++_i572) - { - String _elem573; // required - _elem573 = iprot.readString(); - struct.success.add(_elem573); - } - } - struct.setSuccessIsSet(true); - } - if (incoming.get(1)) { - struct.o1 = new MetaException(); + struct.o1 = new InvalidOperationException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(2)) { - struct.o2 = new InvalidOperationException(); + if (incoming.get(1)) { + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } - if (incoming.get(3)) { - struct.o3 = new UnknownDBException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } } } } - public static class alter_table_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_args"); + public static class alter_table_with_environment_context_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_with_environment_context_args"); private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); 
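    // A minimal usage sketch of how a caller might drive these args through the
    // generated client; hedged: the endpoint (localhost:9083), database/table/
    // column names, and the stats property key are illustrative assumptions,
    // not taken from this patch.
    //
    //   TTransport transport = new TSocket("localhost", 9083);
    //   transport.open();
    //   ThriftHiveMetastore.Client client =
    //       new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    //   Table t = client.get_table("default", "web_logs");
    //   EnvironmentContext ctx = new EnvironmentContext();
    //   ctx.putToProperties("DO_NOT_UPDATE_STATS", "true");
    //   client.alter_table_with_environment_context("default", "web_logs", t, ctx);
    //   // the variant added by this patch instead takes a boolean that also
    //   // cascades the schema change to existing partitions:
    //   client.alter_table_with_cascade("default", "web_logs", t, true);
    //   transport.close();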
private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField NEW_TBL_FIELD_DESC = new org.apache.thrift.protocol.TField("new_tbl", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_table_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_table_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new alter_table_with_environment_context_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_table_with_environment_context_argsTupleSchemeFactory()); } private String dbname; // required private String tbl_name; // required private Table new_tbl; // required + private EnvironmentContext environment_context; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), TBL_NAME((short)2, "tbl_name"), - NEW_TBL((short)3, "new_tbl"); + NEW_TBL((short)3, "new_tbl"), + ENVIRONMENT_CONTEXT((short)4, "environment_context"); private static final Map byName = new HashMap(); @@ -34503,6 +35644,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // NEW_TBL return NEW_TBL; + case 4: // ENVIRONMENT_CONTEXT + return ENVIRONMENT_CONTEXT; default: return null; } @@ -34552,28 +35695,32 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.NEW_TBL, new org.apache.thrift.meta_data.FieldMetaData("new_tbl", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); + tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_with_environment_context_args.class, metaDataMap); } - public alter_table_args() { + public alter_table_with_environment_context_args() { } - public alter_table_args( + public alter_table_with_environment_context_args( String dbname, String tbl_name, - Table new_tbl) + Table new_tbl, + EnvironmentContext environment_context) { this(); this.dbname = dbname; this.tbl_name = tbl_name; this.new_tbl = new_tbl; + this.environment_context = environment_context; } /** * Performs a deep copy on other. 
*/ - public alter_table_args(alter_table_args other) { + public alter_table_with_environment_context_args(alter_table_with_environment_context_args other) { if (other.isSetDbname()) { this.dbname = other.dbname; } @@ -34583,10 +35730,13 @@ public alter_table_args(alter_table_args other) { if (other.isSetNew_tbl()) { this.new_tbl = new Table(other.new_tbl); } + if (other.isSetEnvironment_context()) { + this.environment_context = new EnvironmentContext(other.environment_context); + } } - public alter_table_args deepCopy() { - return new alter_table_args(this); + public alter_table_with_environment_context_args deepCopy() { + return new alter_table_with_environment_context_args(this); } @Override @@ -34594,6 +35744,7 @@ public void clear() { this.dbname = null; this.tbl_name = null; this.new_tbl = null; + this.environment_context = null; } public String getDbname() { @@ -34665,6 +35816,29 @@ public void setNew_tblIsSet(boolean value) { } } + public EnvironmentContext getEnvironment_context() { + return this.environment_context; + } + + public void setEnvironment_context(EnvironmentContext environment_context) { + this.environment_context = environment_context; + } + + public void unsetEnvironment_context() { + this.environment_context = null; + } + + /** Returns true if field environment_context is set (has been assigned a value) and false otherwise */ + public boolean isSetEnvironment_context() { + return this.environment_context != null; + } + + public void setEnvironment_contextIsSet(boolean value) { + if (!value) { + this.environment_context = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -34691,6 +35865,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ENVIRONMENT_CONTEXT: + if (value == null) { + unsetEnvironment_context(); + } else { + setEnvironment_context((EnvironmentContext)value); + } + break; + } } @@ -34705,6 +35887,9 @@ public Object getFieldValue(_Fields field) { case NEW_TBL: return getNew_tbl(); + case ENVIRONMENT_CONTEXT: + return getEnvironment_context(); + } throw new IllegalStateException(); } @@ -34722,6 +35907,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case NEW_TBL: return isSetNew_tbl(); + case ENVIRONMENT_CONTEXT: + return isSetEnvironment_context(); } throw new IllegalStateException(); } @@ -34730,12 +35917,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_table_args) - return this.equals((alter_table_args)that); + if (that instanceof alter_table_with_environment_context_args) + return this.equals((alter_table_with_environment_context_args)that); return false; } - public boolean equals(alter_table_args that) { + public boolean equals(alter_table_with_environment_context_args that) { if (that == null) return false; @@ -34766,6 +35953,15 @@ public boolean equals(alter_table_args that) { return false; } + boolean this_present_environment_context = true && this.isSetEnvironment_context(); + boolean that_present_environment_context = true && that.isSetEnvironment_context(); + if (this_present_environment_context || that_present_environment_context) { + if (!(this_present_environment_context && that_present_environment_context)) + return false; + if (!this.environment_context.equals(that.environment_context)) + return false; + } + return true; } @@ -34788,16 +35984,21 @@ public int hashCode() { if (present_new_tbl) builder.append(new_tbl); + boolean 
present_environment_context = true && (isSetEnvironment_context()); + builder.append(present_environment_context); + if (present_environment_context) + builder.append(environment_context); + return builder.toHashCode(); } - public int compareTo(alter_table_args other) { + public int compareTo(alter_table_with_environment_context_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - alter_table_args typedOther = (alter_table_args)other; + alter_table_with_environment_context_args typedOther = (alter_table_with_environment_context_args)other; lastComparison = Boolean.valueOf(isSetDbname()).compareTo(typedOther.isSetDbname()); if (lastComparison != 0) { @@ -34829,6 +36030,16 @@ public int compareTo(alter_table_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetEnvironment_context()).compareTo(typedOther.isSetEnvironment_context()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEnvironment_context()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environment_context, typedOther.environment_context); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -34846,7 +36057,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("alter_table_args("); + StringBuilder sb = new StringBuilder("alter_table_with_environment_context_args("); boolean first = true; sb.append("dbname:"); @@ -34872,6 +36083,14 @@ public String toString() { sb.append(this.new_tbl); } first = false; + if (!first) sb.append(", "); + sb.append("environment_context:"); + if (this.environment_context == null) { + sb.append("null"); + } else { + sb.append(this.environment_context); + } + first = false; sb.append(")"); return sb.toString(); } @@ -34882,6 +36101,9 @@ public void validate() throws org.apache.thrift.TException { if (new_tbl != null) { new_tbl.validate(); } + if (environment_context != null) { + environment_context.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -34900,15 +36122,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_table_argsStandardSchemeFactory implements SchemeFactory { - public alter_table_argsStandardScheme getScheme() { - return new alter_table_argsStandardScheme(); + private static class alter_table_with_environment_context_argsStandardSchemeFactory implements SchemeFactory { + public alter_table_with_environment_context_argsStandardScheme getScheme() { + return new alter_table_with_environment_context_argsStandardScheme(); } } - private static class alter_table_argsStandardScheme extends StandardScheme { + private static class alter_table_with_environment_context_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_environment_context_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -34943,6 +36165,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_args st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // ENVIRONMENT_CONTEXT + 
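          // As with every field case in this generated reader, the payload is
          // consumed only when the declared wire type matches; anything else
          // falls through to TProtocolUtil.skip, which is what lets old and
          // new versions of the struct interoperate on the wire.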
if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.environment_context = new EnvironmentContext(); + struct.environment_context.read(iprot); + struct.setEnvironment_contextIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -34952,7 +36183,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_args st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_with_environment_context_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -34971,22 +36202,27 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_args s struct.new_tbl.write(oprot); oprot.writeFieldEnd(); } + if (struct.environment_context != null) { + oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC); + struct.environment_context.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class alter_table_argsTupleSchemeFactory implements SchemeFactory { - public alter_table_argsTupleScheme getScheme() { - return new alter_table_argsTupleScheme(); + private static class alter_table_with_environment_context_argsTupleSchemeFactory implements SchemeFactory { + public alter_table_with_environment_context_argsTupleScheme getScheme() { + return new alter_table_with_environment_context_argsTupleScheme(); } } - private static class alter_table_argsTupleScheme extends TupleScheme { + private static class alter_table_with_environment_context_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDbname()) { @@ -34998,7 +36234,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_args st if (struct.isSetNew_tbl()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetEnvironment_context()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDbname()) { oprot.writeString(struct.dbname); } @@ -35008,12 +36247,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_args st if (struct.isSetNew_tbl()) { struct.new_tbl.write(oprot); } + if (struct.isSetEnvironment_context()) { + struct.environment_context.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); @@ -35027,21 +36269,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_args str struct.new_tbl.read(iprot); struct.setNew_tblIsSet(true); } + if (incoming.get(3)) { + 
struct.environment_context = new EnvironmentContext(); + struct.environment_context.read(iprot); + struct.setEnvironment_contextIsSet(true); + } } } } - public static class alter_table_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_result"); + public static class alter_table_with_environment_context_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_with_environment_context_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_table_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_table_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new alter_table_with_environment_context_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_table_with_environment_context_resultTupleSchemeFactory()); } private InvalidOperationException o1; // required @@ -35117,13 +36364,13 @@ public String getFieldName() { tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_with_environment_context_result.class, metaDataMap); } - public alter_table_result() { + public alter_table_with_environment_context_result() { } - public alter_table_result( + public alter_table_with_environment_context_result( InvalidOperationException o1, MetaException o2) { @@ -35135,7 +36382,7 @@ public alter_table_result( /** * Performs a deep copy on other. 
*/ - public alter_table_result(alter_table_result other) { + public alter_table_with_environment_context_result(alter_table_with_environment_context_result other) { if (other.isSetO1()) { this.o1 = new InvalidOperationException(other.o1); } @@ -35144,8 +36391,8 @@ public alter_table_result(alter_table_result other) { } } - public alter_table_result deepCopy() { - return new alter_table_result(this); + public alter_table_with_environment_context_result deepCopy() { + return new alter_table_with_environment_context_result(this); } @Override @@ -35252,12 +36499,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_table_result) - return this.equals((alter_table_result)that); + if (that instanceof alter_table_with_environment_context_result) + return this.equals((alter_table_with_environment_context_result)that); return false; } - public boolean equals(alter_table_result that) { + public boolean equals(alter_table_with_environment_context_result that) { if (that == null) return false; @@ -35299,13 +36546,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(alter_table_result other) { + public int compareTo(alter_table_with_environment_context_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - alter_table_result typedOther = (alter_table_result)other; + alter_table_with_environment_context_result typedOther = (alter_table_with_environment_context_result)other; lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); if (lastComparison != 0) { @@ -35344,7 +36591,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("alter_table_result("); + StringBuilder sb = new StringBuilder("alter_table_with_environment_context_result("); boolean first = true; sb.append("o1:"); @@ -35387,15 +36634,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_table_resultStandardSchemeFactory implements SchemeFactory { - public alter_table_resultStandardScheme getScheme() { - return new alter_table_resultStandardScheme(); + private static class alter_table_with_environment_context_resultStandardSchemeFactory implements SchemeFactory { + public alter_table_with_environment_context_resultStandardScheme getScheme() { + return new alter_table_with_environment_context_resultStandardScheme(); } } - private static class alter_table_resultStandardScheme extends StandardScheme { + private static class alter_table_with_environment_context_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_environment_context_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -35432,7 +36679,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_with_environment_context_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -35452,16 +36699,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_result } - private static class alter_table_resultTupleSchemeFactory implements SchemeFactory { - public alter_table_resultTupleScheme getScheme() { - return new alter_table_resultTupleScheme(); + private static class alter_table_with_environment_context_resultTupleSchemeFactory implements SchemeFactory { + public alter_table_with_environment_context_resultTupleScheme getScheme() { + return new alter_table_with_environment_context_resultTupleScheme(); } } - private static class alter_table_resultTupleScheme extends TupleScheme { + private static class alter_table_with_environment_context_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { @@ -35480,7 +36727,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_result } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { @@ -35498,31 +36745,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
alter_table_result s } - public static class alter_table_with_environment_context_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_with_environment_context_args"); + public static class alter_table_with_cascade_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_with_cascade_args"); private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField NEW_TBL_FIELD_DESC = new org.apache.thrift.protocol.TField("new_tbl", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4); + private static final org.apache.thrift.protocol.TField CASCADE_FIELD_DESC = new org.apache.thrift.protocol.TField("cascade", org.apache.thrift.protocol.TType.BOOL, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_table_with_environment_context_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_table_with_environment_context_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new alter_table_with_cascade_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_table_with_cascade_argsTupleSchemeFactory()); } private String dbname; // required private String tbl_name; // required private Table new_tbl; // required - private EnvironmentContext environment_context; // required + private boolean cascade; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), TBL_NAME((short)2, "tbl_name"), NEW_TBL((short)3, "new_tbl"), - ENVIRONMENT_CONTEXT((short)4, "environment_context"); + CASCADE((short)4, "cascade"); private static final Map byName = new HashMap(); @@ -35543,8 +36790,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // NEW_TBL return NEW_TBL; - case 4: // ENVIRONMENT_CONTEXT - return ENVIRONMENT_CONTEXT; + case 4: // CASCADE + return CASCADE; default: return null; } @@ -35585,6 +36832,8 @@ public String getFieldName() { } // isset id assignments + private static final int __CASCADE_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -35594,32 +36843,34 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.NEW_TBL, new org.apache.thrift.meta_data.FieldMetaData("new_tbl", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); - tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.CASCADE, new org.apache.thrift.meta_data.FieldMetaData("cascade", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_with_environment_context_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_with_cascade_args.class, metaDataMap); } - public alter_table_with_environment_context_args() { + public alter_table_with_cascade_args() { } - public alter_table_with_environment_context_args( + public alter_table_with_cascade_args( String dbname, String tbl_name, Table new_tbl, - EnvironmentContext environment_context) + boolean cascade) { this(); this.dbname = dbname; this.tbl_name = tbl_name; this.new_tbl = new_tbl; - this.environment_context = environment_context; + this.cascade = cascade; + setCascadeIsSet(true); } /** * Performs a deep copy on other. 
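   * Strings are copied by reference (safe, since Java strings are immutable),
   * nested structs such as Table are cloned, and primitive fields like the new
   * cascade flag are copied by value together with their isset bits in
   * __isset_bitfield.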
*/ - public alter_table_with_environment_context_args(alter_table_with_environment_context_args other) { + public alter_table_with_cascade_args(alter_table_with_cascade_args other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDbname()) { this.dbname = other.dbname; } @@ -35629,13 +36880,11 @@ public alter_table_with_environment_context_args(alter_table_with_environment_co if (other.isSetNew_tbl()) { this.new_tbl = new Table(other.new_tbl); } - if (other.isSetEnvironment_context()) { - this.environment_context = new EnvironmentContext(other.environment_context); - } + this.cascade = other.cascade; } - public alter_table_with_environment_context_args deepCopy() { - return new alter_table_with_environment_context_args(this); + public alter_table_with_cascade_args deepCopy() { + return new alter_table_with_cascade_args(this); } @Override @@ -35643,7 +36892,8 @@ public void clear() { this.dbname = null; this.tbl_name = null; this.new_tbl = null; - this.environment_context = null; + setCascadeIsSet(false); + this.cascade = false; } public String getDbname() { @@ -35715,27 +36965,26 @@ public void setNew_tblIsSet(boolean value) { } } - public EnvironmentContext getEnvironment_context() { - return this.environment_context; + public boolean isCascade() { + return this.cascade; } - public void setEnvironment_context(EnvironmentContext environment_context) { - this.environment_context = environment_context; + public void setCascade(boolean cascade) { + this.cascade = cascade; + setCascadeIsSet(true); } - public void unsetEnvironment_context() { - this.environment_context = null; + public void unsetCascade() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CASCADE_ISSET_ID); } - /** Returns true if field environment_context is set (has been assigned a value) and false otherwise */ - public boolean isSetEnvironment_context() { - return this.environment_context != null; + /** Returns true if field cascade is set (has been assigned a value) and false otherwise */ + public boolean isSetCascade() { + return EncodingUtils.testBit(__isset_bitfield, __CASCADE_ISSET_ID); } - public void setEnvironment_contextIsSet(boolean value) { - if (!value) { - this.environment_context = null; - } + public void setCascadeIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CASCADE_ISSET_ID, value); } public void setFieldValue(_Fields field, Object value) { @@ -35764,11 +37013,11 @@ public void setFieldValue(_Fields field, Object value) { } break; - case ENVIRONMENT_CONTEXT: + case CASCADE: if (value == null) { - unsetEnvironment_context(); + unsetCascade(); } else { - setEnvironment_context((EnvironmentContext)value); + setCascade((Boolean)value); } break; @@ -35786,8 +37035,8 @@ public Object getFieldValue(_Fields field) { case NEW_TBL: return getNew_tbl(); - case ENVIRONMENT_CONTEXT: - return getEnvironment_context(); + case CASCADE: + return Boolean.valueOf(isCascade()); } throw new IllegalStateException(); @@ -35806,8 +37055,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case NEW_TBL: return isSetNew_tbl(); - case ENVIRONMENT_CONTEXT: - return isSetEnvironment_context(); + case CASCADE: + return isSetCascade(); } throw new IllegalStateException(); } @@ -35816,12 +37065,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_table_with_environment_context_args) - return this.equals((alter_table_with_environment_context_args)that); + if (that instanceof 
alter_table_with_cascade_args) + return this.equals((alter_table_with_cascade_args)that); return false; } - public boolean equals(alter_table_with_environment_context_args that) { + public boolean equals(alter_table_with_cascade_args that) { if (that == null) return false; @@ -35852,12 +37101,12 @@ public boolean equals(alter_table_with_environment_context_args that) { return false; } - boolean this_present_environment_context = true && this.isSetEnvironment_context(); - boolean that_present_environment_context = true && that.isSetEnvironment_context(); - if (this_present_environment_context || that_present_environment_context) { - if (!(this_present_environment_context && that_present_environment_context)) + boolean this_present_cascade = true; + boolean that_present_cascade = true; + if (this_present_cascade || that_present_cascade) { + if (!(this_present_cascade && that_present_cascade)) return false; - if (!this.environment_context.equals(that.environment_context)) + if (this.cascade != that.cascade) return false; } @@ -35883,21 +37132,21 @@ public int hashCode() { if (present_new_tbl) builder.append(new_tbl); - boolean present_environment_context = true && (isSetEnvironment_context()); - builder.append(present_environment_context); - if (present_environment_context) - builder.append(environment_context); + boolean present_cascade = true; + builder.append(present_cascade); + if (present_cascade) + builder.append(cascade); return builder.toHashCode(); } - public int compareTo(alter_table_with_environment_context_args other) { + public int compareTo(alter_table_with_cascade_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - alter_table_with_environment_context_args typedOther = (alter_table_with_environment_context_args)other; + alter_table_with_cascade_args typedOther = (alter_table_with_cascade_args)other; lastComparison = Boolean.valueOf(isSetDbname()).compareTo(typedOther.isSetDbname()); if (lastComparison != 0) { @@ -35929,12 +37178,12 @@ public int compareTo(alter_table_with_environment_context_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetEnvironment_context()).compareTo(typedOther.isSetEnvironment_context()); + lastComparison = Boolean.valueOf(isSetCascade()).compareTo(typedOther.isSetCascade()); if (lastComparison != 0) { return lastComparison; } - if (isSetEnvironment_context()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environment_context, typedOther.environment_context); + if (isSetCascade()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cascade, typedOther.cascade); if (lastComparison != 0) { return lastComparison; } @@ -35956,7 +37205,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("alter_table_with_environment_context_args("); + StringBuilder sb = new StringBuilder("alter_table_with_cascade_args("); boolean first = true; sb.append("dbname:"); @@ -35983,12 +37232,8 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("environment_context:"); - if (this.environment_context == null) { - sb.append("null"); - } else { - sb.append(this.environment_context); - } + sb.append("cascade:"); + sb.append(this.cascade); first = false; sb.append(")"); return sb.toString(); @@ -36000,9 +37245,6 @@ public void validate() throws org.apache.thrift.TException { if (new_tbl != null) { new_tbl.validate(); } - if (environment_context != null) { - environment_context.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -36015,21 +37257,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class alter_table_with_environment_context_argsStandardSchemeFactory implements SchemeFactory { - public alter_table_with_environment_context_argsStandardScheme getScheme() { - return new alter_table_with_environment_context_argsStandardScheme(); + private static class alter_table_with_cascade_argsStandardSchemeFactory implements SchemeFactory { + public alter_table_with_cascade_argsStandardScheme getScheme() { + return new alter_table_with_cascade_argsStandardScheme(); } } - private static class alter_table_with_environment_context_argsStandardScheme extends StandardScheme { + private static class alter_table_with_cascade_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_environment_context_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_cascade_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -36064,11 +37308,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_en org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // ENVIRONMENT_CONTEXT - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.environment_context = new EnvironmentContext(); - struct.environment_context.read(iprot); - struct.setEnvironment_contextIsSet(true); + case 4: // CASCADE + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.cascade = iprot.readBool(); + struct.setCascadeIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -36082,7 +37325,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_en struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_with_environment_context_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, 
alter_table_with_cascade_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -36101,27 +37344,25 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_with_e struct.new_tbl.write(oprot); oprot.writeFieldEnd(); } - if (struct.environment_context != null) { - oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC); - struct.environment_context.write(oprot); - oprot.writeFieldEnd(); - } + oprot.writeFieldBegin(CASCADE_FIELD_DESC); + oprot.writeBool(struct.cascade); + oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class alter_table_with_environment_context_argsTupleSchemeFactory implements SchemeFactory { - public alter_table_with_environment_context_argsTupleScheme getScheme() { - return new alter_table_with_environment_context_argsTupleScheme(); + private static class alter_table_with_cascade_argsTupleSchemeFactory implements SchemeFactory { + public alter_table_with_cascade_argsTupleScheme getScheme() { + return new alter_table_with_cascade_argsTupleScheme(); } } - private static class alter_table_with_environment_context_argsTupleScheme extends TupleScheme { + private static class alter_table_with_cascade_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_environment_context_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_cascade_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDbname()) { @@ -36133,7 +37374,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_en if (struct.isSetNew_tbl()) { optionals.set(2); } - if (struct.isSetEnvironment_context()) { + if (struct.isSetCascade()) { optionals.set(3); } oprot.writeBitSet(optionals, 4); @@ -36146,13 +37387,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_en if (struct.isSetNew_tbl()) { struct.new_tbl.write(oprot); } - if (struct.isSetEnvironment_context()) { - struct.environment_context.write(oprot); + if (struct.isSetCascade()) { + oprot.writeBool(struct.cascade); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_environment_context_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_cascade_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { @@ -36169,25 +37410,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_env struct.setNew_tblIsSet(true); } if (incoming.get(3)) { - struct.environment_context = new EnvironmentContext(); - struct.environment_context.read(iprot); - struct.setEnvironment_contextIsSet(true); + struct.cascade = iprot.readBool(); + struct.setCascadeIsSet(true); } } } } - public static class alter_table_with_environment_context_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_with_environment_context_result"); + public static class alter_table_with_cascade_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_table_with_cascade_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_table_with_environment_context_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_table_with_environment_context_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new alter_table_with_cascade_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_table_with_cascade_resultTupleSchemeFactory()); } private InvalidOperationException o1; // required @@ -36263,13 +37503,13 @@ public String getFieldName() { tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_with_environment_context_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_with_cascade_result.class, metaDataMap); } - public alter_table_with_environment_context_result() { + public alter_table_with_cascade_result() { } - public alter_table_with_environment_context_result( + public alter_table_with_cascade_result( InvalidOperationException o1, MetaException o2) { @@ -36281,7 +37521,7 @@ public alter_table_with_environment_context_result( /** * Performs a deep copy on other. 
*/ - public alter_table_with_environment_context_result(alter_table_with_environment_context_result other) { + public alter_table_with_cascade_result(alter_table_with_cascade_result other) { if (other.isSetO1()) { this.o1 = new InvalidOperationException(other.o1); } @@ -36290,8 +37530,8 @@ public alter_table_with_environment_context_result(alter_table_with_environment_ } } - public alter_table_with_environment_context_result deepCopy() { - return new alter_table_with_environment_context_result(this); + public alter_table_with_cascade_result deepCopy() { + return new alter_table_with_cascade_result(this); } @Override @@ -36398,12 +37638,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_table_with_environment_context_result) - return this.equals((alter_table_with_environment_context_result)that); + if (that instanceof alter_table_with_cascade_result) + return this.equals((alter_table_with_cascade_result)that); return false; } - public boolean equals(alter_table_with_environment_context_result that) { + public boolean equals(alter_table_with_cascade_result that) { if (that == null) return false; @@ -36445,13 +37685,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(alter_table_with_environment_context_result other) { + public int compareTo(alter_table_with_cascade_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - alter_table_with_environment_context_result typedOther = (alter_table_with_environment_context_result)other; + alter_table_with_cascade_result typedOther = (alter_table_with_cascade_result)other; lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); if (lastComparison != 0) { @@ -36490,7 +37730,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("alter_table_with_environment_context_result("); + StringBuilder sb = new StringBuilder("alter_table_with_cascade_result("); boolean first = true; sb.append("o1:"); @@ -36533,15 +37773,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_table_with_environment_context_resultStandardSchemeFactory implements SchemeFactory { - public alter_table_with_environment_context_resultStandardScheme getScheme() { - return new alter_table_with_environment_context_resultStandardScheme(); + private static class alter_table_with_cascade_resultStandardSchemeFactory implements SchemeFactory { + public alter_table_with_cascade_resultStandardScheme getScheme() { + return new alter_table_with_cascade_resultStandardScheme(); } } - private static class alter_table_with_environment_context_resultStandardScheme extends StandardScheme { + private static class alter_table_with_cascade_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_environment_context_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_cascade_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -36578,7 +37818,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_en struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_with_environment_context_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_with_cascade_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -36598,16 +37838,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_with_e } - private static class alter_table_with_environment_context_resultTupleSchemeFactory implements SchemeFactory { - public alter_table_with_environment_context_resultTupleScheme getScheme() { - return new alter_table_with_environment_context_resultTupleScheme(); + private static class alter_table_with_cascade_resultTupleSchemeFactory implements SchemeFactory { + public alter_table_with_cascade_resultTupleScheme getScheme() { + return new alter_table_with_cascade_resultTupleScheme(); } } - private static class alter_table_with_environment_context_resultTupleScheme extends TupleScheme { + private static class alter_table_with_cascade_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_environment_context_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_cascade_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { @@ -36626,7 +37866,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_en } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_environment_context_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_cascade_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = 
(TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 69285cad7c673bd3a7400c755b613260e45dda2f..69978d316f598156a319bdf041cdff5e366ab7d0 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -41,6 +41,7 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { public function get_table_names_by_filter($dbname, $filter, $max_tables); public function alter_table($dbname, $tbl_name, \metastore\Table $new_tbl); public function alter_table_with_environment_context($dbname, $tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context); + public function alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade); public function add_partition(\metastore\Partition $new_part); public function add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context); public function add_partitions($new_parts); @@ -1567,6 +1568,63 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } + public function alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade) + { + $this->send_alter_table_with_cascade($dbname, $tbl_name, $new_tbl, $cascade); + $this->recv_alter_table_with_cascade(); + } + + public function send_alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade) + { + $args = new \metastore\ThriftHiveMetastore_alter_table_with_cascade_args(); + $args->dbname = $dbname; + $args->tbl_name = $tbl_name; + $args->new_tbl = $new_tbl; + $args->cascade = $cascade; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'alter_table_with_cascade', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('alter_table_with_cascade', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_alter_table_with_cascade() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_alter_table_with_cascade_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_alter_table_with_cascade_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + return; + } + public function add_partition(\metastore\Partition $new_part) { $this->send_add_partition($new_part); @@ -12177,6 +12235,239 @@ class ThriftHiveMetastore_alter_table_with_environment_context_result { } +class 
ThriftHiveMetastore_alter_table_with_cascade_args { + static $_TSPEC; + + public $dbname = null; + public $tbl_name = null; + public $new_tbl = null; + public $cascade = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbname', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'new_tbl', + 'type' => TType::STRUCT, + 'class' => '\metastore\Table', + ), + 4 => array( + 'var' => 'cascade', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbname'])) { + $this->dbname = $vals['dbname']; + } + if (isset($vals['tbl_name'])) { + $this->tbl_name = $vals['tbl_name']; + } + if (isset($vals['new_tbl'])) { + $this->new_tbl = $vals['new_tbl']; + } + if (isset($vals['cascade'])) { + $this->cascade = $vals['cascade']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_table_with_cascade_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbname); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->new_tbl = new \metastore\Table(); + $xfer += $this->new_tbl->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->cascade); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_table_with_cascade_args'); + if ($this->dbname !== null) { + $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1); + $xfer += $output->writeString($this->dbname); + $xfer += $output->writeFieldEnd(); + } + if ($this->tbl_name !== null) { + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeString($this->tbl_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->new_tbl !== null) { + if (!is_object($this->new_tbl)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('new_tbl', TType::STRUCT, 3); + $xfer += $this->new_tbl->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->cascade !== null) { + $xfer += $output->writeFieldBegin('cascade', TType::BOOL, 4); + $xfer += $output->writeBool($this->cascade); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_alter_table_with_cascade_result { + static $_TSPEC; + + public $o1 = null; + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidOperationException', + ), + 2 => array( + 'var' => 
'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_table_with_cascade_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\InvalidOperationException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_table_with_cascade_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_add_partition_args { static $_TSPEC; diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 068895e145afafd8bfeeb31ef28a2def32adbda0..59c0393affbbb2e6f5ee87f34214102e4f757796 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -48,6 +48,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print ' get_table_names_by_filter(string dbname, string filter, i16 max_tables)' print ' void alter_table(string dbname, string tbl_name, Table new_tbl)' print ' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)' + print ' void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade)' print ' Partition add_partition(Partition new_part)' print ' Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context)' print ' i32 add_partitions( new_parts)' @@ -340,6 +341,12 @@ elif cmd == 'alter_table_with_environment_context': sys.exit(1) pp.pprint(client.alter_table_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) +elif cmd == 'alter_table_with_cascade': + if len(args) != 4: + print 'alter_table_with_cascade requires 4 args' + sys.exit(1) + pp.pprint(client.alter_table_with_cascade(args[0],args[1],eval(args[2]),eval(args[3]),)) + elif cmd == 'add_partition': if len(args) != 1: print 'add_partition requires 1 args' diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 
6160b7ef2485630a795c36c41c6245376cfa8b5e..100a7cb5e1cdc60f1883c491704717af1d8e61a0 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -215,6 +215,16 @@ def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, enviro """ pass + def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade): + """ + Parameters: + - dbname + - tbl_name + - new_tbl + - cascade + """ + pass + def add_partition(self, new_part): """ Parameters: @@ -1851,6 +1861,44 @@ def recv_alter_table_with_environment_context(self, ): raise result.o2 return + def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade): + """ + Parameters: + - dbname + - tbl_name + - new_tbl + - cascade + """ + self.send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade) + self.recv_alter_table_with_cascade() + + def send_alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade): + self._oprot.writeMessageBegin('alter_table_with_cascade', TMessageType.CALL, self._seqid) + args = alter_table_with_cascade_args() + args.dbname = dbname + args.tbl_name = tbl_name + args.new_tbl = new_tbl + args.cascade = cascade + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_table_with_cascade(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = alter_table_with_cascade_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + def add_partition(self, new_part): """ Parameters: @@ -5177,6 +5225,7 @@ def __init__(self, handler): self._processMap["get_table_names_by_filter"] = Processor.process_get_table_names_by_filter self._processMap["alter_table"] = Processor.process_alter_table self._processMap["alter_table_with_environment_context"] = Processor.process_alter_table_with_environment_context + self._processMap["alter_table_with_cascade"] = Processor.process_alter_table_with_cascade self._processMap["add_partition"] = Processor.process_add_partition self._processMap["add_partition_with_environment_context"] = Processor.process_add_partition_with_environment_context self._processMap["add_partitions"] = Processor.process_add_partitions @@ -5692,6 +5741,22 @@ def process_alter_table_with_environment_context(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_alter_table_with_cascade(self, seqid, iprot, oprot): + args = alter_table_with_cascade_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_table_with_cascade_result() + try: + self._handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade) + except InvalidOperationException as o1: + result.o1 = o1 + except MetaException as o2: + result.o2 = o2 + oprot.writeMessageBegin("alter_table_with_cascade", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_add_partition(self, seqid, iprot, oprot): args = add_partition_args() args.read(iprot) @@ -11052,6 +11117,177 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class alter_table_with_cascade_args: + """ + Attributes: + - dbname + - tbl_name + - new_tbl + - cascade + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 
'dbname', None, None, ), # 1 + (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.STRUCT, 'new_tbl', (Table, Table.thrift_spec), None, ), # 3 + (4, TType.BOOL, 'cascade', None, None, ), # 4 + ) + + def __init__(self, dbname=None, tbl_name=None, new_tbl=None, cascade=None,): + self.dbname = dbname + self.tbl_name = tbl_name + self.new_tbl = new_tbl + self.cascade = cascade + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.new_tbl = Table() + self.new_tbl.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.cascade = iprot.readBool(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('alter_table_with_cascade_args') + if self.dbname is not None: + oprot.writeFieldBegin('dbname', TType.STRING, 1) + oprot.writeString(self.dbname) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + if self.new_tbl is not None: + oprot.writeFieldBegin('new_tbl', TType.STRUCT, 3) + self.new_tbl.write(oprot) + oprot.writeFieldEnd() + if self.cascade is not None: + oprot.writeFieldBegin('cascade', TType.BOOL, 4) + oprot.writeBool(self.cascade) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class alter_table_with_cascade_result: + """ + Attributes: + - o1 + - o2 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + ) + + def __init__(self, o1=None, o2=None,): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + 
elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('alter_table_with_cascade_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class add_partition_args: """ Attributes: diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 9f59a8abe14f9e91299b9b89468c6e1f26ea7653..e6ef08a38e8f4ee15ea7298585ca5b964cd8fd34 100644 --- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -431,6 +431,22 @@ module ThriftHiveMetastore return end + def alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade) + send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade) + recv_alter_table_with_cascade() + end + + def send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade) + send_message('alter_table_with_cascade', Alter_table_with_cascade_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl, :cascade => cascade) + end + + def recv_alter_table_with_cascade() + result = receive_message(Alter_table_with_cascade_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? 
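+ # void call: if neither declared exception was re-raised above, the RPC succeeded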
+ return + end + def add_partition(new_part) send_add_partition(new_part) return recv_add_partition() end @@ -2299,6 +2315,19 @@ module ThriftHiveMetastore write_result(result, oprot, 'alter_table_with_environment_context', seqid) end + def process_alter_table_with_cascade(seqid, iprot, oprot) + args = read_args(iprot, Alter_table_with_cascade_args) + result = Alter_table_with_cascade_result.new() + begin + @handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade) + rescue ::InvalidOperationException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'alter_table_with_cascade', seqid) + end + def process_add_partition(seqid, iprot, oprot) args = read_args(iprot, Add_partition_args) result = Add_partition_result.new() @@ -4399,6 +4428,46 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Alter_table_with_cascade_args + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBL_NAME = 2 + NEW_TBL = 3 + CASCADE = 4 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + NEW_TBL => {:type => ::Thrift::Types::STRUCT, :name => 'new_tbl', :class => ::Table}, + CASCADE => {:type => ::Thrift::Types::BOOL, :name => 'cascade'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Alter_table_with_cascade_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidOperationException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Add_partition_args include ::Thrift::Struct, ::Thrift::Struct_Union NEW_PART = 1 diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java index d872be5d8af60ff933729bf390ed83887a04518c..b5653049eff6003d0b0772ebcfbadebba9a01fae 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java @@ -49,13 +49,38 @@ * @throws InvalidOperationException * thrown if the newTable object is invalid * @throws MetaException - * thrown if there is any other erro + * thrown if there is any other error */ public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname, String name, Table newTable) throws InvalidOperationException, MetaException; /** + * handles alter table; the changes can be cascaded to existing partitions if applicable + * + * @param msdb + * object to get metadata + * @param wh + * Hive Warehouse where table data is stored + * @param dbname + * database of the table being altered + * @param name + * original name of the table being altered. same as + * newTable.tableName if alter op is not a rename.
+ * @param newTable + * new table object + * @param cascade + * whether the changes should be cascaded to its partitions if applicable + * @throws InvalidOperationException + * thrown if the newTable object is invalid + * @throws MetaException + * thrown if there is any other error + */ + public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname, + String name, Table newTable, boolean cascade) throws InvalidOperationException, + MetaException; + + /** * handles alter partition * * @param msdb diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index fc6215a8b1baeda046ff661ce7279cdd9bf2413f..d99cfdf74d57071183e9385b5a3f2c5335e4ce60 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -63,6 +63,11 @@ public void setConf(Configuration conf) { public void alterTable(RawStore msdb, Warehouse wh, String dbname, String name, Table newt) throws InvalidOperationException, MetaException { + alterTable(msdb, wh, dbname, name, newt, false); + } + + public void alterTable(RawStore msdb, Warehouse wh, String dbname, + String name, Table newt, boolean cascade) throws InvalidOperationException, MetaException { if (newt == null) { throw new InvalidOperationException("New table is invalid: " + newt); } @@ -118,6 +123,19 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, oldt.getSd().getCols(), newt.getSd().getCols()); } + if (cascade) { + //Currently only column-related changes can be cascaded in alter table + if (MetaStoreUtils.isCascadeNeededInAlterTable(oldt, newt)) { + List<Partition> parts = msdb.getPartitions(dbname, name, -1); + for (Partition part : parts) { + part.getSd().setCols(newt.getSd().getCols()); + msdb.alterPartition(dbname, name, part.getValues(), part); + } + } else { + LOG.warn("Alter table does not cascade changes to its partitions."); + } + } + + //check that partition keys have not changed, except for virtual views //however, allow the partition comments to change boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(), diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index a47619c9c2660505f973ef809b36041b66c31bdd..65771c27ea5c6df0768904270445337a98c34cf3 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -3256,7 +3256,15 @@ public void alter_table(final String dbname, final String name, final Table newTable) throws InvalidOperationException, MetaException { // Do not set an environment context. - alter_table_with_environment_context(dbname, name, newTable, null); + alter_table_core(dbname, name, newTable, null, false); + } + + @Override + public void alter_table_with_cascade(final String dbname, final String name, + final Table newTable, final boolean cascade) + throws InvalidOperationException, MetaException { + // Do not set an environment context.
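+ // All alter_table entry points delegate to the shared alter_table_core; this variant forwards the caller's cascade flag.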
+ alter_table_core(dbname, name, newTable, null, cascade); } @Override @@ -3264,6 +3272,12 @@ public void alter_table_with_environment_context(final String dbname, final String name, final Table newTable, final EnvironmentContext envContext) throws InvalidOperationException, MetaException { + alter_table_core(dbname, name, newTable, envContext, false); + } + + private void alter_table_core(final String dbname, final String name, final Table newTable, + final EnvironmentContext envContext, final boolean cascade) + throws InvalidOperationException, MetaException { startFunction("alter_table", ": db=" + dbname + " tbl=" + name + " newtbl=" + newTable.getTableName()); @@ -3278,7 +3292,7 @@ public void alter_table_with_environment_context(final String dbname, try { Table oldt = get_table_core(dbname, name); firePreEvent(new PreAlterTableEvent(oldt, newTable, this)); - alterHandler.alterTable(getMS(), wh, dbname, name, newTable); + alterHandler.alterTable(getMS(), wh, dbname, name, newTable, cascade); success = true; for (MetaStoreEventListener listener : listeners) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 6a0cb96640a303638d159ed141d3996ccb9bfe98..212b68dac2bcffc9b3660f3e132f2f81c7285216 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -311,6 +311,12 @@ public void alter_table(String dbname, String tbl_name, Table new_tbl) alter_table(dbname, tbl_name, new_tbl, null); } + @Override + public void alter_table(String dbname, String tbl_name, Table new_tbl, boolean cascade) + throws InvalidOperationException, MetaException, TException { + client.alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); + } + public void alter_table(String dbname, String tbl_name, Table new_tbl, EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 066ab68443fdd1bd04e14c8a3b1b2bbac9130d6c..11b033623e5ab685cce4ca841ecb088225c58e7b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -616,6 +616,10 @@ void createTable(Table tbl) throws AlreadyExistsException, void alter_table(String defaultDatabaseName, String tblName, Table table) throws InvalidOperationException, MetaException, TException; + // alter_table variant that cascades column changes to existing partitions (alter_table_with_cascade) + void alter_table(String defaultDatabaseName, String tblName, Table table, + boolean cascade) throws InvalidOperationException, MetaException, TException; + void createDatabase(Database db) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 1ac5affc51ca5cab59eacdc333c6b344ea384887..2db2658fbc57fba01c892c9213baef6c498e659b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -554,6 +554,26 @@ static void
throwExceptionIfIncompatibleColTypeChange( } } + static boolean isCascadeNeededInAlterTable(Table oldTable, Table newTable) { + List<FieldSchema> oldCols = oldTable.getSd().getCols(); + List<FieldSchema> newCols = newTable.getSd().getCols(); + + //currently cascade only supports add/replace columns and + //changing column type/position/name/comments + if (oldCols.size() != newCols.size()) { + return true; + } else { + for (int i = 0; i < oldCols.size(); i++) { + FieldSchema oldCol = oldCols.get(i); + FieldSchema newCol = newCols.get(i); + if (!oldCol.equals(newCol)) { + return true; + } + } + } + return false; + } + /** * @return true if oldType and newType are compatible. * Two types are compatible if we have internal functions to cast one to another. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 292c83cf967756ad3056f8a57de9590a0b00babf..89775cce496b2a1ef7fabcaa275531eabcde3c34 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -423,6 +423,9 @@ "sorted, table {0}", true), ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED(10299, "Alter table partition type {0} does not allow partial partition spec", true), + ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED(10300, + "Alter table partition type {0} does not support cascade", true), + //========================== 20000 range starts here ========================// SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 1655f3de2df32a890030b0f148155115d7d48b2e..b33810c07ea61f2d4d77bab0279cdab7eda4e503 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -3282,7 +3282,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { List<Partition> allPartitions = null; if (alterTbl.getPartSpec() != null) { - Map<String, String> partSpec = alterTbl.getPartSpec(); + Map<String, String> partSpec = alterTbl.getPartSpec(); if (DDLSemanticAnalyzer.isFullSpec(tbl, partSpec)) { allPartitions = new ArrayList<Partition>(); Partition part = db.getPartition(tbl, partSpec, false); @@ -3322,7 +3322,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { try { if (allPartitions == null) { - db.alterTable(alterTbl.getOldName(), tbl); + db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade()); } else { db.alterPartitions(tbl.getTableName(), allPartitions); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index b90062704edb0024b2a08b83e4d351d4ef4a14a3..52995e3d27c1e44b033e183557dc3fcbb72f6cfb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -442,6 +442,11 @@ public void createTable(String tableName, List columns, List par */ public void alterTable(String tblName, Table newTbl) throws InvalidOperationException, HiveException { + alterTable(tblName, newTbl, false); + } + + public void alterTable(String tblName, Table newTbl, boolean cascade) + throws InvalidOperationException, HiveException { String[] names = Utilities.getDbTableName(tblName); try { // Remove the DDL_TIME so it gets refreshed @@ -449,7 +454,7 @@ public void alterTable(String tblName, Table newTbl) newTbl.getParameters().remove(hive_metastoreConstants.DDL_TIME); } newTbl.checkValidity();
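+ // When cascade is true, HiveAlterHandler also applies the column changes to every existing partition of the table.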
- getMSC().alter_table(names[0], names[1], newTbl.getTTable()); + getMSC().alter_table(names[0], names[1], newTbl.getTTable(), cascade); } catch (MetaException e) { throw new HiveException("Unable to alter table. " + e.getMessage(), e); } catch (TException e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index f1f723c939b42fb441ddeaa4159249d897203a08..ce5b2843a5b5a99b8a8bffaffa7a149b81154efa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -227,6 +227,18 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc @Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, + boolean cascade) throws InvalidOperationException, MetaException, TException { + org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); + if (old_tbl != null) { + //temp tables do not support partitions, so cascade is not applicable here + alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); + return; + } + super.alter_table(dbname, tbl_name, new_tbl, cascade); + } + + @Override + public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { // First try temp table org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index b105424b25ede962af3060daf915b47866fd941c..eeaa7cf3fbc1fcd46936f9726fb5f9df31748ad3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1369,20 +1369,38 @@ private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec) throws SemanticException { - addInputsOutputsAlterTable(tableName, partSpec, null); + addInputsOutputsAlterTable(tableName, partSpec, null, false); } private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec, - AlterTableDesc desc) throws SemanticException { + AlterTableDesc desc) throws SemanticException { + addInputsOutputsAlterTable(tableName, partSpec, desc, false); + } + + private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec, + AlterTableDesc desc, boolean isCascade) throws SemanticException { + boolean alterPartitions = partSpec != null && !partSpec.isEmpty(); + //cascade is specified at the table level only, then applied to the partition level + if (isCascade && alterPartitions) { + throw new SemanticException( + ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, desc.getOp().name()); + } + Table tab = getTable(tableName, true); // Determine the lock type to acquire WriteEntity.WriteType writeType = desc == null ?
WriteEntity.WriteType.DDL_EXCLUSIVE : WriteEntity.determineAlterTableWriteType(desc.getOp()); - if (partSpec == null || partSpec.isEmpty()) { + + if (!alterPartitions) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab, writeType)); - } - else { + //do not need a separate lock for the partitions since they are covered by the table lock + if (isCascade) { + for (Partition part : getPartitions(tab, partSpec, false)) { + outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK)); + } + } + } else { ReadEntity re = new ReadEntity(tab); // In the case of altering a table for its partitions we don't need to lock the table // itself, just the partitions. But the table will have a ReadEntity. So mark that @@ -2495,32 +2513,36 @@ private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expec private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast, HashMap<String, String> partSpec) throws SemanticException { String newComment = null; - String newType = null; - newType = getTypeStringFromAST((ASTNode) ast.getChild(2)); boolean first = false; String flagCol = null; - ASTNode positionNode = null; - if (ast.getChildCount() == 5) { - newComment = unescapeSQLString(ast.getChild(3).getText()); - positionNode = (ASTNode) ast.getChild(4); - } else if (ast.getChildCount() == 4) { - if (ast.getChild(3).getType() == HiveParser.StringLiteral) { - newComment = unescapeSQLString(ast.getChild(3).getText()); - } else { - positionNode = (ASTNode) ast.getChild(3); - } - } - - if (positionNode != null) { - if (positionNode.getChildCount() == 0) { - first = true; - } else { - flagCol = unescapeIdentifier(positionNode.getChild(0).getText()); - } - } - + boolean isCascade = false; + //col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name] [CASCADE|RESTRICT] String oldColName = ast.getChild(0).getText(); String newColName = ast.getChild(1).getText(); + String newType = getTypeStringFromAST((ASTNode) ast.getChild(2)); + int childCount = ast.getChildCount(); + for (int i = 3; i < childCount; i++) { + ASTNode child = (ASTNode) ast.getChild(i); + switch (child.getToken().getType()) { + case HiveParser.StringLiteral: + newComment = unescapeSQLString(child.getText()); + break; + case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION: + flagCol = unescapeIdentifier(child.getChild(0).getText()); + break; + case HiveParser.KW_FIRST: + first = true; + break; + case HiveParser.TOK_CASCADE: + isCascade = true; + break; + case HiveParser.TOK_RESTRICT: + break; + default: + throw new SemanticException("Unsupported token: " + child.getToken() + + " for alter table"); + } + } /* Validate the operation of renaming a column name.
*/ Table tab = getTable(qualified); @@ -2536,8 +2558,8 @@ private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast, String tblName = getDotName(qualified); AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec, unescapeIdentifier(oldColName), unescapeIdentifier(newColName), - newType, newComment, first, flagCol); - addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc); + newType, newComment, first, flagCol, isCascade); + addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc, isCascade); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); @@ -2585,10 +2607,15 @@ private void analyzeAlterTableModifyCols(String[] qualified, ASTNode ast, String tblName = getDotName(qualified); List<FieldSchema> newCols = getColumns((ASTNode) ast.getChild(0)); + boolean isCascade = false; + if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) { + isCascade = true; + } + AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec, newCols, - alterType); + alterType, isCascade); - addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc); + addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc, isCascade); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index f1365fa3938cd6505bcacf005a077ebb1950c427..b5ce0fad2b21e96865d6103654032ec4aae4947a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -1043,16 +1043,16 @@ alterStatementSuffixRename[boolean table] alterStatementSuffixAddCol @init { pushMsg("add column statement", state); } @after { popMsg(state); } - : (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN - -> {$add != null}? ^(TOK_ALTERTABLE_ADDCOLS columnNameTypeList) - -> ^(TOK_ALTERTABLE_REPLACECOLS columnNameTypeList) + : (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN restrictOrCascade? + -> {$add != null}? ^(TOK_ALTERTABLE_ADDCOLS columnNameTypeList restrictOrCascade?) + -> ^(TOK_ALTERTABLE_REPLACECOLS columnNameTypeList restrictOrCascade?) ; alterStatementSuffixRenameCol @init { pushMsg("rename column name", state); } @after { popMsg(state); } - : KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition? - ->^(TOK_ALTERTABLE_RENAMECOL $oldName $newName colType $comment? alterStatementChangeColPosition?) + : KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition? restrictOrCascade? + ->^(TOK_ALTERTABLE_RENAMECOL $oldName $newName colType $comment? alterStatementChangeColPosition? restrictOrCascade?)
; alterStatementSuffixUpdateStatsCol diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index f869821d2c928893190d8063c37708183e5eba20..681d809fbd6af14e444162b42cbb02856cecd828 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -112,6 +112,7 @@ Table table; boolean isDropIfExists = false; boolean isTurnOffSorting = false; + boolean isCascade = false; public AlterTableDesc() { } @@ -127,8 +128,8 @@ public AlterTableDesc() { * @param newType */ public AlterTableDesc(String tblName, HashMap<String, String> partSpec, - String oldColName, String newColName, - String newType, String newComment, boolean first, String afterCol) { + String oldColName, String newColName, String newType, String newComment, + boolean first, String afterCol, boolean isCascade) { super(); oldName = tblName; this.partSpec = partSpec; @@ -139,6 +140,7 @@ public AlterTableDesc(String tblName, HashMap<String, String> partSpec, this.first = first; this.afterCol = afterCol; op = AlterTableTypes.RENAMECOLUMN; + this.isCascade = isCascade; } /** @@ -161,11 +163,12 @@ public AlterTableDesc(String oldName, String newName, boolean expectView) { * new columns to be added */ public AlterTableDesc(String name, HashMap<String, String> partSpec, List<FieldSchema> newCols, - AlterTableTypes alterType) { + AlterTableTypes alterType, boolean isCascade) { op = alterType; oldName = name; this.newCols = new ArrayList<FieldSchema>(newCols); this.partSpec = partSpec; + this.isCascade = isCascade; } /** @@ -720,6 +723,13 @@ public boolean getIsDropIfExists() { return isDropIfExists; } + /** + * @return isCascade + */ + public boolean getIsCascade() { + return isCascade; + } + public static boolean doesAlterTableTypeSupportPartialPartitionSpec(AlterTableTypes type) { return alterTableTypesWithPartialSpec.contains(type); } diff --git a/ql/src/test/queries/clientpositive/alter_table_cascade.q b/ql/src/test/queries/clientpositive/alter_table_cascade.q new file mode 100644 index 0000000000000000000000000000000000000000..479fda405cfb293e1d7130512a158ae544ce225a --- /dev/null +++ b/ql/src/test/queries/clientpositive/alter_table_cascade.q @@ -0,0 +1,137 @@ +SET hive.exec.dynamic.partition = true; +SET hive.exec.dynamic.partition.mode = nonstrict; + +-- SORT_QUERY_RESULTS + +drop table if exists alter_table_src; +drop table if exists alter_table_cascade; + +create table alter_table_src(c1 string, c2 string); +load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_src; + +create table alter_table_cascade (c1 string) partitioned by (p1 string, p2 string); + +insert overwrite table alter_table_cascade partition (p1, p2) + select c1, 'abc', '123' from alter_table_src + union all + select c1, null, '123' from alter_table_src; + +show partitions alter_table_cascade; +describe alter_table_cascade; +describe alter_table_cascade partition (p1='abc', p2='123'); +describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); +select * from alter_table_cascade where p1='abc'; +select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'; + +-- add columns c2 by replace columns (for HIVE-6131) +-- reload data to existing partition __HIVE_DEFAULT_PARTITION__ +-- load data to a new partition xyz +-- querying data (from new or existing partitions) should return non-NULL values for the new column +alter table alter_table_cascade replace columns (c1 string, c2 string) cascade; +load data local inpath
'../../data/files/dec.txt' overwrite into table alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__',p2='123'); +load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_cascade partition (p1='xyz', p2='123'); +describe alter_table_cascade; +describe alter_table_cascade partition (p1='xyz', p2='123'); +describe alter_table_cascade partition (p1='abc', p2='123'); +describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); +select * from alter_table_cascade where p1='xyz'; +select * from alter_table_cascade where p1='abc'; +select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'; + +-- Change c2 to decimal(10,0), the change should be cascaded to all partitions +-- the c2 value returned should be in decimal(10,0) +alter table alter_table_cascade change c2 c2 decimal(10,0) comment "change datatype" cascade; +describe alter_table_cascade; +describe alter_table_cascade partition (p1='xyz', p2='123'); +describe alter_table_cascade partition (p1='abc', p2='123'); +describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); +select * from alter_table_cascade where p1='xyz'; +select * from alter_table_cascade where p1='abc'; +select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'; + +-- rename c1 to c2fromc1 and move it to after c2, the change should be cascaded to all partitions +alter table alter_table_cascade change c1 c2fromc1 string comment "change position after" after c2 cascade; +describe alter_table_cascade; +describe alter_table_cascade partition (p1='xyz', p2='123'); +describe alter_table_cascade partition (p1='abc', p2='123'); +describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); + +-- rename c2fromc1 back to c1 and move to first as c1, the change should be cascaded to all partitions +alter table alter_table_cascade change c2fromc1 c1 string comment "change position first" first cascade; +describe alter_table_cascade; +describe alter_table_cascade partition (p1='xyz', p2='123'); +describe alter_table_cascade partition (p1='abc', p2='123'); +describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); + +-- Try out replace columns, the change should be cascaded to all partitions +alter table alter_table_cascade replace columns (c1 string) cascade; +describe alter_table_cascade; +describe alter_table_cascade partition (p1='xyz', p2='123'); +describe alter_table_cascade partition (p1='abc', p2='123'); +describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); +select * from alter_table_cascade where p1='xyz'; +select * from alter_table_cascade where p1='abc'; +select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'; + +-- Try add columns, the change should be cascaded to all partitions +alter table alter_table_cascade add columns (c2 decimal(14,4)) cascade; +describe alter_table_cascade; +describe alter_table_cascade partition (p1='xyz', p2='123'); +describe alter_table_cascade partition (p1='abc', p2='123'); +describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); +select * from alter_table_cascade where p1='xyz'; +select * from alter_table_cascade where p1='abc'; +select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'; + +-- + +drop table if exists alter_table_restrict; + +create table alter_table_restrict (c1 string) partitioned by (p1 string, p2 string); +insert overwrite table alter_table_restrict partition (p1, p2) + select c1, 'abc', '123' from
alter_table_src + union all + select c1, null, '123' from alter_table_src; + +show partitions alter_table_restrict; +describe alter_table_restrict; +describe alter_table_restrict partition (p1='abc', p2='123'); +describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); +select * from alter_table_restrict where p1='abc'; +select * from alter_table_restrict where p1='__HIVE_DEFAULT_PARTITION__'; + +-- add columns c2 by replace columns (for HIVE-6131) without cascade +-- only the table column definition is changed; existing partitions are not +-- after replace, only the new partition xyz returns values for the newly added column, but not the existing partitions abc and __HIVE_DEFAULT_PARTITION__ +alter table alter_table_restrict replace columns (c1 string, c2 string) restrict; +load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='abc', p2='123'); +load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__',p2='123'); +load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='xyz', p2='123'); +describe alter_table_restrict; +describe alter_table_restrict partition (p1='xyz', p2='123'); +describe alter_table_restrict partition (p1='abc', p2='123'); +describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); +select * from alter_table_restrict where p1='xyz'; +select * from alter_table_restrict where p1='abc'; +select * from alter_table_restrict where p1='__HIVE_DEFAULT_PARTITION__'; + +-- Change c2 to decimal(10,0); the change is limited to the table and new partitions +alter table alter_table_restrict change c2 c2 decimal(10,0) restrict; +describe alter_table_restrict; +describe alter_table_restrict partition (p1='xyz', p2='123'); +describe alter_table_restrict partition (p1='abc', p2='123'); +describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); + +-- Try out replace columns; the change is limited to the table and new partitions +alter table alter_table_restrict replace columns (c1 string); +describe alter_table_restrict; +describe alter_table_restrict partition (p1='xyz', p2='123'); +describe alter_table_restrict partition (p1='abc', p2='123'); +describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); + +-- Try add columns; the change is limited to the table and new partitions +alter table alter_table_restrict add columns (c2 decimal(14,4)); +describe alter_table_restrict; +describe alter_table_restrict partition (p1='xyz', p2='123'); +describe alter_table_restrict partition (p1='abc', p2='123'); +describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123'); diff --git a/ql/src/test/results/clientpositive/alter_table_cascade.q.out b/ql/src/test/results/clientpositive/alter_table_cascade.q.out new file mode 100644 index 0000000000000000000000000000000000000000..01394665b9d20f8f498cb2a79988243470a8761e --- /dev/null +++ b/ql/src/test/results/clientpositive/alter_table_cascade.q.out @@ -0,0 +1,1387 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +drop table if exists alter_table_src +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- SORT_QUERY_RESULTS + +drop table if exists alter_table_src +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists alter_table_cascade +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists alter_table_cascade +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table alter_table_src(c1 string, c2 string) +PREHOOK:
type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter_table_src +POSTHOOK: query: create table alter_table_src(c1 string, c2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alter_table_src +PREHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_src +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@alter_table_src +POSTHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_src +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@alter_table_src +PREHOOK: query: create table alter_table_cascade (c1 string) partitioned by (p1 string, p2 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter_table_cascade +POSTHOOK: query: create table alter_table_cascade (c1 string) partitioned by (p1 string, p2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alter_table_cascade +PREHOOK: query: insert overwrite table alter_table_cascade partition (p1, p2) + select c1, 'abc', '123' from alter_table_src + union all + select c1, null, '123' from alter_table_src +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_table_src +PREHOOK: Output: default@alter_table_cascade +POSTHOOK: query: insert overwrite table alter_table_cascade partition (p1, p2) + select c1, 'abc', '123' from alter_table_src + union all + select c1, null, '123' from alter_table_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_table_src +POSTHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 +POSTHOOK: Output: default@alter_table_cascade@p1=abc/p2=123 +POSTHOOK: Lineage: alter_table_cascade PARTITION(p1=__HIVE_DEFAULT_PARTITION__,p2=123).c1 EXPRESSION [(alter_table_src)alter_table_src.FieldSchema(name:c1, type:string, comment:null), (alter_table_src)alter_table_src.FieldSchema(name:c1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_table_cascade PARTITION(p1=abc,p2=123).c1 EXPRESSION [(alter_table_src)alter_table_src.FieldSchema(name:c1, type:string, comment:null), (alter_table_src)alter_table_src.FieldSchema(name:c1, type:string, comment:null), ] +PREHOOK: query: show partitions alter_table_cascade +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@alter_table_cascade +POSTHOOK: query: show partitions alter_table_cascade +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@alter_table_cascade +p1=__HIVE_DEFAULT_PARTITION__/p2=123 +p1=abc/p2=123 +PREHOOK: query: describe alter_table_cascade +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_table_cascade +POSTHOOK: query: describe alter_table_cascade +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_table_cascade +c1 string +p1 string +p2 string + +# Partition Information +# col_name data_type comment + +p1 string +p2 string +PREHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_table_cascade +POSTHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_table_cascade +c1 string +p1 string +p2 string + +# Partition Information +# col_name data_type comment + +p1 string +p2 string +PREHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') +PREHOOK: type: DESCTABLE +PREHOOK: Input: 
default@alter_table_cascade +POSTHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_table_cascade +c1 string +p1 string +p2 string + +# Partition Information +# col_name data_type comment + +p1 string +p2 string +PREHOOK: query: select * from alter_table_cascade where p1='abc' +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_table_cascade +PREHOOK: Input: default@alter_table_cascade@p1=abc/p2=123 +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_table_cascade where p1='abc' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_table_cascade +POSTHOOK: Input: default@alter_table_cascade@p1=abc/p2=123 +#### A masked pattern was here #### +Beck abc 123 +Beck abc 123 +Beck abc 123 +Cluck abc 123 +Mary abc 123 +Mary abc 123 +Snow abc 123 +Tom abc 123 +Tom abc 123 +Tom abc 123 +PREHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__' +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_table_cascade +PREHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_table_cascade +POSTHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 +#### A masked pattern was here #### +Beck __HIVE_DEFAULT_PARTITION__ 123 +Beck __HIVE_DEFAULT_PARTITION__ 123 +Beck __HIVE_DEFAULT_PARTITION__ 123 +Cluck __HIVE_DEFAULT_PARTITION__ 123 +Mary __HIVE_DEFAULT_PARTITION__ 123 +Mary __HIVE_DEFAULT_PARTITION__ 123 +Snow __HIVE_DEFAULT_PARTITION__ 123 +Tom __HIVE_DEFAULT_PARTITION__ 123 +Tom __HIVE_DEFAULT_PARTITION__ 123 +Tom __HIVE_DEFAULT_PARTITION__ 123 +PREHOOK: query: -- add columns c2 by replace columns (for HIVE-6131) +-- reload data to existing partition __HIVE_DEFAULT_PARTITION__ +-- load data to a new partition xyz +-- querying data (from new or existing partitions) should return non-NULL values for the new column +alter table alter_table_cascade replace columns (c1 string, c2 string) cascade +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@alter_table_cascade +PREHOOK: Output: default@alter_table_cascade +PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 +PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123 +POSTHOOK: query: -- add columns c2 by replace columns (for HIVE-6131) +-- reload data to existing partition __HIVE_DEFAULT_PARTITION__ +-- load data to a new partition xyz +-- querying data (from new or existing partitions) should return non-NULL values for the new column +alter table alter_table_cascade replace columns (c1 string, c2 string) cascade +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@alter_table_cascade +POSTHOOK: Output: default@alter_table_cascade +POSTHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 +POSTHOOK: Output: default@alter_table_cascade@p1=abc/p2=123 +PREHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__',p2='123') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 +POSTHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_cascade partition
+PREHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__',p2='123')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+POSTHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__',p2='123')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+PREHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_cascade partition (p1='xyz', p2='123')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@alter_table_cascade
+POSTHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_cascade partition (p1='xyz', p2='123')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
+PREHOOK: query: describe alter_table_cascade
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: select * from alter_table_cascade where p1='xyz'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='xyz'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
+#### A masked pattern was here ####
+Beck 0.0 xyz 123
+Beck 77.341 xyz 123
+Beck 79.9 xyz 123
+Cluck 5.96 xyz 123
+Mary 33.33 xyz 123
+Mary 4.329 xyz 123
+Snow 55.71 xyz 123
+Tom -12.25 xyz 123
+Tom 19.00 xyz 123
+Tom 234.79 xyz 123
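The xyz rows carry real c2 values because dec.txt holds two fields per row and the partition was created after c2 existed; the abc rows that follow were written back when the table had a single column, so the deserializer finds no second field and yields NULL. One way to confirm what a partition's files physically contain (the warehouse path below is illustrative, not taken from this test):

    dfs -cat /user/hive/warehouse/alter_table_cascade/p1=abc/p2=123/*;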
+PREHOOK: query: select * from alter_table_cascade where p1='abc'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=abc/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='abc'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=abc/p2=123
+#### A masked pattern was here ####
+Beck NULL abc 123
+Beck NULL abc 123
+Beck NULL abc 123
+Cluck NULL abc 123
+Mary NULL abc 123
+Mary NULL abc 123
+Snow NULL abc 123
+Tom NULL abc 123
+Tom NULL abc 123
+Tom NULL abc 123
+PREHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+Beck 0.0 __HIVE_DEFAULT_PARTITION__ 123
+Beck 77.341 __HIVE_DEFAULT_PARTITION__ 123
+Beck 79.9 __HIVE_DEFAULT_PARTITION__ 123
+Cluck 5.96 __HIVE_DEFAULT_PARTITION__ 123
+Mary 33.33 __HIVE_DEFAULT_PARTITION__ 123
+Mary 4.329 __HIVE_DEFAULT_PARTITION__ 123
+Snow 55.71 __HIVE_DEFAULT_PARTITION__ 123
+Tom -12.25 __HIVE_DEFAULT_PARTITION__ 123
+Tom 19.00 __HIVE_DEFAULT_PARTITION__ 123
+Tom 234.79 __HIVE_DEFAULT_PARTITION__ 123
+PREHOOK: query: -- Change c2 to decimal(10,0), the change should cascade to all partitions
+-- the c2 value returned should be in decimal(10,0)
+alter table alter_table_cascade change c2 c2 decimal(10,0) comment "change datatype" cascade
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
+POSTHOOK: query: -- Change c2 to decimal(10,0), the change should cascade to all partitions
+-- the c2 value returned should be in decimal(10,0)
+alter table alter_table_cascade change c2 c2 decimal(10,0) comment "change datatype" cascade
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
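CHANGE COLUMN with CASCADE pushes the new type and comment to the table and all three partitions at once. Reading the text field through decimal(10,0) rounds to scale 0, which is exactly what the selects further down show (79.9 becomes 80, 5.96 becomes 6). The cast behavior can be checked in isolation:

    select cast('79.9' as decimal(10,0));     -- 80
    select cast('5.96' as decimal(10,0));     -- 6
    select cast('-12.25' as decimal(10,0));   -- -12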
+PREHOOK: query: describe alter_table_cascade
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 decimal(10,0) change datatype
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 decimal(10,0) change datatype
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 decimal(10,0) change datatype
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 decimal(10,0) change datatype
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: select * from alter_table_cascade where p1='xyz'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='xyz'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
+#### A masked pattern was here ####
+Beck 0 xyz 123
+Beck 77 xyz 123
+Beck 80 xyz 123
+Cluck 6 xyz 123
+Mary 33 xyz 123
+Mary 4 xyz 123
+Snow 56 xyz 123
+Tom -12 xyz 123
+Tom 19 xyz 123
+Tom 235 xyz 123
+PREHOOK: query: select * from alter_table_cascade where p1='abc'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=abc/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='abc'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=abc/p2=123
+#### A masked pattern was here ####
+Beck NULL abc 123
+Beck NULL abc 123
+Beck NULL abc 123
+Cluck NULL abc 123
+Mary NULL abc 123
+Mary NULL abc 123
+Snow NULL abc 123
+Tom NULL abc 123
+Tom NULL abc 123
+Tom NULL abc 123
+PREHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+Beck 0 __HIVE_DEFAULT_PARTITION__ 123
+Beck 77 __HIVE_DEFAULT_PARTITION__ 123
+Beck 80 __HIVE_DEFAULT_PARTITION__ 123
+Cluck 6 __HIVE_DEFAULT_PARTITION__ 123
+Mary 33 __HIVE_DEFAULT_PARTITION__ 123
+Mary 4 __HIVE_DEFAULT_PARTITION__ 123
+Snow 56 __HIVE_DEFAULT_PARTITION__ 123
+Tom -12 __HIVE_DEFAULT_PARTITION__ 123
+Tom 19 __HIVE_DEFAULT_PARTITION__ 123
+Tom 235 __HIVE_DEFAULT_PARTITION__ 123
+PREHOOK: query: -- rename c1 to c2fromc1 and move it to after c2, the change should cascade to all partitions
+alter table alter_table_cascade change c1 c2fromc1 string comment "change position after" after c2 cascade
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
+POSTHOOK: query: -- rename c1 to c2fromc1 and move it to after c2, the change should cascade to all partitions
+alter table alter_table_cascade change c1 c2fromc1 string comment "change position after" after c2 cascade
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
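CHANGE ... AFTER (and FIRST, used a step later) reorders only the metastore column list; nothing in the data files moves. For positional formats such as the default text serde this means the reordered names map onto different physical fields, which is presumably why the test renames the column back before running any further selects. Syntax sketch with hypothetical column names:

    alter table t change old_name new_name string after some_col cascade;
    alter table t change new_name new_name string first cascade;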
+PREHOOK: query: describe alter_table_cascade
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c2 decimal(10,0) change datatype
+c2fromc1 string change position after
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c2 decimal(10,0) change datatype
+c2fromc1 string change position after
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c2 decimal(10,0) change datatype
+c2fromc1 string change position after
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c2 decimal(10,0) change datatype
+c2fromc1 string change position after
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: -- rename c2fromc1 back to c1 and move it to first, the change should cascade to all partitions
+alter table alter_table_cascade change c2fromc1 c1 string comment "change position first" first cascade
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
+POSTHOOK: query: -- rename c2fromc1 back to c1 and move it to first, the change should cascade to all partitions
+alter table alter_table_cascade change c2fromc1 c1 string comment "change position first" first cascade
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
+PREHOOK: query: describe alter_table_cascade
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string change position first
+c2 decimal(10,0) change datatype
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string change position first
+c2 decimal(10,0) change datatype
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string change position first
+c2 decimal(10,0) change datatype
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string change position first
+c2 decimal(10,0) change datatype
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: -- Try out replace columns, the change should cascade to all partitions
+alter table alter_table_cascade replace columns (c1 string) cascade
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
+POSTHOOK: query: -- Try out replace columns, the change should cascade to all partitions
+alter table alter_table_cascade replace columns (c1 string) cascade
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
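Dropping c2 through REPLACE COLUMNS removes it from the table and, because of CASCADE, from every partition's metadata, but the second field is still physically present in the files under xyz and __HIVE_DEFAULT_PARTITION__. That is why the ADD COLUMNS a few steps further down brings the values straight back:

    alter table alter_table_cascade replace columns (c1 string) cascade;      -- c2 no longer visible
    alter table alter_table_cascade add columns (c2 decimal(14,4)) cascade;   -- c2 readable again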
+PREHOOK: query: describe alter_table_cascade
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: select * from alter_table_cascade where p1='xyz'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='xyz'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
+#### A masked pattern was here ####
+Beck xyz 123
+Beck xyz 123
+Beck xyz 123
+Cluck xyz 123
+Mary xyz 123
+Mary xyz 123
+Snow xyz 123
+Tom xyz 123
+Tom xyz 123
+Tom xyz 123
+PREHOOK: query: select * from alter_table_cascade where p1='abc'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=abc/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='abc'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=abc/p2=123
+#### A masked pattern was here ####
+Beck abc 123
+Beck abc 123
+Beck abc 123
+Cluck abc 123
+Mary abc 123
+Mary abc 123
+Snow abc 123
+Tom abc 123
+Tom abc 123
+Tom abc 123
+PREHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+Beck __HIVE_DEFAULT_PARTITION__ 123
+Beck __HIVE_DEFAULT_PARTITION__ 123
+Beck __HIVE_DEFAULT_PARTITION__ 123
+Cluck __HIVE_DEFAULT_PARTITION__ 123
+Mary __HIVE_DEFAULT_PARTITION__ 123
+Mary __HIVE_DEFAULT_PARTITION__ 123
+Snow __HIVE_DEFAULT_PARTITION__ 123
+Tom __HIVE_DEFAULT_PARTITION__ 123
+Tom __HIVE_DEFAULT_PARTITION__ 123
+Tom __HIVE_DEFAULT_PARTITION__ 123
+PREHOOK: query: -- Try add columns, the change should cascade to all partitions
+alter table alter_table_cascade add columns (c2 decimal(14,4)) cascade
+PREHOOK: type: ALTERTABLE_ADDCOLS
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade
+PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
+POSTHOOK: query: -- Try add columns, the change should cascade to all partitions
+alter table alter_table_cascade add columns (c2 decimal(14,4)) cascade
+POSTHOOK: type: ALTERTABLE_ADDCOLS
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade
+POSTHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=abc/p2=123
+POSTHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123
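Because c2 comes back as decimal(14,4) rather than decimal(10,0), the re-read values keep their fractional digits in the selects that follow (77.341, -12.25), and trailing zeros are trimmed on output (the file's 19.00 prints as 19). Again, the casts can be checked in isolation:

    select cast('77.341' as decimal(14,4));   -- 77.341
    select cast('19.00' as decimal(14,4));    -- 19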
+PREHOOK: query: describe alter_table_cascade
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 decimal(14,4)
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 decimal(14,4)
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 decimal(14,4)
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_cascade
+POSTHOOK: query: describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_cascade
+c1 string
+c2 decimal(14,4)
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: select * from alter_table_cascade where p1='xyz'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='xyz'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
+#### A masked pattern was here ####
+Beck 0 xyz 123
+Beck 77.341 xyz 123
+Beck 79.9 xyz 123
+Cluck 5.96 xyz 123
+Mary 33.33 xyz 123
+Mary 4.329 xyz 123
+Snow 55.71 xyz 123
+Tom -12.25 xyz 123
+Tom 19 xyz 123
+Tom 234.79 xyz 123
+PREHOOK: query: select * from alter_table_cascade where p1='abc'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=abc/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='abc'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=abc/p2=123
+#### A masked pattern was here ####
+Beck NULL abc 123
+Beck NULL abc 123
+Beck NULL abc 123
+Cluck NULL abc 123
+Mary NULL abc 123
+Mary NULL abc 123
+Snow NULL abc 123
+Tom NULL abc 123
+Tom NULL abc 123
+Tom NULL abc 123
+PREHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_cascade
+PREHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_cascade
+POSTHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+Beck 0 __HIVE_DEFAULT_PARTITION__ 123
+Beck 77.341 __HIVE_DEFAULT_PARTITION__ 123
+Beck 79.9 __HIVE_DEFAULT_PARTITION__ 123
+Cluck 5.96 __HIVE_DEFAULT_PARTITION__ 123
+Mary 33.33 __HIVE_DEFAULT_PARTITION__ 123
+Mary 4.329 __HIVE_DEFAULT_PARTITION__ 123
+Snow 55.71 __HIVE_DEFAULT_PARTITION__ 123
+Tom -12.25 __HIVE_DEFAULT_PARTITION__ 123
+Tom 19 __HIVE_DEFAULT_PARTITION__ 123
+Tom 234.79 __HIVE_DEFAULT_PARTITION__ 123
+PREHOOK: query: --
+
+drop table if exists alter_table_restrict
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: --
+
+drop table if exists alter_table_restrict
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table alter_table_restrict (c1 string) partitioned by (p1 string, p2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alter_table_restrict
+POSTHOOK: query: create table alter_table_restrict (c1 string) partitioned by (p1 string, p2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alter_table_restrict
+PREHOOK: query: insert overwrite table alter_table_restrict partition (p1, p2)
+select c1, 'abc', '123' from alter_table_src
+union all
+select c1, null, '123' from alter_table_src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_src
+PREHOOK: Output: default@alter_table_restrict
+POSTHOOK: query: insert overwrite table alter_table_restrict partition (p1, p2)
+select c1, 'abc', '123' from alter_table_src
+union all
+select c1, null, '123' from alter_table_src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_src
+POSTHOOK: Output: default@alter_table_restrict@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+POSTHOOK: Output: default@alter_table_restrict@p1=abc/p2=123
+POSTHOOK: Lineage: alter_table_restrict PARTITION(p1=__HIVE_DEFAULT_PARTITION__,p2=123).c1 EXPRESSION [(alter_table_src)alter_table_src.FieldSchema(name:c1, type:string, comment:null), (alter_table_src)alter_table_src.FieldSchema(name:c1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter_table_restrict PARTITION(p1=abc,p2=123).c1 EXPRESSION [(alter_table_src)alter_table_src.FieldSchema(name:c1, type:string, comment:null), (alter_table_src)alter_table_src.FieldSchema(name:c1, type:string, comment:null), ]
+PREHOOK: query: show partitions alter_table_restrict
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: show partitions alter_table_restrict
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@alter_table_restrict
+p1=__HIVE_DEFAULT_PARTITION__/p2=123
+p1=abc/p2=123
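The second half of the file replays the same scenario against alter_table_restrict with RESTRICT semantics. Note in the ALTER pre/post hooks below that only the table ever appears as an Output, never the partitions: partition metadata is left untouched. RESTRICT is also the default, so these two statements are equivalent:

    alter table alter_table_restrict replace columns (c1 string, c2 string) restrict;
    alter table alter_table_restrict replace columns (c1 string, c2 string);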
+PREHOOK: query: describe alter_table_restrict
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: select * from alter_table_restrict where p1='abc'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_restrict
+PREHOOK: Input: default@alter_table_restrict@p1=abc/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_restrict where p1='abc'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_restrict
+POSTHOOK: Input: default@alter_table_restrict@p1=abc/p2=123
+#### A masked pattern was here ####
+Beck abc 123
+Beck abc 123
+Beck abc 123
+Cluck abc 123
+Mary abc 123
+Mary abc 123
+Snow abc 123
+Tom abc 123
+Tom abc 123
+Tom abc 123
+PREHOOK: query: select * from alter_table_restrict where p1='__HIVE_DEFAULT_PARTITION__'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_restrict
+PREHOOK: Input: default@alter_table_restrict@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_restrict where p1='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_restrict
+POSTHOOK: Input: default@alter_table_restrict@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+Beck __HIVE_DEFAULT_PARTITION__ 123
+Beck __HIVE_DEFAULT_PARTITION__ 123
+Beck __HIVE_DEFAULT_PARTITION__ 123
+Cluck __HIVE_DEFAULT_PARTITION__ 123
+Mary __HIVE_DEFAULT_PARTITION__ 123
+Mary __HIVE_DEFAULT_PARTITION__ 123
+Snow __HIVE_DEFAULT_PARTITION__ 123
+Tom __HIVE_DEFAULT_PARTITION__ 123
+Tom __HIVE_DEFAULT_PARTITION__ 123
+Tom __HIVE_DEFAULT_PARTITION__ 123
+PREHOOK: query: -- add columns c2 by replace columns (for HIVE-6131) without cascade
+-- only the table column definition changes; partition definitions do not
+-- after replace, only the new partition xyz returns values for the newly added column, not the existing partitions abc and __HIVE_DEFAULT_PARTITION__
+alter table alter_table_restrict replace columns (c1 string, c2 string) restrict
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@alter_table_restrict
+PREHOOK: Output: default@alter_table_restrict
+POSTHOOK: query: -- add columns c2 by replace columns (for HIVE-6131) without cascade
+-- only the table column definition changes; partition definitions do not
+-- after replace, only the new partition xyz returns values for the newly added column, not the existing partitions abc and __HIVE_DEFAULT_PARTITION__
+alter table alter_table_restrict replace columns (c1 string, c2 string) restrict
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@alter_table_restrict
+POSTHOOK: Output: default@alter_table_restrict
+PREHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='abc', p2='123')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@alter_table_restrict@p1=abc/p2=123
+POSTHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='abc', p2='123')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alter_table_restrict@p1=abc/p2=123
+PREHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__',p2='123')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@alter_table_restrict@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+POSTHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__',p2='123')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alter_table_restrict@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+PREHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='xyz', p2='123')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@alter_table_restrict
+POSTHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='xyz', p2='123')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alter_table_restrict
+POSTHOOK: Output: default@alter_table_restrict@p1=xyz/p2=123
+PREHOOK: query: describe alter_table_restrict
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+c2 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+c2 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: select * from alter_table_restrict where p1='xyz'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_restrict
+PREHOOK: Input: default@alter_table_restrict@p1=xyz/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_restrict where p1='xyz'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_restrict
+POSTHOOK: Input: default@alter_table_restrict@p1=xyz/p2=123
+#### A masked pattern was here ####
+Beck 0.0 xyz 123
+Beck 77.341 xyz 123
+Beck 79.9 xyz 123
+Cluck 5.96 xyz 123
+Mary 33.33 xyz 123
+Mary 4.329 xyz 123
+Snow 55.71 xyz 123
+Tom -12.25 xyz 123
+Tom 19.00 xyz 123
+Tom 234.79 xyz 123
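The contrast with the cascade half shows up in the next two selects: although dec.txt was freshly reloaded into abc and __HIVE_DEFAULT_PARTITION__, c2 still reads as NULL there, because Hive deserializes each partition with the partition's own column list, and those pre-existing partitions never learned about c2. Only xyz, created after the ALTER, picked up the two-column table schema. The partition-level describes above make the split visible:

    describe alter_table_restrict partition (p1='xyz', p2='123');   -- c1, c2, p1, p2
    describe alter_table_restrict partition (p1='abc', p2='123');   -- c1, p1, p2 (no c2)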
+PREHOOK: query: select * from alter_table_restrict where p1='abc'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_restrict
+PREHOOK: Input: default@alter_table_restrict@p1=abc/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_restrict where p1='abc'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_restrict
+POSTHOOK: Input: default@alter_table_restrict@p1=abc/p2=123
+#### A masked pattern was here ####
+Beck NULL abc 123
+Beck NULL abc 123
+Beck NULL abc 123
+Cluck NULL abc 123
+Mary NULL abc 123
+Mary NULL abc 123
+Snow NULL abc 123
+Tom NULL abc 123
+Tom NULL abc 123
+Tom NULL abc 123
+PREHOOK: query: select * from alter_table_restrict where p1='__HIVE_DEFAULT_PARTITION__'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_table_restrict
+PREHOOK: Input: default@alter_table_restrict@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alter_table_restrict where p1='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_table_restrict
+POSTHOOK: Input: default@alter_table_restrict@p1=__HIVE_DEFAULT_PARTITION__/p2=123
+#### A masked pattern was here ####
+Beck NULL __HIVE_DEFAULT_PARTITION__ 123
+Beck NULL __HIVE_DEFAULT_PARTITION__ 123
+Beck NULL __HIVE_DEFAULT_PARTITION__ 123
+Cluck NULL __HIVE_DEFAULT_PARTITION__ 123
+Mary NULL __HIVE_DEFAULT_PARTITION__ 123
+Mary NULL __HIVE_DEFAULT_PARTITION__ 123
+Snow NULL __HIVE_DEFAULT_PARTITION__ 123
+Tom NULL __HIVE_DEFAULT_PARTITION__ 123
+Tom NULL __HIVE_DEFAULT_PARTITION__ 123
+Tom NULL __HIVE_DEFAULT_PARTITION__ 123
+PREHOOK: query: -- Change c2 to decimal(10,0), the change is limited to the table and new partitions
+alter table alter_table_restrict change c2 c2 decimal(10,0) restrict
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alter_table_restrict
+PREHOOK: Output: default@alter_table_restrict
+POSTHOOK: query: -- Change c2 to decimal(10,0), the change is limited to the table and new partitions
+alter table alter_table_restrict change c2 c2 decimal(10,0) restrict
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alter_table_restrict
+POSTHOOK: Output: default@alter_table_restrict
+PREHOOK: query: describe alter_table_restrict
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+c2 decimal(10,0)
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+c2 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: -- Try out replace columns, the change is limited to the table and new partitions
+alter table alter_table_restrict replace columns (c1 string)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@alter_table_restrict
+PREHOOK: Output: default@alter_table_restrict
+POSTHOOK: query: -- Try out replace columns, the change is limited to the table and new partitions
+alter table alter_table_restrict replace columns (c1 string)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@alter_table_restrict
+POSTHOOK: Output: default@alter_table_restrict
+PREHOOK: query: describe alter_table_restrict
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+c2 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: -- Try add columns, the change is limited to the table and new partitions
+alter table alter_table_restrict add columns (c2 decimal(14,4))
+PREHOOK: type: ALTERTABLE_ADDCOLS
+PREHOOK: Input: default@alter_table_restrict
+PREHOOK: Output: default@alter_table_restrict
+POSTHOOK: query: -- Try add columns, the change is limited to the table and new partitions
+alter table alter_table_restrict add columns (c2 decimal(14,4))
+POSTHOOK: type: ALTERTABLE_ADDCOLS
+POSTHOOK: Input: default@alter_table_restrict
+POSTHOOK: Output: default@alter_table_restrict
+PREHOOK: query: describe alter_table_restrict
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+c2 decimal(14,4)
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
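After three more restrict-mode ALTERs, the table-level schema has drifted through c2 decimal(10,0), a single column, and finally (c1 string, c2 decimal(14,4)), while xyz still carries the (c1 string, c2 string) definition it was created with, as the remaining describes confirm. Table and partition schemas are now deliberately out of sync:

    describe alter_table_restrict;                                  -- c2 decimal(14,4)
    describe alter_table_restrict partition (p1='xyz', p2='123');   -- c2 string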
+PREHOOK: query: describe alter_table_restrict partition (p1='xyz', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='xyz', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+c2 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='abc', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
+PREHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_table_restrict
+POSTHOOK: query: describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_table_restrict
+c1 string
+p1 string
+p2 string
+
+# Partition Information
+# col_name data_type comment
+
+p1 string
+p2 string
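Taken together, the two halves of this golden file pin down the contract of the new flag: with CASCADE an ALTER TABLE ADD/REPLACE/CHANGE COLUMNS is applied to the table and to every existing partition; with RESTRICT (the default) it applies to the table only, and therefore reaches only partitions created afterwards. Side by side, on a hypothetical table t:

    alter table t add columns (c2 decimal(14,4)) cascade;    -- table + all existing partitions
    alter table t add columns (c2 decimal(14,4)) restrict;   -- table + future partitions only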