diff --git .gitignore .gitignore
index 376cdc6..e54804f 100644
--- .gitignore
+++ .gitignore
@@ -6,3 +6,4 @@ build-eclipse
 .settings
 *.launch
 *~
+metastore_db
diff --git build-common.xml build-common.xml
index 00aa470..050979b 100644
--- build-common.xml
+++ build-common.xml
@@ -434,7 +434,7 @@
+          excludes="**/TestSerDe.class,**/TestHiveMetaStore.class,**/*$*.class" />
diff --git eclipse-templates/TestCliDriver.launchtemplate eclipse-templates/TestCliDriver.launchtemplate
index c304161..5d14f78 100644
--- eclipse-templates/TestCliDriver.launchtemplate
+++ eclipse-templates/TestCliDriver.launchtemplate
@@ -21,6 +21,6 @@
-
+
diff --git eclipse-templates/TestEmbeddedHiveMetaStore.launchtemplate eclipse-templates/TestEmbeddedHiveMetaStore.launchtemplate
new file mode 100644
index 0000000..c4d8e9a
--- /dev/null
+++ eclipse-templates/TestEmbeddedHiveMetaStore.launchtemplate
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git eclipse-templates/TestHive.launchtemplate eclipse-templates/TestHive.launchtemplate
index 24efc12..e2f46db 100644
--- eclipse-templates/TestHive.launchtemplate
+++ eclipse-templates/TestHive.launchtemplate
@@ -21,6 +21,6 @@
-
+
diff --git eclipse-templates/TestHiveMetaStoreChecker.launchtemplate eclipse-templates/TestHiveMetaStoreChecker.launchtemplate
new file mode 100644
index 0000000..78c022c
--- /dev/null
+++ eclipse-templates/TestHiveMetaStoreChecker.launchtemplate
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git eclipse-templates/TestRemoteHiveMetaStore.launchtemplate eclipse-templates/TestRemoteHiveMetaStore.launchtemplate
new file mode 100644
index 0000000..3600e5c
--- /dev/null
+++ eclipse-templates/TestRemoteHiveMetaStore.launchtemplate
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git metastore/if/hive_metastore.thrift metastore/if/hive_metastore.thrift
index 478d0af..30e25d6 100755
--- metastore/if/hive_metastore.thrift
+++ metastore/if/hive_metastore.thrift
@@ -33,6 +33,7 @@ struct Type {
 struct Database {
   1: string name,
   2: string description,
+  3: string locationUri,
 }
 
 // This object holds the information needed by SerDes
@@ -150,16 +151,16 @@ exception ConfigValSecurityException {
 */
 service ThriftHiveMetastore extends fb303.FacebookService
 {
-  bool create_database(1:string name, 2:string description)
-    throws(1:AlreadyExistsException o1, 2:MetaException o2)
+  void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
   Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
-  bool drop_database(1:string name) throws(2:MetaException o2)
-  list<string> get_databases() throws(1:MetaException o1)
+  void drop_database(1:string name, 2:bool deleteData) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+  list<string> get_databases(1:string pattern) throws(1:MetaException o1)
+  list<string> get_all_databases() throws(1:MetaException o1)
 
   // returns the type with given name (make seperate calls for the dependent types if needed)
-  Type get_type(1:string name) throws(1:MetaException o2)
+  Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2)
   bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
-  bool drop_type(1:string type) throws(1:MetaException o2)
+  bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2)
   map<string, Type> get_type_all(1:string name)
                       throws(1:MetaException o2)
@@ -182,8 +183,8 @@ service ThriftHiveMetastore extends fb303.FacebookService
   // delete data (including partitions) if deleteData is set to true
   void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
                        throws(1:NoSuchObjectException o1, 2:MetaException o3)
-  list<string> get_tables(1: string db_name, 2: string pattern)
-    throws (1: MetaException o1)
+  list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
+  list<string> get_all_tables(1: string db_name) throws (1: MetaException o1)
 
   Table get_table(1:string dbname, 2:string tbl_name)
                        throws (1:MetaException o1, 2:NoSuchObjectException o2)
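[Reviewer note] The interface changes above rework the database half of the client API: create_database now takes a full Database struct (including the new locationUri field) and returns void, drop_database takes a deleteData flag, get_databases filters by a pattern, and get_all_databases/get_all_tables are new unfiltered variants. A minimal sketch of what a caller of the regenerated C++ client might look like; the host, port, names, and include paths are hypothetical illustrations following common Thrift C++ conventions of this era, not part of this patch:

#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>   // include prefix may differ by Thrift version
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"

using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace Apache::Hadoop::Hive;          // assumed generated namespace

int main() {
  boost::shared_ptr<TTransport> socket(new TSocket("localhost", 9083));  // hypothetical host/port
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);

  transport->open();

  // create_database now takes the whole struct, not (name, description)
  Database db;
  db.name = "sales";                                           // hypothetical database
  db.description = "sales data";
  db.locationUri = "hdfs://nn/user/hive/warehouse/sales.db";   // new field in this patch
  client.create_database(db);

  // get_databases now takes a pattern; get_all_databases is the unfiltered form
  std::vector<std::string> dbs;
  client.get_databases(dbs, "sal*");
  client.get_all_databases(dbs);

  // same split for tables: get_tables(db, pattern) vs. the new get_all_tables(db)
  std::vector<std::string> tbls;
  client.get_all_tables(tbls, "sales");

  // drop_database now returns void, takes deleteData, and reports failure through
  // NoSuchObjectException / InvalidOperationException instead of a bool result
  client.drop_database("sales", true);

  transport->close();
  return 0;
}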
diff --git metastore/src/gen-cpp/ThriftHiveMetastore.cpp metastore/src/gen-cpp/ThriftHiveMetastore.cpp
index f945a3a..61fdb22 100644
--- metastore/src/gen-cpp/ThriftHiveMetastore.cpp
+++ metastore/src/gen-cpp/ThriftHiveMetastore.cpp
@@ -28,17 +28,9 @@ uint32_t ThriftHiveMetastore_create_database_args::read(apache::thrift::protocol
     switch (fid)
     {
       case 1:
-        if (ftype == apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->name);
-          this->__isset.name = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->description);
-          this->__isset.description = true;
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->database.read(iprot);
+          this->__isset.database = true;
         } else {
           xfer += iprot->skip(ftype);
         }
         break;
@@ -58,11 +50,8 @@
 uint32_t ThriftHiveMetastore_create_database_args::write(apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_args");
-  xfer += oprot->writeFieldBegin("name", apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString(this->name);
-  xfer += oprot->writeFieldEnd();
-  xfer += oprot->writeFieldBegin("description", apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString(this->description);
+  xfer += oprot->writeFieldBegin("database", apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->database.write(oprot);
   xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -72,11 +61,8 @@
 uint32_t ThriftHiveMetastore_create_database_pargs::write(apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_pargs");
-  xfer += oprot->writeFieldBegin("name", apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString((*(this->name)));
-  xfer += oprot->writeFieldEnd();
-  xfer += oprot->writeFieldBegin("description", apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString((*(this->description)));
+  xfer += oprot->writeFieldBegin("database", apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->database)).write(oprot);
   xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -103,14 +89,6 @@ uint32_t ThriftHiveMetastore_create_database_result::read(apache::thrift::protoc
     }
     switch (fid)
     {
-      case 0:
-        if (ftype == apache::thrift::protocol::T_BOOL) {
-          xfer += iprot->readBool(this->success);
-          this->__isset.success = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
       case 1:
         if (ftype == apache::thrift::protocol::T_STRUCT) {
           xfer += this->o1.read(iprot);
@@ -127,6 +105,14 @@ uint32_t ThriftHiveMetastore_create_database_result::read(apache::thrift::protoc
           xfer += iprot->skip(ftype);
         }
         break;
+      case 3:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -145,11 +131,7 @@ uint32_t ThriftHiveMetastore_create_database_result::write(apache::thrift::proto
 
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_result");
 
-  if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_BOOL, 0);
-    xfer += oprot->writeBool(this->success);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o1) {
+  if (this->__isset.o1) {
     xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1);
     xfer += this->o1.write(oprot);
     xfer += oprot->writeFieldEnd();
@@ -157,6 +139,10 @@ uint32_t ThriftHiveMetastore_create_database_result::write(apache::thrift::proto
     xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2);
     xfer += this->o2.write(oprot);
     xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o3) {
+    xfer += oprot->writeFieldBegin("o3", apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->o3.write(oprot);
+    xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -183,14 +169,6 @@ uint32_t ThriftHiveMetastore_create_database_presult::read(apache::thrift::proto
     }
     switch (fid)
     {
-      case 0:
-        if (ftype == apache::thrift::protocol::T_BOOL) {
-          xfer += iprot->readBool((*(this->success)));
-          this->__isset.success = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
       case 1:
         if (ftype == apache::thrift::protocol::T_STRUCT) {
           xfer += this->o1.read(iprot);
@@ -207,6 +185,14 @@ uint32_t ThriftHiveMetastore_create_database_presult::read(apache::thrift::proto
           xfer += iprot->skip(ftype);
         }
         break;
+      case 3:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -445,6 +431,14 @@ uint32_t ThriftHiveMetastore_drop_database_args::read(apache::thrift::protocol::
           xfer += iprot->skip(ftype);
         }
         break;
+      case 2:
+        if (ftype == apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->deleteData);
+          this->__isset.deleteData = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -463,6 +457,9 @@ uint32_t ThriftHiveMetastore_drop_database_args::write(apache::thrift::protocol:
   xfer += oprot->writeFieldBegin("name", apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeFieldBegin("deleteData", apache::thrift::protocol::T_BOOL, 2);
+  xfer += oprot->writeBool(this->deleteData);
+  xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -474,6 +471,9 @@ uint32_t ThriftHiveMetastore_drop_database_pargs::write(apache::thrift::protocol
   xfer += oprot->writeFieldBegin("name", apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->name)));
   xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeFieldBegin("deleteData", apache::thrift::protocol::T_BOOL, 2);
+  xfer += oprot->writeBool((*(this->deleteData)));
+  xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
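[Reviewer note] Every regenerated reader in this file follows the same field-by-field dispatch loop, and that loop is what makes the added argument tolerable on the wire: a receiver built before this patch sees deleteData (field id 2) as an unknown id and discards it via skip() rather than failing. (The bool-to-void result changes elsewhere in this patch are still incompatible for old clients, which expect a success field 0 that a new server never sends.) A stripped-down, hand-written illustration of that pattern; the function name is invented for the sketch and is not part of the generated code:

// Shape of every generated read() in this file, reduced to the two
// drop_database_args fields. Unknown field ids fall through to skip().
uint32_t read_drop_database_args_sketch(apache::thrift::protocol::TProtocol* iprot,
                                        std::string& name, bool& deleteData) {
  uint32_t xfer = 0;
  std::string fname;
  apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  while (true) {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == apache::thrift::protocol::T_STOP) {
      break;                                  // end of struct
    }
    switch (fid) {
      case 1:                                 // name: known to old and new readers
        if (ftype == apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(name);
        } else {
          xfer += iprot->skip(ftype);         // right id, wrong type: discard
        }
        break;
      case 2:                                 // deleteData: new in this patch
        if (ftype == apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(deleteData);
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:                                // unknown id (e.g. an old reader
        xfer += iprot->skip(ftype);           // seeing field 2): skip, don't fail
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}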
@@ -499,10 +499,10 @@ uint32_t ThriftHiveMetastore_drop_database_result::read(apache::thrift::protocol
     }
     switch (fid)
     {
-      case 0:
-        if (ftype == apache::thrift::protocol::T_BOOL) {
-          xfer += iprot->readBool(this->success);
-          this->__isset.success = true;
+      case 1:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -515,6 +515,14 @@ uint32_t ThriftHiveMetastore_drop_database_result::read(apache::thrift::protocol
           xfer += iprot->skip(ftype);
         }
         break;
+      case 3:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -533,14 +541,18 @@ uint32_t ThriftHiveMetastore_drop_database_result::write(apache::thrift::protoco
 
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_result");
 
-  if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_BOOL, 0);
-    xfer += oprot->writeBool(this->success);
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
     xfer += oprot->writeFieldEnd();
   } else if (this->__isset.o2) {
     xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2);
     xfer += this->o2.write(oprot);
     xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o3) {
+    xfer += oprot->writeFieldBegin("o3", apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->o3.write(oprot);
+    xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -567,10 +579,10 @@ uint32_t ThriftHiveMetastore_drop_database_presult::read(apache::thrift::protoco
     }
     switch (fid)
     {
-      case 0:
-        if (ftype == apache::thrift::protocol::T_BOOL) {
-          xfer += iprot->readBool((*(this->success)));
-          this->__isset.success = true;
+      case 1:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -583,6 +595,14 @@ uint32_t ThriftHiveMetastore_drop_database_presult::read(apache::thrift::protoco
           xfer += iprot->skip(ftype);
         }
         break;
+      case 3:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -615,6 +635,14 @@ uint32_t ThriftHiveMetastore_get_databases_args::read(apache::thrift::protocol::
     }
     switch (fid)
     {
+      case 1:
+        if (ftype == apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->pattern);
+          this->__isset.pattern = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -630,6 +658,9 @@ uint32_t ThriftHiveMetastore_get_databases_args::write(apache::thrift::protocol:
   uint32_t xfer = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_args");
+  xfer += oprot->writeFieldBegin("pattern", apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->pattern);
+  xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -638,6 +669,9 @@ uint32_t ThriftHiveMetastore_get_databases_pargs::write(apache::thrift::protocol
   uint32_t xfer = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_pargs");
+  xfer += oprot->writeFieldBegin("pattern", apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->pattern)));
+  xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -791,6 +825,202 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(apache::thrift::protoco
 
   return xfer;
 }
 
+uint32_t ThriftHiveMetastore_get_all_databases_args::read(apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_databases_args::write(apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_args");
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_databases_pargs::write(apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_pargs");
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_databases_result::read(apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == apache::thrift::protocol::T_LIST) {
+          {
+            this->success.clear();
+            uint32_t _size101;
+            apache::thrift::protocol::TType _etype104;
+            iprot->readListBegin(_etype104, _size101);
+            this->success.resize(_size101);
+            uint32_t _i105;
+            for (_i105 = 0; _i105 < _size101; ++_i105)
+            {
+              xfer += iprot->readString(this->success[_i105]);
+            }
+            iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_databases_result::write(apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
+    {
+      xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
+      std::vector<std::string> ::const_iterator _iter106;
+      for (_iter106 = this->success.begin(); _iter106 != this->success.end(); ++_iter106)
+      {
+        xfer += oprot->writeString((*_iter106));
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_databases_presult::read(apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == apache::thrift::protocol::T_LIST) {
+          {
+            (*(this->success)).clear();
+            uint32_t _size107;
+            apache::thrift::protocol::TType _etype110;
+            iprot->readListBegin(_etype110, _size107);
+            (*(this->success)).resize(_size107);
+            uint32_t _i111;
+            for (_i111 = 0; _i111 < _size107; ++_i111)
+            {
+              xfer += iprot->readString((*(this->success))[_i111]);
+            }
+            iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
 uint32_t ThriftHiveMetastore_get_type_args::read(apache::thrift::protocol::TProtocol* iprot) {
 
   uint32_t xfer = 0;
@@ -883,6 +1113,14 @@ uint32_t ThriftHiveMetastore_get_type_result::read(apache::thrift::protocol::TPr
         break;
       case 1:
         if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
           xfer += this->o2.read(iprot);
           this->__isset.o2 = true;
         } else {
@@ -911,8 +1149,12 @@ uint32_t ThriftHiveMetastore_get_type_result::write(apache::thrift::protocol::TP
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_STRUCT, 0);
     xfer += this->success.write(oprot);
     xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
   } else if (this->__isset.o2) {
-    xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 1);
+    xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2);
     xfer += this->o2.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
@@ -951,6 +1193,14 @@ uint32_t ThriftHiveMetastore_get_type_presult::read(apache::thrift::protocol::TP
         break;
       case 1:
         if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
           xfer += this->o2.read(iprot);
           this->__isset.o2 = true;
         } else {
@@ -1279,6 +1529,14 @@ uint32_t ThriftHiveMetastore_drop_type_result::read(apache::thrift::protocol::TP
         break;
       case 1:
         if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
           xfer += this->o2.read(iprot);
           this->__isset.o2 = true;
         } else {
@@ -1307,8 +1565,12 @@ uint32_t ThriftHiveMetastore_drop_type_result::write(apache::thrift::protocol::T
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_BOOL, 0);
     xfer += oprot->writeBool(this->success);
     xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
   } else if (this->__isset.o2) {
-    xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 1);
+    xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2);
     xfer += this->o2.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
@@ -1345,7 +1607,15 @@ uint32_t ThriftHiveMetastore_drop_type_presult::read(apache::thrift::protocol::T
           xfer += iprot->skip(ftype);
         }
         break;
-      case 1:
+      case 1:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
         if (ftype == apache::thrift::protocol::T_STRUCT) {
           xfer += this->o2.read(iprot);
           this->__isset.o2 = true;
@@ -1451,17 +1721,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(apache::thrift::protocol:
         if (ftype == apache::thrift::protocol::T_MAP) {
           {
             this->success.clear();
-            uint32_t _size101;
-            apache::thrift::protocol::TType _ktype102;
-            apache::thrift::protocol::TType _vtype103;
-            iprot->readMapBegin(_ktype102, _vtype103, _size101);
-            uint32_t _i105;
-            for (_i105 = 0; _i105 < _size101; ++_i105)
+            uint32_t _size112;
+            apache::thrift::protocol::TType _ktype113;
+            apache::thrift::protocol::TType _vtype114;
+            iprot->readMapBegin(_ktype113, _vtype114, _size112);
+            uint32_t _i116;
+            for (_i116 = 0; _i116 < _size112; ++_i116)
             {
-              std::string _key106;
-              xfer += iprot->readString(_key106);
-              Type& _val107 = this->success[_key106];
-              xfer += _val107.read(iprot);
+              std::string _key117;
+              xfer += iprot->readString(_key117);
+              Type& _val118 = this->success[_key117];
+              xfer += _val118.read(iprot);
             }
             iprot->readMapEnd();
           }
@@ -1500,11 +1770,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_MAP, 0);
     {
       xfer += oprot->writeMapBegin(apache::thrift::protocol::T_STRING, apache::thrift::protocol::T_STRUCT, this->success.size());
-      std::map<std::string, Type> ::const_iterator _iter108;
-      for (_iter108 = this->success.begin(); _iter108 != this->success.end(); ++_iter108)
+      std::map<std::string, Type> ::const_iterator _iter119;
+      for (_iter119 = this->success.begin(); _iter119 != this->success.end(); ++_iter119)
       {
-        xfer += oprot->writeString(_iter108->first);
-        xfer += _iter108->second.write(oprot);
+        xfer += oprot->writeString(_iter119->first);
+        xfer += _iter119->second.write(oprot);
       }
       xfer += oprot->writeMapEnd();
     }
@@ -1543,17 +1813,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(apache::thrift::protocol
         if (ftype == apache::thrift::protocol::T_MAP) {
          {
            (*(this->success)).clear();
-            uint32_t _size109;
-            apache::thrift::protocol::TType _ktype110;
-            apache::thrift::protocol::TType _vtype111;
-            iprot->readMapBegin(_ktype110, _vtype111, _size109);
-            uint32_t _i113;
-            for (_i113 = 0; _i113 < _size109; ++_i113)
+            uint32_t _size120;
+            apache::thrift::protocol::TType _ktype121;
+            apache::thrift::protocol::TType _vtype122;
+            iprot->readMapBegin(_ktype121, _vtype122, _size120);
+            uint32_t _i124;
+            for (_i124 = 0; _i124 < _size120; ++_i124)
             {
-              std::string _key114;
-              xfer += iprot->readString(_key114);
-              Type& _val115 = (*(this->success))[_key114];
-              xfer += _val115.read(iprot);
+              std::string _key125;
+              xfer += iprot->readString(_key125);
+              Type& _val126 = (*(this->success))[_key125];
+              xfer += _val126.read(iprot);
             }
             iprot->readMapEnd();
           }
@@ -1682,14 +1952,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(apache::thrift::protocol::T
         if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size116;
-            apache::thrift::protocol::TType _etype119;
-            iprot->readListBegin(_etype119, _size116);
-            this->success.resize(_size116);
-            uint32_t _i120;
-            for (_i120 = 0; _i120 < _size116; ++_i120)
+            uint32_t _size127;
+            apache::thrift::protocol::TType _etype130;
+            iprot->readListBegin(_etype130, _size127);
+            this->success.resize(_size127);
+            uint32_t _i131;
+            for (_i131 = 0; _i131 < _size127; ++_i131)
             {
-              xfer += this->success[_i120].read(iprot);
+              xfer += this->success[_i131].read(iprot);
             }
             iprot->readListEnd();
           }
@@ -1744,10 +2014,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(apache::thrift::protocol::
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size());
-      std::vector<FieldSchema> ::const_iterator _iter121;
-      for (_iter121 = this->success.begin(); _iter121 != this->success.end(); ++_iter121)
+      std::vector<FieldSchema> ::const_iterator _iter132;
+      for (_iter132 = this->success.begin(); _iter132 != this->success.end(); ++_iter132)
       {
-        xfer += (*_iter121).write(oprot);
+        xfer += (*_iter132).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -1794,14 +2064,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(apache::thrift::protocol::
         if (ftype == apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size122;
-            apache::thrift::protocol::TType _etype125;
-            iprot->readListBegin(_etype125, _size122);
-            (*(this->success)).resize(_size122);
-            uint32_t _i126;
-            for (_i126 = 0; _i126 < _size122; ++_i126)
+            uint32_t _size133;
+            apache::thrift::protocol::TType _etype136;
+            iprot->readListBegin(_etype136, _size133);
+            (*(this->success)).resize(_size133);
+            uint32_t _i137;
+            for (_i137 = 0; _i137 < _size133; ++_i137)
             {
-              xfer += (*(this->success))[_i126].read(iprot);
+              xfer += (*(this->success))[_i137].read(iprot);
             }
             iprot->readListEnd();
           }
@@ -1946,14 +2216,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(apache::thrift::protocol::T
         if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size127;
-            apache::thrift::protocol::TType _etype130;
-            iprot->readListBegin(_etype130, _size127);
-            this->success.resize(_size127);
-            uint32_t _i131;
-            for (_i131 = 0; _i131 < _size127; ++_i131)
+            uint32_t _size138;
+            apache::thrift::protocol::TType _etype141;
+            iprot->readListBegin(_etype141, _size138);
+            this->success.resize(_size138);
+            uint32_t _i142;
+            for (_i142 = 0; _i142 < _size138; ++_i142)
             {
-              xfer += this->success[_i131].read(iprot);
+              xfer += this->success[_i142].read(iprot);
             }
             iprot->readListEnd();
           }
@@ -2008,10 +2278,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(apache::thrift::protocol::
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size());
-      std::vector<FieldSchema> ::const_iterator _iter132;
-      for (_iter132 = this->success.begin(); _iter132 != this->success.end(); ++_iter132)
+      std::vector<FieldSchema> ::const_iterator _iter143;
+      for (_iter143 = this->success.begin(); _iter143 != this->success.end(); ++_iter143)
       {
-        xfer += (*_iter132).write(oprot);
+        xfer += (*_iter143).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -2058,14 +2328,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(apache::thrift::protocol::
         if (ftype == apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size133;
-            apache::thrift::protocol::TType _etype136;
-            iprot->readListBegin(_etype136, _size133);
-            (*(this->success)).resize(_size133);
-            uint32_t _i137;
-            for (_i137 = 0; _i137 < _size133; ++_i137)
+            uint32_t _size144;
+            apache::thrift::protocol::TType _etype147;
+            iprot->readListBegin(_etype147, _size144);
+            (*(this->success)).resize(_size144);
+            uint32_t _i148;
+            for (_i148 = 0; _i148 < _size144; ++_i148)
             {
-              xfer += (*(this->success))[_i137].read(iprot);
+              xfer += (*(this->success))[_i148].read(iprot);
             }
             iprot->readListEnd();
           }
@@ -2582,35 +2852,245 @@ uint32_t ThriftHiveMetastore_get_tables_args::read(apache::thrift::protocol::TPr
 
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_tables_args::write(apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_tables_args::write(apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_args");
+  xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->db_name);
+  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeFieldBegin("pattern", apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->pattern);
+  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_tables_pargs::write(apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_pargs");
+  xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->db_name)));
+  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeFieldBegin("pattern", apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->pattern)));
+  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_tables_result::read(apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == apache::thrift::protocol::T_LIST) {
+          {
+            this->success.clear();
+            uint32_t _size149;
+            apache::thrift::protocol::TType _etype152;
+            iprot->readListBegin(_etype152, _size149);
+            this->success.resize(_size149);
+            uint32_t _i153;
+            for (_i153 = 0; _i153 < _size149; ++_i153)
+            {
+              xfer += iprot->readString(this->success[_i153]);
+            }
+            iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_tables_result::write(apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
+    {
+      xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
+      std::vector<std::string> ::const_iterator _iter154;
+      for (_iter154 = this->success.begin(); _iter154 != this->success.end(); ++_iter154)
+      {
+        xfer += oprot->writeString((*_iter154));
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_tables_presult::read(apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == apache::thrift::protocol::T_LIST) {
+          {
+            (*(this->success)).clear();
+            uint32_t _size155;
+            apache::thrift::protocol::TType _etype158;
+            iprot->readListBegin(_etype158, _size155);
+            (*(this->success)).resize(_size155);
+            uint32_t _i159;
+            for (_i159 = 0; _i159 < _size155; ++_i159)
+            {
+              xfer += iprot->readString((*(this->success))[_i159]);
+            }
+            iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_tables_args::read(apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->db_name);
+          this->__isset.db_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_tables_args::write(apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_tables_args");
   xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->db_name);
   xfer += oprot->writeFieldEnd();
-  xfer += oprot->writeFieldBegin("pattern", apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString(this->pattern);
-  xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_tables_pargs::write(apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_all_tables_pargs::write(apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_tables_pargs");
   xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->db_name)));
   xfer += oprot->writeFieldEnd();
-  xfer += oprot->writeFieldBegin("pattern", apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString((*(this->pattern)));
-  xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_tables_result::read(apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_all_tables_result::read(apache::thrift::protocol::TProtocol* iprot) {
 
   uint32_t xfer = 0;
   std::string fname;
@@ -2634,14 +3114,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(apache::thrift::protocol::T
         if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size138;
-            apache::thrift::protocol::TType _etype141;
-            iprot->readListBegin(_etype141, _size138);
-            this->success.resize(_size138);
-            uint32_t _i142;
-            for (_i142 = 0; _i142 < _size138; ++_i142)
+            uint32_t _size160;
+            apache::thrift::protocol::TType _etype163;
+            iprot->readListBegin(_etype163, _size160);
+            this->success.resize(_size160);
+            uint32_t _i164;
+            for (_i164 = 0; _i164 < _size160; ++_i164)
             {
-              xfer += iprot->readString(this->success[_i142]);
+              xfer += iprot->readString(this->success[_i164]);
             }
             iprot->readListEnd();
           }
@@ -2670,20 +3150,20 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(apache::thrift::protocol::T
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_tables_result::write(apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_all_tables_result::write(apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_tables_result");
 
   if (this->__isset.success) {
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
-      std::vector<std::string> ::const_iterator _iter143;
-      for (_iter143 = this->success.begin(); _iter143 != this->success.end(); ++_iter143)
+      std::vector<std::string> ::const_iterator _iter165;
+      for (_iter165 = this->success.begin(); _iter165 != this->success.end(); ++_iter165)
       {
-        xfer += oprot->writeString((*_iter143));
+        xfer += oprot->writeString((*_iter165));
       }
       xfer += oprot->writeListEnd();
     }
@@ -2698,7 +3178,7 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(apache::thrift::protocol::
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_tables_presult::read(apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_all_tables_presult::read(apache::thrift::protocol::TProtocol* iprot) {
 
   uint32_t xfer = 0;
   std::string fname;
@@ -2722,14 +3202,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(apache::thrift::protocol::
         if (ftype == apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size144;
-            apache::thrift::protocol::TType _etype147;
-            iprot->readListBegin(_etype147, _size144);
-            (*(this->success)).resize(_size144);
-            uint32_t _i148;
-            for (_i148 = 0; _i148 < _size144; ++_i148)
+            uint32_t _size166;
+            apache::thrift::protocol::TType _etype169;
+            iprot->readListBegin(_etype169, _size166);
+            (*(this->success)).resize(_size166);
+            uint32_t _i170;
+            for (_i170 = 0; _i170 < _size166; ++_i170)
             {
-              xfer += iprot->readString((*(this->success))[_i148]);
+              xfer += iprot->readString((*(this->success))[_i170]);
             }
             iprot->readListEnd();
           }
@@ -3434,14 +3914,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(apache::thrift::protoco
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size149;
-            apache::thrift::protocol::TType _etype152;
-            iprot->readListBegin(_etype152, _size149);
-            this->part_vals.resize(_size149);
-            uint32_t _i153;
-            for (_i153 = 0; _i153 < _size149; ++_i153)
+            uint32_t _size171;
+            apache::thrift::protocol::TType _etype174;
+            iprot->readListBegin(_etype174, _size171);
+            this->part_vals.resize(_size171);
+            uint32_t _i175;
+            for (_i175 = 0; _i175 < _size171; ++_i175)
             {
-              xfer += iprot->readString(this->part_vals[_i153]);
+              xfer += iprot->readString(this->part_vals[_i175]);
             }
             iprot->readListEnd();
           }
@@ -3474,10 +3954,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->part_vals.size());
-    std::vector<std::string> ::const_iterator _iter154;
-    for (_iter154 = this->part_vals.begin(); _iter154 != this->part_vals.end(); ++_iter154)
+    std::vector<std::string> ::const_iterator _iter176;
+    for (_iter176 = this->part_vals.begin(); _iter176 != this->part_vals.end(); ++_iter176)
     {
-      xfer += oprot->writeString((*_iter154));
+      xfer += oprot->writeString((*_iter176));
     }
     xfer += oprot->writeListEnd();
   }
@@ -3499,10 +3979,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(apache::thrift::proto
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, (*(this->part_vals)).size());
-    std::vector<std::string> ::const_iterator _iter155;
-    for (_iter155 = (*(this->part_vals)).begin(); _iter155 != (*(this->part_vals)).end(); ++_iter155)
+    std::vector<std::string> ::const_iterator _iter177;
+    for (_iter177 = (*(this->part_vals)).begin(); _iter177 != (*(this->part_vals)).end(); ++_iter177)
     {
-      xfer += oprot->writeString((*_iter155));
+      xfer += oprot->writeString((*_iter177));
    }
     xfer += oprot->writeListEnd();
   }
@@ -3954,14 +4434,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(apache::thrift::protocol:
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size156;
-            apache::thrift::protocol::TType _etype159;
-            iprot->readListBegin(_etype159, _size156);
-            this->part_vals.resize(_size156);
-            uint32_t _i160;
-            for (_i160 = 0; _i160 < _size156; ++_i160)
+            uint32_t _size178;
+            apache::thrift::protocol::TType _etype181;
+            iprot->readListBegin(_etype181, _size178);
+            this->part_vals.resize(_size178);
+            uint32_t _i182;
+            for (_i182 = 0; _i182 < _size178; ++_i182)
             {
-              xfer += iprot->readString(this->part_vals[_i160]);
+              xfer += iprot->readString(this->part_vals[_i182]);
             }
             iprot->readListEnd();
           }
@@ -4002,10 +4482,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(apache::thrift::protocol
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->part_vals.size());
-    std::vector<std::string> ::const_iterator _iter161;
-    for (_iter161 = this->part_vals.begin(); _iter161 != this->part_vals.end(); ++_iter161)
+    std::vector<std::string> ::const_iterator _iter183;
+    for (_iter183 = this->part_vals.begin(); _iter183 != this->part_vals.end(); ++_iter183)
     {
-      xfer += oprot->writeString((*_iter161));
+      xfer += oprot->writeString((*_iter183));
     }
     xfer += oprot->writeListEnd();
   }
@@ -4030,10 +4510,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(apache::thrift::protoco
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, (*(this->part_vals)).size());
-    std::vector<std::string> ::const_iterator _iter162;
-    for (_iter162 = (*(this->part_vals)).begin(); _iter162 != (*(this->part_vals)).end(); ++_iter162)
+    std::vector<std::string> ::const_iterator _iter184;
+    for (_iter184 = (*(this->part_vals)).begin(); _iter184 != (*(this->part_vals)).end(); ++_iter184)
     {
-      xfer += oprot->writeString((*_iter162));
+      xfer += oprot->writeString((*_iter184));
     }
     xfer += oprot->writeListEnd();
   }
@@ -4462,14 +4942,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(apache::thrift::protocol::
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size163;
-            apache::thrift::protocol::TType _etype166;
-            iprot->readListBegin(_etype166, _size163);
-            this->part_vals.resize(_size163);
-            uint32_t _i167;
-            for (_i167 = 0; _i167 < _size163; ++_i167)
+            uint32_t _size185;
+            apache::thrift::protocol::TType _etype188;
+            iprot->readListBegin(_etype188, _size185);
+            this->part_vals.resize(_size185);
+            uint32_t _i189;
+            for (_i189 = 0; _i189 < _size185; ++_i189)
             {
-              xfer += iprot->readString(this->part_vals[_i167]);
+              xfer += iprot->readString(this->part_vals[_i189]);
             }
             iprot->readListEnd();
           }
@@ -4502,10 +4982,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(apache::thrift::protocol:
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->part_vals.size());
-    std::vector<std::string> ::const_iterator _iter168;
-    for (_iter168 = this->part_vals.begin(); _iter168 != this->part_vals.end(); ++_iter168)
+    std::vector<std::string> ::const_iterator _iter190;
+    for (_iter190 = this->part_vals.begin(); _iter190 != this->part_vals.end(); ++_iter190)
     {
-      xfer += oprot->writeString((*_iter168));
+      xfer += oprot->writeString((*_iter190));
     }
     xfer += oprot->writeListEnd();
   }
@@ -4527,10 +5007,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(apache::thrift::protocol
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, (*(this->part_vals)).size());
-    std::vector<std::string> ::const_iterator _iter169;
-    for (_iter169 = (*(this->part_vals)).begin(); _iter169 != (*(this->part_vals)).end(); ++_iter169)
+    std::vector<std::string> ::const_iterator _iter191;
+    for (_iter191 = (*(this->part_vals)).begin(); _iter191 != (*(this->part_vals)).end(); ++_iter191)
     {
-      xfer += oprot->writeString((*_iter169));
+      xfer += oprot->writeString((*_iter191));
     }
     xfer += oprot->writeListEnd();
   }
@@ -5016,14 +5496,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(apache::thrift::protoco
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size170;
-            apache::thrift::protocol::TType _etype173;
-            iprot->readListBegin(_etype173, _size170);
-            this->success.resize(_size170);
-            uint32_t _i174;
-            for (_i174 = 0; _i174 < _size170; ++_i174)
+            uint32_t _size192;
+            apache::thrift::protocol::TType _etype195;
+            iprot->readListBegin(_etype195, _size192);
+            this->success.resize(_size192);
+            uint32_t _i196;
+            for (_i196 = 0; _i196 < _size192; ++_i196)
             {
-              xfer += this->success[_i174].read(iprot);
+              xfer += this->success[_i196].read(iprot);
             }
             iprot->readListEnd();
           }
@@ -5070,10 +5550,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(apache::thrift::protoc
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size());
-      std::vector<Partition> ::const_iterator _iter175;
-      for (_iter175 = this->success.begin(); _iter175 != this->success.end(); ++_iter175)
+      std::vector<Partition> ::const_iterator _iter197;
+      for (_iter197 = this->success.begin(); _iter197 != this->success.end(); ++_iter197)
      {
-        xfer += (*_iter175).write(oprot);
+        xfer += (*_iter197).write(oprot);
      }
       xfer += oprot->writeListEnd();
     }
@@ -5116,14 +5596,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(apache::thrift::protoc
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size176;
-            apache::thrift::protocol::TType _etype179;
-            iprot->readListBegin(_etype179, _size176);
-            (*(this->success)).resize(_size176);
-            uint32_t _i180;
-            for (_i180 = 0; _i180 < _size176; ++_i180)
+            uint32_t _size198;
+            apache::thrift::protocol::TType _etype201;
+            iprot->readListBegin(_etype201, _size198);
+            (*(this->success)).resize(_size198);
+            uint32_t _i202;
+            for (_i202 = 0; _i202 < _size198; ++_i202)
             {
-              xfer += (*(this->success))[_i180].read(iprot);
+              xfer += (*(this->success))[_i202].read(iprot);
             }
             iprot->readListEnd();
           }
@@ -5274,14 +5754,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(apache::thrift::pr
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size181;
-            apache::thrift::protocol::TType _etype184;
-            iprot->readListBegin(_etype184, _size181);
-            this->success.resize(_size181);
-            uint32_t _i185;
-            for (_i185 = 0; _i185 < _size181; ++_i185)
+            uint32_t _size203;
+            apache::thrift::protocol::TType _etype206;
+            iprot->readListBegin(_etype206, _size203);
+            this->success.resize(_size203);
+            uint32_t _i207;
+            for (_i207 = 0; _i207 < _size203; ++_i207)
             {
-              xfer += iprot->readString(this->success[_i185]);
+              xfer += iprot->readString(this->success[_i207]);
             }
             iprot->readListEnd();
           }
@@ -5320,10 +5800,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(apache::thrift::p
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
-      std::vector<std::string> ::const_iterator _iter186;
-      for (_iter186 = this->success.begin(); _iter186 != this->success.end(); ++_iter186)
+      std::vector<std::string> ::const_iterator _iter208;
+      for (_iter208 = this->success.begin(); _iter208 != this->success.end(); ++_iter208)
      {
-        xfer += oprot->writeString((*_iter186));
+        xfer += oprot->writeString((*_iter208));
      }
       xfer += oprot->writeListEnd();
     }
@@ -5362,14 +5842,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(apache::thrift::p
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size187;
-            apache::thrift::protocol::TType _etype190;
-            iprot->readListBegin(_etype190, _size187);
-            (*(this->success)).resize(_size187);
-            uint32_t _i191;
-            for (_i191 = 0; _i191 < _size187; ++_i191)
+            uint32_t _size209;
+            apache::thrift::protocol::TType _etype212;
+            iprot->readListBegin(_etype212, _size209);
+            (*(this->success)).resize(_size209);
+            uint32_t _i213;
+            for (_i213 = 0; _i213 < _size209; ++_i213)
             {
-              xfer += iprot->readString((*(this->success))[_i191]);
+              xfer += iprot->readString((*(this->success))[_i213]);
             }
             iprot->readListEnd();
           }
@@ -5438,14 +5918,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(apache::thrift::protoc
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size192;
-            apache::thrift::protocol::TType _etype195;
-            iprot->readListBegin(_etype195, _size192);
-            this->part_vals.resize(_size192);
-            uint32_t _i196;
-            for (_i196 = 0; _i196 < _size192; ++_i196)
+            uint32_t _size214;
+            apache::thrift::protocol::TType _etype217;
+            iprot->readListBegin(_etype217, _size214);
+            this->part_vals.resize(_size214);
+            uint32_t _i218;
+            for (_i218 = 0; _i218 < _size214; ++_i218)
             {
-              xfer += iprot->readString(this->part_vals[_i196]);
+              xfer += iprot->readString(this->part_vals[_i218]);
             }
             iprot->readListEnd();
           }
@@ -5486,10 +5966,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(apache::thrift::proto
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->part_vals.size());
-    std::vector<std::string> ::const_iterator _iter197;
-    for (_iter197 = this->part_vals.begin(); _iter197 != this->part_vals.end(); ++_iter197)
+    std::vector<std::string> ::const_iterator _iter219;
+    for (_iter219 = this->part_vals.begin(); _iter219 != this->part_vals.end(); ++_iter219)
     {
-      xfer += oprot->writeString((*_iter197));
+      xfer += oprot->writeString((*_iter219));
     }
     xfer += oprot->writeListEnd();
   }
@@ -5514,10 +5994,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, (*(this->part_vals)).size());
-    std::vector<std::string> ::const_iterator _iter198;
-    for (_iter198 = (*(this->part_vals)).begin(); _iter198 != (*(this->part_vals)).end(); ++_iter198)
+    std::vector<std::string> ::const_iterator _iter220;
+    for (_iter220 = (*(this->part_vals)).begin(); _iter220 != (*(this->part_vals)).end(); ++_iter220)
     {
-      xfer += oprot->writeString((*_iter198));
+      xfer += oprot->writeString((*_iter220));
     }
     xfer += oprot->writeListEnd();
   }
@@ -5554,14 +6034,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(apache::thrift::prot
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size199;
-            apache::thrift::protocol::TType _etype202;
-            iprot->readListBegin(_etype202, _size199);
-            this->success.resize(_size199);
-            uint32_t _i203;
-            for (_i203 = 0; _i203 < _size199; ++_i203)
+            uint32_t _size221;
+            apache::thrift::protocol::TType _etype224;
+            iprot->readListBegin(_etype224, _size221);
+            this->success.resize(_size221);
+            uint32_t _i225;
+            for (_i225 = 0; _i225 < _size221; ++_i225)
             {
-              xfer += this->success[_i203].read(iprot);
+              xfer += this->success[_i225].read(iprot);
             }
             iprot->readListEnd();
           }
@@ -5600,10 +6080,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(apache::thrift::pro
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size());
-      std::vector<Partition> ::const_iterator _iter204;
-      for (_iter204 = this->success.begin(); _iter204 != this->success.end(); ++_iter204)
+      std::vector<Partition> ::const_iterator _iter226;
+      for (_iter226 = this->success.begin(); _iter226 != this->success.end(); ++_iter226)
      {
-        xfer += (*_iter204).write(oprot);
+        xfer += (*_iter226).write(oprot);
      }
       xfer += oprot->writeListEnd();
     }
@@ -5642,14 +6122,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(apache::thrift::pro
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size205;
-            apache::thrift::protocol::TType _etype208;
-            iprot->readListBegin(_etype208, _size205);
-            (*(this->success)).resize(_size205);
-            uint32_t _i209;
-            for (_i209 = 0; _i209 < _size205; ++_i209)
+            uint32_t _size227;
+            apache::thrift::protocol::TType _etype230;
+            iprot->readListBegin(_etype230, _size227);
+            (*(this->success)).resize(_size227);
+            uint32_t _i231;
+            for (_i231 = 0; _i231 < _size227; ++_i231)
             {
-              xfer += (*(this->success))[_i209].read(iprot);
+              xfer += (*(this->success))[_i231].read(iprot);
             }
             iprot->readListEnd();
           }
@@ -5718,14 +6198,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(apache::thrift::p
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->part_vals.clear();
-            uint32_t _size210;
-            apache::thrift::protocol::TType _etype213;
-            iprot->readListBegin(_etype213, _size210);
-            this->part_vals.resize(_size210);
-            uint32_t _i214;
-            for (_i214 = 0; _i214 < _size210; ++_i214)
+            uint32_t _size232;
+            apache::thrift::protocol::TType _etype235;
+            iprot->readListBegin(_etype235, _size232);
+            this->part_vals.resize(_size232);
+            uint32_t _i236;
+            for (_i236 = 0; _i236 < _size232; ++_i236)
             {
-              xfer += iprot->readString(this->part_vals[_i214]);
+              xfer += iprot->readString(this->part_vals[_i236]);
             }
             iprot->readListEnd();
           }
@@ -5766,10 +6246,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(apache::thrift::
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->part_vals.size());
-    std::vector<std::string> ::const_iterator _iter215;
-    for (_iter215 = this->part_vals.begin(); _iter215 != this->part_vals.end(); ++_iter215)
+    std::vector<std::string> ::const_iterator _iter237;
+    for (_iter237 = this->part_vals.begin(); _iter237 != this->part_vals.end(); ++_iter237)
     {
-      xfer += oprot->writeString((*_iter215));
+      xfer += oprot->writeString((*_iter237));
     }
     xfer += oprot->writeListEnd();
   }
@@ -5794,10 +6274,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(apache::thrift:
   xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, (*(this->part_vals)).size());
-    std::vector<std::string> ::const_iterator _iter216;
-    for (_iter216 = (*(this->part_vals)).begin(); _iter216 != (*(this->part_vals)).end(); ++_iter216)
+    std::vector<std::string> ::const_iterator _iter238;
+    for (_iter238 = (*(this->part_vals)).begin(); _iter238 != (*(this->part_vals)).end(); ++_iter238)
     {
-      xfer += oprot->writeString((*_iter216));
+      xfer += oprot->writeString((*_iter238));
     }
     xfer += oprot->writeListEnd();
   }
@@ -5834,14 +6314,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(apache::thrift:
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size217;
-            apache::thrift::protocol::TType _etype220;
-            iprot->readListBegin(_etype220, _size217);
-            this->success.resize(_size217);
-            uint32_t _i221;
-            for (_i221 = 0; _i221 < _size217; ++_i221)
+            uint32_t _size239;
+            apache::thrift::protocol::TType _etype242;
+            iprot->readListBegin(_etype242, _size239);
+            this->success.resize(_size239);
+            uint32_t _i243;
+            for (_i243 = 0; _i243 < _size239; ++_i243)
             {
-              xfer += iprot->readString(this->success[_i221]);
+              xfer += iprot->readString(this->success[_i243]);
             }
             iprot->readListEnd();
           }
@@ -5880,10 +6360,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(apache::thrift
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
-      std::vector<std::string> ::const_iterator _iter222;
-      for (_iter222 = this->success.begin(); _iter222 != this->success.end(); ++_iter222)
+      std::vector<std::string> ::const_iterator _iter244;
+      for (_iter244 = this->success.begin(); _iter244 != this->success.end(); ++_iter244)
      {
-        xfer += oprot->writeString((*_iter222));
+        xfer += oprot->writeString((*_iter244));
      }
       xfer += oprot->writeListEnd();
     }
@@ -5922,14 +6402,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(apache::thrift
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size223;
-            apache::thrift::protocol::TType _etype226;
-            iprot->readListBegin(_etype226, _size223);
-            (*(this->success)).resize(_size223);
-            uint32_t _i227;
-            for (_i227 = 0; _i227 < _size223; ++_i227)
+            uint32_t _size245;
+            apache::thrift::protocol::TType _etype248;
+            iprot->readListBegin(_etype248, _size245);
+            (*(this->success)).resize(_size245);
+            uint32_t _i249;
+            for (_i249 = 0; _i249 < _size245; ++_i249)
             {
-              xfer += iprot->readString((*(this->success))[_i227]);
+              xfer += iprot->readString((*(this->success))[_i249]);
             }
             iprot->readListEnd();
           }
@@ -6442,14 +6922,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(apache::thrift:
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size228;
-            apache::thrift::protocol::TType _etype231;
-            iprot->readListBegin(_etype231, _size228);
-            this->success.resize(_size228);
-            uint32_t _i232;
-            for (_i232 = 0; _i232 < _size228; ++_i232)
+            uint32_t _size250;
+            apache::thrift::protocol::TType _etype253;
+            iprot->readListBegin(_etype253, _size250);
+            this->success.resize(_size250);
+            uint32_t _i254;
+            for (_i254 = 0; _i254 < _size250; ++_i254)
             {
-              xfer += iprot->readString(this->success[_i232]);
+              xfer += iprot->readString(this->success[_i254]);
             }
             iprot->readListEnd();
           }
@@ -6488,10 +6968,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(apache::thrift
     xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
-      std::vector<std::string> ::const_iterator _iter233;
-      for (_iter233 = this->success.begin(); _iter233 != this->success.end(); ++_iter233)
+      std::vector<std::string> ::const_iterator _iter255;
+      for (_iter255 = this->success.begin(); _iter255 != this->success.end(); ++_iter255)
      {
-        xfer += oprot->writeString((*_iter233));
+        xfer += oprot->writeString((*_iter255));
      }
       xfer += oprot->writeListEnd();
     }
@@ -6530,14 +7010,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(apache::thrift
        if (ftype == apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size234;
-            apache::thrift::protocol::TType _etype237;
- iprot->readListBegin(_etype237, _size234); - (*(this->success)).resize(_size234); - uint32_t _i238; - for (_i238 = 0; _i238 < _size234; ++_i238) + uint32_t _size256; + apache::thrift::protocol::TType _etype259; + iprot->readListBegin(_etype259, _size256); + (*(this->success)).resize(_size256); + uint32_t _i260; + for (_i260 = 0; _i260 < _size256; ++_i260) { - xfer += iprot->readString((*(this->success))[_i238]); + xfer += iprot->readString((*(this->success))[_i260]); } iprot->readListEnd(); } @@ -6652,17 +7132,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(apache::thrift: if (ftype == apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size239; - apache::thrift::protocol::TType _ktype240; - apache::thrift::protocol::TType _vtype241; - iprot->readMapBegin(_ktype240, _vtype241, _size239); - uint32_t _i243; - for (_i243 = 0; _i243 < _size239; ++_i243) + uint32_t _size261; + apache::thrift::protocol::TType _ktype262; + apache::thrift::protocol::TType _vtype263; + iprot->readMapBegin(_ktype262, _vtype263, _size261); + uint32_t _i265; + for (_i265 = 0; _i265 < _size261; ++_i265) { - std::string _key244; - xfer += iprot->readString(_key244); - std::string& _val245 = this->success[_key244]; - xfer += iprot->readString(_val245); + std::string _key266; + xfer += iprot->readString(_key266); + std::string& _val267 = this->success[_key266]; + xfer += iprot->readString(_val267); } iprot->readMapEnd(); } @@ -6701,11 +7181,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(apache::thrift xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(apache::thrift::protocol::T_STRING, apache::thrift::protocol::T_STRING, this->success.size()); - std::map ::const_iterator _iter246; - for (_iter246 = this->success.begin(); _iter246 != this->success.end(); ++_iter246) + std::map ::const_iterator _iter268; + for (_iter268 = this->success.begin(); _iter268 != this->success.end(); ++_iter268) { - xfer += oprot->writeString(_iter246->first); - xfer += oprot->writeString(_iter246->second); + xfer += oprot->writeString(_iter268->first); + xfer += oprot->writeString(_iter268->second); } xfer += oprot->writeMapEnd(); } @@ -6744,17 +7224,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(apache::thrift if (ftype == apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size247; - apache::thrift::protocol::TType _ktype248; - apache::thrift::protocol::TType _vtype249; - iprot->readMapBegin(_ktype248, _vtype249, _size247); - uint32_t _i251; - for (_i251 = 0; _i251 < _size247; ++_i251) + uint32_t _size269; + apache::thrift::protocol::TType _ktype270; + apache::thrift::protocol::TType _vtype271; + iprot->readMapBegin(_ktype270, _vtype271, _size269); + uint32_t _i273; + for (_i273 = 0; _i273 < _size269; ++_i273) { - std::string _key252; - xfer += iprot->readString(_key252); - std::string& _val253 = (*(this->success))[_key252]; - xfer += iprot->readString(_val253); + std::string _key274; + xfer += iprot->readString(_key274); + std::string& _val275 = (*(this->success))[_key274]; + xfer += iprot->readString(_val275); } iprot->readMapEnd(); } @@ -7595,14 +8075,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(apache::thrift::protocol:: if (ftype == apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size254; - apache::thrift::protocol::TType _etype257; - iprot->readListBegin(_etype257, _size254); - this->success.resize(_size254); - uint32_t 
_i258; - for (_i258 = 0; _i258 < _size254; ++_i258) + uint32_t _size276; + apache::thrift::protocol::TType _etype279; + iprot->readListBegin(_etype279, _size276); + this->success.resize(_size276); + uint32_t _i280; + for (_i280 = 0; _i280 < _size276; ++_i280) { - xfer += this->success[_i258].read(iprot); + xfer += this->success[_i280].read(iprot); } iprot->readListEnd(); } @@ -7649,10 +8129,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(apache::thrift::protocol: xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size()); - std::vector ::const_iterator _iter259; - for (_iter259 = this->success.begin(); _iter259 != this->success.end(); ++_iter259) + std::vector ::const_iterator _iter281; + for (_iter281 = this->success.begin(); _iter281 != this->success.end(); ++_iter281) { - xfer += (*_iter259).write(oprot); + xfer += (*_iter281).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7695,14 +8175,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(apache::thrift::protocol: if (ftype == apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size260; - apache::thrift::protocol::TType _etype263; - iprot->readListBegin(_etype263, _size260); - (*(this->success)).resize(_size260); - uint32_t _i264; - for (_i264 = 0; _i264 < _size260; ++_i264) + uint32_t _size282; + apache::thrift::protocol::TType _etype285; + iprot->readListBegin(_etype285, _size282); + (*(this->success)).resize(_size282); + uint32_t _i286; + for (_i286 = 0; _i286 < _size282; ++_i286) { - xfer += (*(this->success))[_i264].read(iprot); + xfer += (*(this->success))[_i286].read(iprot); } iprot->readListEnd(); } @@ -7853,14 +8333,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(apache::thrift::protoc if (ftype == apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size265; - apache::thrift::protocol::TType _etype268; - iprot->readListBegin(_etype268, _size265); - this->success.resize(_size265); - uint32_t _i269; - for (_i269 = 0; _i269 < _size265; ++_i269) + uint32_t _size287; + apache::thrift::protocol::TType _etype290; + iprot->readListBegin(_etype290, _size287); + this->success.resize(_size287); + uint32_t _i291; + for (_i291 = 0; _i291 < _size287; ++_i291) { - xfer += iprot->readString(this->success[_i269]); + xfer += iprot->readString(this->success[_i291]); } iprot->readListEnd(); } @@ -7899,10 +8379,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(apache::thrift::proto xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size()); - std::vector ::const_iterator _iter270; - for (_iter270 = this->success.begin(); _iter270 != this->success.end(); ++_iter270) + std::vector ::const_iterator _iter292; + for (_iter292 = this->success.begin(); _iter292 != this->success.end(); ++_iter292) { - xfer += oprot->writeString((*_iter270)); + xfer += oprot->writeString((*_iter292)); } xfer += oprot->writeListEnd(); } @@ -7941,14 +8421,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(apache::thrift::proto if (ftype == apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size271; - apache::thrift::protocol::TType _etype274; - iprot->readListBegin(_etype274, _size271); - (*(this->success)).resize(_size271); - uint32_t _i275; - for (_i275 = 0; _i275 < _size271; ++_i275) + uint32_t _size293; + 
apache::thrift::protocol::TType _etype296; + iprot->readListBegin(_etype296, _size293); + (*(this->success)).resize(_size293); + uint32_t _i297; + for (_i297 = 0; _i297 < _size293; ++_i297) { - xfer += iprot->readString((*(this->success))[_i275]); + xfer += iprot->readString((*(this->success))[_i297]); } iprot->readListEnd(); } @@ -7977,20 +8457,19 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(apache::thrift::proto return xfer; } -bool ThriftHiveMetastoreClient::create_database(const std::string& name, const std::string& description) +void ThriftHiveMetastoreClient::create_database(const Database& database) { - send_create_database(name, description); - return recv_create_database(); + send_create_database(database); + recv_create_database(); } -void ThriftHiveMetastoreClient::send_create_database(const std::string& name, const std::string& description) +void ThriftHiveMetastoreClient::send_create_database(const Database& database) { int32_t cseqid = 0; oprot_->writeMessageBegin("create_database", apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_create_database_pargs args; - args.name = &name; - args.description = &description; + args.database = &database; args.write(oprot_); oprot_->writeMessageEnd(); @@ -7998,7 +8477,7 @@ void ThriftHiveMetastoreClient::send_create_database(const std::string& name, co oprot_->getTransport()->writeEnd(); } -bool ThriftHiveMetastoreClient::recv_create_database() +void ThriftHiveMetastoreClient::recv_create_database() { int32_t rseqid = 0; @@ -8025,23 +8504,21 @@ bool ThriftHiveMetastoreClient::recv_create_database() iprot_->getTransport()->readEnd(); throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME); } - bool _return; ThriftHiveMetastore_create_database_presult result; - result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - return _return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "create_database failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + return; } void ThriftHiveMetastoreClient::get_database(Database& _return, const std::string& name) @@ -8110,19 +8587,20 @@ void ThriftHiveMetastoreClient::recv_get_database(Database& _return) throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); } -bool ThriftHiveMetastoreClient::drop_database(const std::string& name) +void ThriftHiveMetastoreClient::drop_database(const std::string& name, const bool deleteData) { - send_drop_database(name); - return recv_drop_database(); + send_drop_database(name, deleteData); + recv_drop_database(); } -void ThriftHiveMetastoreClient::send_drop_database(const std::string& name) +void ThriftHiveMetastoreClient::send_drop_database(const std::string& name, const bool deleteData) { int32_t cseqid = 0; oprot_->writeMessageBegin("drop_database", apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_drop_database_pargs args; args.name = &name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -8130,7 +8608,7 @@ void ThriftHiveMetastoreClient::send_drop_database(const std::string& name) oprot_->getTransport()->writeEnd(); } -bool ThriftHiveMetastoreClient::recv_drop_database() +void 
ThriftHiveMetastoreClient::recv_drop_database() { int32_t rseqid = 0; @@ -8157,34 +8635,36 @@ bool ThriftHiveMetastoreClient::recv_drop_database() iprot_->getTransport()->readEnd(); throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME); } - bool _return; ThriftHiveMetastore_drop_database_presult result; - result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - return _return; + if (result.__isset.o1) { + throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "drop_database failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + return; } -void ThriftHiveMetastoreClient::get_databases(std::vector & _return) +void ThriftHiveMetastoreClient::get_databases(std::vector & _return, const std::string& pattern) { - send_get_databases(); + send_get_databases(pattern); recv_get_databases(_return); } -void ThriftHiveMetastoreClient::send_get_databases() +void ThriftHiveMetastoreClient::send_get_databases(const std::string& pattern) { int32_t cseqid = 0; oprot_->writeMessageBegin("get_databases", apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_get_databases_pargs args; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -8235,6 +8715,68 @@ void ThriftHiveMetastoreClient::recv_get_databases(std::vector & _r throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result"); } +void ThriftHiveMetastoreClient::get_all_databases(std::vector & _return) +{ + send_get_all_databases(); + recv_get_all_databases(_return); +} + +void ThriftHiveMetastoreClient::send_get_all_databases() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_all_databases", apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_all_databases_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->flush(); + oprot_->getTransport()->writeEnd(); +} + +void ThriftHiveMetastoreClient::recv_get_all_databases(std::vector & _return) +{ + + int32_t rseqid = 0; + std::string fname; + apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == apache::thrift::protocol::T_EXCEPTION) { + apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != apache::thrift::protocol::T_REPLY) { + iprot_->skip(apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE); + } + if (fname.compare("get_all_databases") != 0) { + iprot_->skip(apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME); + } + ThriftHiveMetastore_get_all_databases_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + throw 
apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_all_databases failed: unknown result"); +} + void ThriftHiveMetastoreClient::get_type(Type& _return, const std::string& name) { send_get_type(name); @@ -8292,6 +8834,9 @@ void ThriftHiveMetastoreClient::recv_get_type(Type& _return) // _return pointer has now been filled return; } + if (result.__isset.o1) { + throw result.o1; + } if (result.__isset.o2) { throw result.o2; } @@ -8424,6 +8969,9 @@ bool ThriftHiveMetastoreClient::recv_drop_type() if (result.__isset.success) { return _return; } + if (result.__isset.o1) { + throw result.o1; + } if (result.__isset.o2) { throw result.o2; } @@ -8827,6 +9375,69 @@ void ThriftHiveMetastoreClient::recv_get_tables(std::vector & _retu throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_tables failed: unknown result"); } +void ThriftHiveMetastoreClient::get_all_tables(std::vector & _return, const std::string& db_name) +{ + send_get_all_tables(db_name); + recv_get_all_tables(_return); +} + +void ThriftHiveMetastoreClient::send_get_all_tables(const std::string& db_name) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_all_tables", apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_all_tables_pargs args; + args.db_name = &db_name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->flush(); + oprot_->getTransport()->writeEnd(); +} + +void ThriftHiveMetastoreClient::recv_get_all_tables(std::vector & _return) +{ + + int32_t rseqid = 0; + std::string fname; + apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == apache::thrift::protocol::T_EXCEPTION) { + apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != apache::thrift::protocol::T_REPLY) { + iprot_->skip(apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE); + } + if (fname.compare("get_all_tables") != 0) { + iprot_->skip(apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME); + } + ThriftHiveMetastore_get_all_tables_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_all_tables failed: unknown result"); +} + void ThriftHiveMetastoreClient::get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) { send_get_table(dbname, tbl_name); @@ -10345,14 +10956,16 @@ void ThriftHiveMetastoreProcessor::process_create_database(int32_t seqid, apache ThriftHiveMetastore_create_database_result result; try { - result.success = iface_->create_database(args.name, args.description); - result.__isset.success = true; + iface_->create_database(args.database); } catch (AlreadyExistsException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (InvalidObjectException &o2) { result.o2 = o2; 
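With the client methods above in place, listings come in two flavors: pattern-filtered (get_databases, get_tables) and unfiltered (get_all_databases, get_all_tables). A hedged usage sketch; the host, port 9083, transport stack, header paths, and the "test_*" pattern are assumptions for illustration, not taken from this patch:

// --- illustrative sketch, not part of the patch ---
#include <protocol/TBinaryProtocol.h>
#include <transport/TSocket.h>
#include <transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"

using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace Apache::Hadoop::Hive;

int main() {
  boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);

  transport->open();
  std::vector<std::string> dbs, tables;
  client.get_all_databases(dbs);            // new: every database, no filter
  client.get_databases(dbs, "test_*");      // changed: now takes a pattern
  client.get_all_tables(tables, "default"); // new: every table in one database
  transport->close();
  return 0;
}
// --- end sketch ---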
result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { apache::thrift::TApplicationException x(e.what()); oprot->writeMessageBegin("create_database", apache::thrift::protocol::T_EXCEPTION, seqid); @@ -10413,11 +11026,16 @@ void ThriftHiveMetastoreProcessor::process_drop_database(int32_t seqid, apache:: ThriftHiveMetastore_drop_database_result result; try { - result.success = iface_->drop_database(args.name); - result.__isset.success = true; - } catch (MetaException &o2) { + iface_->drop_database(args.name, args.deleteData); + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { apache::thrift::TApplicationException x(e.what()); oprot->writeMessageBegin("drop_database", apache::thrift::protocol::T_EXCEPTION, seqid); @@ -10444,7 +11062,7 @@ void ThriftHiveMetastoreProcessor::process_get_databases(int32_t seqid, apache:: ThriftHiveMetastore_get_databases_result result; try { - iface_->get_databases(result.success); + iface_->get_databases(result.success, args.pattern); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; @@ -10466,6 +11084,37 @@ void ThriftHiveMetastoreProcessor::process_get_databases(int32_t seqid, apache:: oprot->getTransport()->writeEnd(); } +void ThriftHiveMetastoreProcessor::process_get_all_databases(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot) +{ + ThriftHiveMetastore_get_all_databases_args args; + args.read(iprot); + iprot->readMessageEnd(); + iprot->getTransport()->readEnd(); + + ThriftHiveMetastore_get_all_databases_result result; + try { + iface_->get_all_databases(result.success); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_all_databases", apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); + return; + } + + oprot->writeMessageBegin("get_all_databases", apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); +} + void ThriftHiveMetastoreProcessor::process_get_type(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot) { ThriftHiveMetastore_get_type_args args; @@ -10477,7 +11126,10 @@ void ThriftHiveMetastoreProcessor::process_get_type(int32_t seqid, apache::thrif try { iface_->get_type(result.success, args.name); result.__isset.success = true; - } catch (MetaException &o2) { + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { @@ -10545,7 +11197,10 @@ void ThriftHiveMetastoreProcessor::process_drop_type(int32_t seqid, apache::thri try { result.success = iface_->drop_type(args.type); result.__isset.success = true; - } catch (MetaException &o2) { + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { result.o2 = o2; 
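The processor wrappers above all share one shape: decode the args struct, invoke the iface_ method, and fold each declared exception into its result.oN slot (with the matching __isset flag) before writing a T_REPLY. On the implementation side, the smallest useful handler can derive from ThriftHiveMetastoreNull (declared further down in this patch, with every method stubbed out) and override only what it serves. A sketch; the class name and returned value are invented for illustration:

// --- illustrative sketch, not part of the patch ---
class ExampleMetastoreHandler : public Apache::Hadoop::Hive::ThriftHiveMetastoreNull {
 public:
  void get_all_databases(std::vector<std::string>& _return) {
    // A real implementation would consult the backing metastore; throwing
    // MetaException here is what process_get_all_databases() turns into
    // result.o1 / __isset.o1 on the wire.
    _return.push_back("default");
  }
};
// --- end sketch ---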
result.__isset.o2 = true; } catch (const std::exception& e) { @@ -10773,6 +11428,37 @@ void ThriftHiveMetastoreProcessor::process_get_tables(int32_t seqid, apache::thr oprot->getTransport()->writeEnd(); } +void ThriftHiveMetastoreProcessor::process_get_all_tables(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot) +{ + ThriftHiveMetastore_get_all_tables_args args; + args.read(iprot); + iprot->readMessageEnd(); + iprot->getTransport()->readEnd(); + + ThriftHiveMetastore_get_all_tables_result result; + try { + iface_->get_all_tables(result.success, args.db_name); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_all_tables", apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); + return; + } + + oprot->writeMessageBegin("get_all_tables", apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); +} + void ThriftHiveMetastoreProcessor::process_get_table(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot) { ThriftHiveMetastore_get_table_args args; diff --git metastore/src/gen-cpp/ThriftHiveMetastore.h metastore/src/gen-cpp/ThriftHiveMetastore.h index e2538fb..9c30faa 100644 --- metastore/src/gen-cpp/ThriftHiveMetastore.h +++ metastore/src/gen-cpp/ThriftHiveMetastore.h @@ -15,10 +15,11 @@ namespace Apache { namespace Hadoop { namespace Hive { class ThriftHiveMetastoreIf : virtual public facebook::fb303::FacebookServiceIf { public: virtual ~ThriftHiveMetastoreIf() {} - virtual bool create_database(const std::string& name, const std::string& description) = 0; + virtual void create_database(const Database& database) = 0; virtual void get_database(Database& _return, const std::string& name) = 0; - virtual bool drop_database(const std::string& name) = 0; - virtual void get_databases(std::vector & _return) = 0; + virtual void drop_database(const std::string& name, const bool deleteData) = 0; + virtual void get_databases(std::vector & _return, const std::string& pattern) = 0; + virtual void get_all_databases(std::vector & _return) = 0; virtual void get_type(Type& _return, const std::string& name) = 0; virtual bool create_type(const Type& type) = 0; virtual bool drop_type(const std::string& type) = 0; @@ -28,6 +29,7 @@ class ThriftHiveMetastoreIf : virtual public facebook::fb303::FacebookServiceIf virtual void create_table(const Table& tbl) = 0; virtual void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) = 0; virtual void get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) = 0; + virtual void get_all_tables(std::vector & _return, const std::string& db_name) = 0; virtual void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) = 0; virtual void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) = 0; virtual void add_partition(Partition& _return, const Partition& new_part) = 0; @@ -55,18 +57,19 @@ class ThriftHiveMetastoreIf : virtual public facebook::fb303::FacebookServiceIf class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual public 
facebook::fb303::FacebookServiceNull { public: virtual ~ThriftHiveMetastoreNull() {} - bool create_database(const std::string& /* name */, const std::string& /* description */) { - bool _return = false; - return _return; + void create_database(const Database& /* database */) { + return; } void get_database(Database& /* _return */, const std::string& /* name */) { return; } - bool drop_database(const std::string& /* name */) { - bool _return = false; - return _return; + void drop_database(const std::string& /* name */, const bool /* deleteData */) { + return; + } + void get_databases(std::vector & /* _return */, const std::string& /* pattern */) { + return; } - void get_databases(std::vector & /* _return */) { + void get_all_databases(std::vector & /* _return */) { return; } void get_type(Type& /* _return */, const std::string& /* name */) { @@ -98,6 +101,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void get_tables(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* pattern */) { return; } + void get_all_tables(std::vector & /* _return */, const std::string& /* db_name */) { + return; + } void get_table(Table& /* _return */, const std::string& /* dbname */, const std::string& /* tbl_name */) { return; } @@ -172,25 +178,21 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p class ThriftHiveMetastore_create_database_args { public: - ThriftHiveMetastore_create_database_args() : name(""), description("") { + ThriftHiveMetastore_create_database_args() { } virtual ~ThriftHiveMetastore_create_database_args() throw() {} - std::string name; - std::string description; + Database database; struct __isset { - __isset() : name(false), description(false) {} - bool name; - bool description; + __isset() : database(false) {} + bool database; } __isset; bool operator == (const ThriftHiveMetastore_create_database_args & rhs) const { - if (!(name == rhs.name)) - return false; - if (!(description == rhs.description)) + if (!(database == rhs.database)) return false; return true; } @@ -211,8 +213,7 @@ class ThriftHiveMetastore_create_database_pargs { virtual ~ThriftHiveMetastore_create_database_pargs() throw() {} - const std::string* name; - const std::string* description; + const Database* database; uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; @@ -221,30 +222,30 @@ class ThriftHiveMetastore_create_database_pargs { class ThriftHiveMetastore_create_database_result { public: - ThriftHiveMetastore_create_database_result() : success(0) { + ThriftHiveMetastore_create_database_result() { } virtual ~ThriftHiveMetastore_create_database_result() throw() {} - bool success; AlreadyExistsException o1; - MetaException o2; + InvalidObjectException o2; + MetaException o3; struct __isset { - __isset() : success(false), o1(false), o2(false) {} - bool success; + __isset() : o1(false), o2(false), o3(false) {} bool o1; bool o2; + bool o3; } __isset; bool operator == (const ThriftHiveMetastore_create_database_result & rhs) const { - if (!(success == rhs.success)) - return false; if (!(o1 == rhs.o1)) return false; if (!(o2 == rhs.o2)) return false; + if (!(o3 == rhs.o3)) + return false; return true; } bool operator != (const ThriftHiveMetastore_create_database_result &rhs) const { @@ -264,15 +265,15 @@ class ThriftHiveMetastore_create_database_presult { virtual ~ThriftHiveMetastore_create_database_presult() throw() {} - bool* success; AlreadyExistsException o1; - MetaException o2; + InvalidObjectException o2; 
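Note what the reworked result struct implies for callers: with the bool success slot gone, success is simply the absence of a throw, and the remaining __isset flags map one-to-one onto exception types. A client-side sketch (the client object is the one from the earlier connection sketch; the field values are illustrative):

// --- illustrative sketch, not part of the patch ---
using namespace Apache::Hadoop::Hive;

void createExampleDatabase(ThriftHiveMetastoreClient& client) {
  Database db;
  db.name = "analytics";                                      // example values
  db.description = "analytics datasets";
  db.locationUri = "hdfs:///user/hive/warehouse/analytics.db";
  try {
    client.create_database(db);          // void: no throw means success
  } catch (const AlreadyExistsException& e) {
    // o1: a database with this name is already registered
  } catch (const InvalidObjectException& e) {
    // o2: the Database struct failed validation
  } catch (const MetaException& e) {
    // o3: generic metastore-side failure
  }
}
// --- end sketch ---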
+ MetaException o3; struct __isset { - __isset() : success(false), o1(false), o2(false) {} - bool success; + __isset() : o1(false), o2(false), o3(false) {} bool o1; bool o2; + bool o3; } __isset; uint32_t read(apache::thrift::protocol::TProtocol* iprot); @@ -387,22 +388,26 @@ class ThriftHiveMetastore_get_database_presult { class ThriftHiveMetastore_drop_database_args { public: - ThriftHiveMetastore_drop_database_args() : name("") { + ThriftHiveMetastore_drop_database_args() : name(""), deleteData(0) { } virtual ~ThriftHiveMetastore_drop_database_args() throw() {} std::string name; + bool deleteData; struct __isset { - __isset() : name(false) {} + __isset() : name(false), deleteData(false) {} bool name; + bool deleteData; } __isset; bool operator == (const ThriftHiveMetastore_drop_database_args & rhs) const { if (!(name == rhs.name)) return false; + if (!(deleteData == rhs.deleteData)) + return false; return true; } bool operator != (const ThriftHiveMetastore_drop_database_args &rhs) const { @@ -423,6 +428,7 @@ class ThriftHiveMetastore_drop_database_pargs { virtual ~ThriftHiveMetastore_drop_database_pargs() throw() {} const std::string* name; + const bool* deleteData; uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; @@ -431,26 +437,30 @@ class ThriftHiveMetastore_drop_database_pargs { class ThriftHiveMetastore_drop_database_result { public: - ThriftHiveMetastore_drop_database_result() : success(0) { + ThriftHiveMetastore_drop_database_result() { } virtual ~ThriftHiveMetastore_drop_database_result() throw() {} - bool success; - MetaException o2; + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; struct __isset { - __isset() : success(false), o2(false) {} - bool success; + __isset() : o1(false), o2(false), o3(false) {} + bool o1; bool o2; + bool o3; } __isset; bool operator == (const ThriftHiveMetastore_drop_database_result & rhs) const { - if (!(success == rhs.success)) + if (!(o1 == rhs.o1)) return false; if (!(o2 == rhs.o2)) return false; + if (!(o3 == rhs.o3)) + return false; return true; } bool operator != (const ThriftHiveMetastore_drop_database_result &rhs) const { @@ -470,13 +480,15 @@ class ThriftHiveMetastore_drop_database_presult { virtual ~ThriftHiveMetastore_drop_database_presult() throw() {} - bool* success; - MetaException o2; + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; struct __isset { - __isset() : success(false), o2(false) {} - bool success; + __isset() : o1(false), o2(false), o3(false) {} + bool o1; bool o2; + bool o3; } __isset; uint32_t read(apache::thrift::protocol::TProtocol* iprot); @@ -486,14 +498,22 @@ class ThriftHiveMetastore_drop_database_presult { class ThriftHiveMetastore_get_databases_args { public: - ThriftHiveMetastore_get_databases_args() { + ThriftHiveMetastore_get_databases_args() : pattern("") { } virtual ~ThriftHiveMetastore_get_databases_args() throw() {} + std::string pattern; - bool operator == (const ThriftHiveMetastore_get_databases_args & /* rhs */) const + struct __isset { + __isset() : pattern(false) {} + bool pattern; + } __isset; + + bool operator == (const ThriftHiveMetastore_get_databases_args & rhs) const { + if (!(pattern == rhs.pattern)) + return false; return true; } bool operator != (const ThriftHiveMetastore_get_databases_args &rhs) const { @@ -513,6 +533,7 @@ class ThriftHiveMetastore_get_databases_pargs { virtual ~ThriftHiveMetastore_get_databases_pargs() throw() {} + const std::string* pattern; uint32_t write(apache::thrift::protocol::TProtocol* 
oprot) const; @@ -573,6 +594,96 @@ class ThriftHiveMetastore_get_databases_presult { }; +class ThriftHiveMetastore_get_all_databases_args { + public: + + ThriftHiveMetastore_get_all_databases_args() { + } + + virtual ~ThriftHiveMetastore_get_all_databases_args() throw() {} + + + bool operator == (const ThriftHiveMetastore_get_all_databases_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHiveMetastore_get_all_databases_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_all_databases_args & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_all_databases_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_all_databases_pargs() throw() {} + + + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_all_databases_result { + public: + + ThriftHiveMetastore_get_all_databases_result() { + } + + virtual ~ThriftHiveMetastore_get_all_databases_result() throw() {} + + std::vector success; + MetaException o1; + + struct __isset { + __isset() : success(false), o1(false) {} + bool success; + bool o1; + } __isset; + + bool operator == (const ThriftHiveMetastore_get_all_databases_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_all_databases_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_all_databases_result & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_all_databases_presult { + public: + + + virtual ~ThriftHiveMetastore_get_all_databases_presult() throw() {} + + std::vector * success; + MetaException o1; + + struct __isset { + __isset() : success(false), o1(false) {} + bool success; + bool o1; + } __isset; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + +}; + class ThriftHiveMetastore_get_type_args { public: @@ -626,11 +737,13 @@ class ThriftHiveMetastore_get_type_result { virtual ~ThriftHiveMetastore_get_type_result() throw() {} Type success; - MetaException o2; + MetaException o1; + NoSuchObjectException o2; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false) {} bool success; + bool o1; bool o2; } __isset; @@ -638,6 +751,8 @@ class ThriftHiveMetastore_get_type_result { { if (!(success == rhs.success)) return false; + if (!(o1 == rhs.o1)) + return false; if (!(o2 == rhs.o2)) return false; return true; @@ -660,11 +775,13 @@ class ThriftHiveMetastore_get_type_presult { virtual ~ThriftHiveMetastore_get_type_presult() throw() {} Type* success; - MetaException o2; + MetaException o1; + NoSuchObjectException o2; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false) {} bool success; + bool o1; bool o2; } __isset; @@ -836,11 +953,13 @@ class ThriftHiveMetastore_drop_type_result { virtual ~ThriftHiveMetastore_drop_type_result() throw() {} bool success; - MetaException o2; + MetaException o1; + NoSuchObjectException o2; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false) {} bool success; + bool o1; bool o2; } __isset; @@ -848,6 +967,8 @@ class 
ThriftHiveMetastore_drop_type_result { { if (!(success == rhs.success)) return false; + if (!(o1 == rhs.o1)) + return false; if (!(o2 == rhs.o2)) return false; return true; @@ -870,11 +991,13 @@ class ThriftHiveMetastore_drop_type_presult { virtual ~ThriftHiveMetastore_drop_type_presult() throw() {} bool* success; - MetaException o2; + MetaException o1; + NoSuchObjectException o2; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false) {} bool success; + bool o1; bool o2; } __isset; @@ -1537,6 +1660,105 @@ class ThriftHiveMetastore_get_tables_presult { }; +class ThriftHiveMetastore_get_all_tables_args { + public: + + ThriftHiveMetastore_get_all_tables_args() : db_name("") { + } + + virtual ~ThriftHiveMetastore_get_all_tables_args() throw() {} + + std::string db_name; + + struct __isset { + __isset() : db_name(false) {} + bool db_name; + } __isset; + + bool operator == (const ThriftHiveMetastore_get_all_tables_args & rhs) const + { + if (!(db_name == rhs.db_name)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_all_tables_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_all_tables_args & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_all_tables_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_all_tables_pargs() throw() {} + + const std::string* db_name; + + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_all_tables_result { + public: + + ThriftHiveMetastore_get_all_tables_result() { + } + + virtual ~ThriftHiveMetastore_get_all_tables_result() throw() {} + + std::vector success; + MetaException o1; + + struct __isset { + __isset() : success(false), o1(false) {} + bool success; + bool o1; + } __isset; + + bool operator == (const ThriftHiveMetastore_get_all_tables_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_all_tables_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_all_tables_result & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_all_tables_presult { + public: + + + virtual ~ThriftHiveMetastore_get_all_tables_presult() throw() {} + + std::vector * success; + MetaException o1; + + struct __isset { + __isset() : success(false), o1(false) {} + bool success; + bool o1; + } __isset; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + +}; + class ThriftHiveMetastore_get_table_args { public: @@ -4029,18 +4251,21 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public f boost::shared_ptr getOutputProtocol() { return poprot_; } - bool create_database(const std::string& name, const std::string& description); - void send_create_database(const std::string& name, const std::string& description); - bool recv_create_database(); + void create_database(const Database& database); + void send_create_database(const Database& database); + void recv_create_database(); void get_database(Database& _return, const std::string& name); void send_get_database(const std::string& name); void recv_get_database(Database& _return); - 
bool drop_database(const std::string& name); - void send_drop_database(const std::string& name); - bool recv_drop_database(); - void get_databases(std::vector<std::string> & _return); - void send_get_databases(); + void drop_database(const std::string& name, const bool deleteData); + void send_drop_database(const std::string& name, const bool deleteData); + void recv_drop_database(); + void get_databases(std::vector<std::string> & _return, const std::string& pattern); + void send_get_databases(const std::string& pattern); void recv_get_databases(std::vector<std::string> & _return); + void get_all_databases(std::vector<std::string> & _return); + void send_get_all_databases(); + void recv_get_all_databases(std::vector<std::string> & _return); void get_type(Type& _return, const std::string& name); void send_get_type(const std::string& name); void recv_get_type(Type& _return); @@ -4068,6 +4293,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public f void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern); void send_get_tables(const std::string& db_name, const std::string& pattern); void recv_get_tables(std::vector<std::string> & _return); + void get_all_tables(std::vector<std::string> & _return, const std::string& db_name); + void send_get_all_tables(const std::string& db_name); + void recv_get_all_tables(std::vector<std::string> & _return); void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name); void send_get_table(const std::string& dbname, const std::string& tbl_name); void recv_get_table(Table& _return); @@ -4146,6 +4374,7 @@ class ThriftHiveMetastoreProcessor : virtual public apache::thrift::TProcessor, void process_get_database(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_drop_database(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_get_databases(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); + void process_get_all_databases(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_get_type(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_create_type(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_drop_type(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); @@ -4155,6 +4384,7 @@ class ThriftHiveMetastoreProcessor : virtual public apache::thrift::TProcessor, void process_create_table(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_drop_table(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_get_tables(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); + void process_get_all_tables(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_get_table(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_alter_table(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_add_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); @@
-4185,6 +4415,7 @@ class ThriftHiveMetastoreProcessor : virtual public apache::thrift::TProcessor, processMap_["get_database"] = &ThriftHiveMetastoreProcessor::process_get_database; processMap_["drop_database"] = &ThriftHiveMetastoreProcessor::process_drop_database; processMap_["get_databases"] = &ThriftHiveMetastoreProcessor::process_get_databases; + processMap_["get_all_databases"] = &ThriftHiveMetastoreProcessor::process_get_all_databases; processMap_["get_type"] = &ThriftHiveMetastoreProcessor::process_get_type; processMap_["create_type"] = &ThriftHiveMetastoreProcessor::process_create_type; processMap_["drop_type"] = &ThriftHiveMetastoreProcessor::process_drop_type; @@ -4194,6 +4425,7 @@ class ThriftHiveMetastoreProcessor : virtual public apache::thrift::TProcessor, processMap_["create_table"] = &ThriftHiveMetastoreProcessor::process_create_table; processMap_["drop_table"] = &ThriftHiveMetastoreProcessor::process_drop_table; processMap_["get_tables"] = &ThriftHiveMetastoreProcessor::process_get_tables; + processMap_["get_all_tables"] = &ThriftHiveMetastoreProcessor::process_get_all_tables; processMap_["get_table"] = &ThriftHiveMetastoreProcessor::process_get_table; processMap_["alter_table"] = &ThriftHiveMetastoreProcessor::process_alter_table; processMap_["add_partition"] = &ThriftHiveMetastoreProcessor::process_add_partition; @@ -4239,14 +4471,10 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi ifaces_.push_back(iface); } public: - bool create_database(const std::string& name, const std::string& description) { + void create_database(const Database& database) { uint32_t sz = ifaces_.size(); for (uint32_t i = 0; i < sz; ++i) { - if (i == sz - 1) { - return ifaces_[i]->create_database(name, description); - } else { - ifaces_[i]->create_database(name, description); - } + ifaces_[i]->create_database(database); } } @@ -4262,25 +4490,33 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi } } - bool drop_database(const std::string& name) { + void drop_database(const std::string& name, const bool deleteData) { + uint32_t sz = ifaces_.size(); + for (uint32_t i = 0; i < sz; ++i) { + ifaces_[i]->drop_database(name, deleteData); + } + } + + void get_databases(std::vector & _return, const std::string& pattern) { uint32_t sz = ifaces_.size(); for (uint32_t i = 0; i < sz; ++i) { if (i == sz - 1) { - return ifaces_[i]->drop_database(name); + ifaces_[i]->get_databases(_return, pattern); + return; } else { - ifaces_[i]->drop_database(name); + ifaces_[i]->get_databases(_return, pattern); } } } - void get_databases(std::vector & _return) { + void get_all_databases(std::vector & _return) { uint32_t sz = ifaces_.size(); for (uint32_t i = 0; i < sz; ++i) { if (i == sz - 1) { - ifaces_[i]->get_databases(_return); + ifaces_[i]->get_all_databases(_return); return; } else { - ifaces_[i]->get_databases(_return); + ifaces_[i]->get_all_databases(_return); } } } @@ -4381,6 +4617,18 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi } } + void get_all_tables(std::vector & _return, const std::string& db_name) { + uint32_t sz = ifaces_.size(); + for (uint32_t i = 0; i < sz; ++i) { + if (i == sz - 1) { + ifaces_[i]->get_all_tables(_return, db_name); + return; + } else { + ifaces_[i]->get_all_tables(_return, db_name); + } + } + } + void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) { uint32_t sz = ifaces_.size(); for (uint32_t i = 0; i < sz; ++i) { diff --git 
metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index ed2bb99..1676bb8 100644 --- metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -22,7 +22,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { // Your initialization goes here } - bool create_database(const std::string& name, const std::string& description) { + void create_database(const Database& database) { // Your implementation goes here printf("create_database\n"); } @@ -32,16 +32,21 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("get_database\n"); } - bool drop_database(const std::string& name) { + void drop_database(const std::string& name, const bool deleteData) { // Your implementation goes here printf("drop_database\n"); } - void get_databases(std::vector & _return) { + void get_databases(std::vector & _return, const std::string& pattern) { // Your implementation goes here printf("get_databases\n"); } + void get_all_databases(std::vector & _return) { + // Your implementation goes here + printf("get_all_databases\n"); + } + void get_type(Type& _return, const std::string& name) { // Your implementation goes here printf("get_type\n"); @@ -87,6 +92,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("get_tables\n"); } + void get_all_tables(std::vector & _return, const std::string& db_name) { + // Your implementation goes here + printf("get_all_tables\n"); + } + void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) { // Your implementation goes here printf("get_table\n"); diff --git metastore/src/gen-cpp/hive_metastore_types.cpp metastore/src/gen-cpp/hive_metastore_types.cpp index b5a403d..c000db9 100644 --- metastore/src/gen-cpp/hive_metastore_types.cpp +++ metastore/src/gen-cpp/hive_metastore_types.cpp @@ -261,8 +261,8 @@ uint32_t Type::write(apache::thrift::protocol::TProtocol* oprot) const { return xfer; } -const char* Database::ascii_fingerprint = "07A9615F837F7D0A952B595DD3020972"; -const uint8_t Database::binary_fingerprint[16] = {0x07,0xA9,0x61,0x5F,0x83,0x7F,0x7D,0x0A,0x95,0x2B,0x59,0x5D,0xD3,0x02,0x09,0x72}; +const char* Database::ascii_fingerprint = "AB879940BD15B6B25691265F7384B271"; +const uint8_t Database::binary_fingerprint[16] = {0xAB,0x87,0x99,0x40,0xBD,0x15,0xB6,0xB2,0x56,0x91,0x26,0x5F,0x73,0x84,0xB2,0x71}; uint32_t Database::read(apache::thrift::protocol::TProtocol* iprot) { @@ -300,6 +300,14 @@ uint32_t Database::read(apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->locationUri); + this->__isset.locationUri = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -321,6 +329,9 @@ uint32_t Database::write(apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("description", apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->description); xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("locationUri", apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->locationUri); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; diff --git metastore/src/gen-cpp/hive_metastore_types.h 
metastore/src/gen-cpp/hive_metastore_types.h index 1b0c706..f6cd7e5 100644 --- metastore/src/gen-cpp/hive_metastore_types.h +++ metastore/src/gen-cpp/hive_metastore_types.h @@ -153,21 +153,23 @@ class Type { class Database { public: - static const char* ascii_fingerprint; // = "07A9615F837F7D0A952B595DD3020972"; - static const uint8_t binary_fingerprint[16]; // = {0x07,0xA9,0x61,0x5F,0x83,0x7F,0x7D,0x0A,0x95,0x2B,0x59,0x5D,0xD3,0x02,0x09,0x72}; + static const char* ascii_fingerprint; // = "AB879940BD15B6B25691265F7384B271"; + static const uint8_t binary_fingerprint[16]; // = {0xAB,0x87,0x99,0x40,0xBD,0x15,0xB6,0xB2,0x56,0x91,0x26,0x5F,0x73,0x84,0xB2,0x71}; - Database() : name(""), description("") { + Database() : name(""), description(""), locationUri("") { } virtual ~Database() throw() {} std::string name; std::string description; + std::string locationUri; struct __isset { - __isset() : name(false), description(false) {} + __isset() : name(false), description(false), locationUri(false) {} bool name; bool description; + bool locationUri; } __isset; bool operator == (const Database & rhs) const @@ -176,6 +178,8 @@ class Database { return false; if (!(description == rhs.description)) return false; + if (!(locationUri == rhs.locationUri)) + return false; return true; } bool operator != (const Database &rhs) const { diff --git metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java index 78c78d9..e916cb8 100644 --- metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java +++ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java @@ -22,11 +22,14 @@ public class Database implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("Database"); private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)1); private static final TField DESCRIPTION_FIELD_DESC = new TField("description", TType.STRING, (short)2); + private static final TField LOCATION_URI_FIELD_DESC = new TField("locationUri", TType.STRING, (short)3); private String name; public static final int NAME = 1; private String description; public static final int DESCRIPTION = 2; + private String locationUri; + public static final int LOCATIONURI = 3; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { @@ -37,6 +40,8 @@ public class Database implements TBase, java.io.Serializable, Cloneable { new FieldValueMetaData(TType.STRING))); put(DESCRIPTION, new FieldMetaData("description", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); + put(LOCATIONURI, new FieldMetaData("locationUri", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); }}); static { @@ -48,11 +53,13 @@ public class Database implements TBase, java.io.Serializable, Cloneable { public Database( String name, - String description) + String description, + String locationUri) { this(); this.name = name; this.description = description; + this.locationUri = locationUri; } /** @@ -65,6 +72,9 @@ public class Database implements TBase, java.io.Serializable, Cloneable { if (other.isSetDescription()) { this.description = other.description; } + if (other.isSetLocationUri()) { + this.locationUri = other.locationUri; + } } @Override @@ -106,6 +116,23 @@ public class Database implements TBase, java.io.Serializable, Cloneable { return this.description != null; } + public String 
getLocationUri() { + return this.locationUri; + } + + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; + } + + public void unsetLocationUri() { + this.locationUri = null; + } + + // Returns true if field locationUri is set (has been assigned a value) and false otherwise + public boolean isSetLocationUri() { + return this.locationUri != null; + } + public void setFieldValue(int fieldID, Object value) { switch (fieldID) { case NAME: @@ -124,6 +151,14 @@ public class Database implements TBase, java.io.Serializable, Cloneable { } break; + case LOCATIONURI: + if (value == null) { + unsetLocationUri(); + } else { + setLocationUri((String)value); + } + break; + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -137,6 +172,9 @@ public class Database implements TBase, java.io.Serializable, Cloneable { case DESCRIPTION: return getDescription(); + case LOCATIONURI: + return getLocationUri(); + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -149,6 +187,8 @@ public class Database implements TBase, java.io.Serializable, Cloneable { return isSetName(); case DESCRIPTION: return isSetDescription(); + case LOCATIONURI: + return isSetLocationUri(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -185,6 +225,15 @@ public class Database implements TBase, java.io.Serializable, Cloneable { return false; } + boolean this_present_locationUri = true && this.isSetLocationUri(); + boolean that_present_locationUri = true && that.isSetLocationUri(); + if (this_present_locationUri || that_present_locationUri) { + if (!(this_present_locationUri && that_present_locationUri)) + return false; + if (!this.locationUri.equals(that.locationUri)) + return false; + } + return true; } @@ -218,6 +267,13 @@ public class Database implements TBase, java.io.Serializable, Cloneable { TProtocolUtil.skip(iprot, field.type); } break; + case LOCATIONURI: + if (field.type == TType.STRING) { + this.locationUri = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -243,6 +299,11 @@ public class Database implements TBase, java.io.Serializable, Cloneable { oprot.writeString(this.description); oprot.writeFieldEnd(); } + if (this.locationUri != null) { + oprot.writeFieldBegin(LOCATION_URI_FIELD_DESC); + oprot.writeString(this.locationUri); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -267,6 +328,14 @@ public class Database implements TBase, java.io.Serializable, Cloneable { sb.append(this.description); } first = false; + if (!first) sb.append(", "); + sb.append("locationUri:"); + if (this.locationUri == null) { + sb.append("null"); + } else { + sb.append(this.locationUri); + } + first = false; sb.append(")"); return sb.toString(); } diff --git metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 25408d9..bed8557 100644 --- metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -25,19 +25,21 @@ public class ThriftHiveMetastore { */ public interface Iface extends com.facebook.fb303.FacebookService.Iface { - public boolean create_database(String name, String description) throws AlreadyExistsException,
MetaException, TException; + public void create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, TException; public Database get_database(String name) throws NoSuchObjectException, MetaException, TException; - public boolean drop_database(String name) throws MetaException, TException; + public void drop_database(String name, boolean deleteData) throws NoSuchObjectException, InvalidOperationException, MetaException, TException; - public List get_databases() throws MetaException, TException; + public List get_databases(String pattern) throws MetaException, TException; - public Type get_type(String name) throws MetaException, TException; + public List get_all_databases() throws MetaException, TException; + + public Type get_type(String name) throws MetaException, NoSuchObjectException, TException; public boolean create_type(Type type) throws AlreadyExistsException, InvalidObjectException, MetaException, TException; - public boolean drop_type(String type) throws MetaException, TException; + public boolean drop_type(String type) throws MetaException, NoSuchObjectException, TException; public Map get_type_all(String name) throws MetaException, TException; @@ -51,6 +53,8 @@ public class ThriftHiveMetastore { public List get_tables(String db_name, String pattern) throws MetaException, TException; + public List get_all_tables(String db_name) throws MetaException, TException; + public Table get_table(String dbname, String tbl_name) throws MetaException, NoSuchObjectException, TException; public void alter_table(String dbname, String tbl_name, Table new_tbl) throws InvalidOperationException, MetaException, TException; @@ -108,24 +112,23 @@ public class ThriftHiveMetastore { super(iprot, oprot); } - public boolean create_database(String name, String description) throws AlreadyExistsException, MetaException, TException + public void create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, TException { - send_create_database(name, description); - return recv_create_database(); + send_create_database(database); + recv_create_database(); } - public void send_create_database(String name, String description) throws TException + public void send_create_database(Database database) throws TException { oprot_.writeMessageBegin(new TMessage("create_database", TMessageType.CALL, seqid_)); create_database_args args = new create_database_args(); - args.name = name; - args.description = description; + args.database = database; args.write(oprot_); oprot_.writeMessageEnd(); oprot_.getTransport().flush(); } - public boolean recv_create_database() throws AlreadyExistsException, MetaException, TException + public void recv_create_database() throws AlreadyExistsException, InvalidObjectException, MetaException, TException { TMessage msg = iprot_.readMessageBegin(); if (msg.type == TMessageType.EXCEPTION) { @@ -136,16 +139,16 @@ public class ThriftHiveMetastore { create_database_result result = new create_database_result(); result.read(iprot_); iprot_.readMessageEnd(); - if (result.isSetSuccess()) { - return result.success; - } if (result.o1 != null) { throw result.o1; } if (result.o2 != null) { throw result.o2; } - throw new TApplicationException(TApplicationException.MISSING_RESULT, "create_database failed: unknown result"); + if (result.o3 != null) { + throw result.o3; + } + return; } public Database get_database(String name) throws NoSuchObjectException, MetaException, TException @@ -187,23 +190,24 @@ public class 
ThriftHiveMetastore { throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_database failed: unknown result"); } - public boolean drop_database(String name) throws MetaException, TException + public void drop_database(String name, boolean deleteData) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { - send_drop_database(name); - return recv_drop_database(); + send_drop_database(name, deleteData); + recv_drop_database(); } - public void send_drop_database(String name) throws TException + public void send_drop_database(String name, boolean deleteData) throws TException { oprot_.writeMessageBegin(new TMessage("drop_database", TMessageType.CALL, seqid_)); drop_database_args args = new drop_database_args(); args.name = name; + args.deleteData = deleteData; args.write(oprot_); oprot_.writeMessageEnd(); oprot_.getTransport().flush(); } - public boolean recv_drop_database() throws MetaException, TException + public void recv_drop_database() throws NoSuchObjectException, InvalidOperationException, MetaException, TException { TMessage msg = iprot_.readMessageBegin(); if (msg.type == TMessageType.EXCEPTION) { @@ -214,25 +218,29 @@ public class ThriftHiveMetastore { drop_database_result result = new drop_database_result(); result.read(iprot_); iprot_.readMessageEnd(); - if (result.isSetSuccess()) { - return result.success; + if (result.o1 != null) { + throw result.o1; } if (result.o2 != null) { throw result.o2; } - throw new TApplicationException(TApplicationException.MISSING_RESULT, "drop_database failed: unknown result"); + if (result.o3 != null) { + throw result.o3; + } + return; } - public List get_databases() throws MetaException, TException + public List get_databases(String pattern) throws MetaException, TException { - send_get_databases(); + send_get_databases(pattern); return recv_get_databases(); } - public void send_get_databases() throws TException + public void send_get_databases(String pattern) throws TException { oprot_.writeMessageBegin(new TMessage("get_databases", TMessageType.CALL, seqid_)); get_databases_args args = new get_databases_args(); + args.pattern = pattern; args.write(oprot_); oprot_.writeMessageEnd(); oprot_.getTransport().flush(); @@ -258,7 +266,42 @@ public class ThriftHiveMetastore { throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_databases failed: unknown result"); } - public Type get_type(String name) throws MetaException, TException + public List get_all_databases() throws MetaException, TException + { + send_get_all_databases(); + return recv_get_all_databases(); + } + + public void send_get_all_databases() throws TException + { + oprot_.writeMessageBegin(new TMessage("get_all_databases", TMessageType.CALL, seqid_)); + get_all_databases_args args = new get_all_databases_args(); + args.write(oprot_); + oprot_.writeMessageEnd(); + oprot_.getTransport().flush(); + } + + public List recv_get_all_databases() throws MetaException, TException + { + TMessage msg = iprot_.readMessageBegin(); + if (msg.type == TMessageType.EXCEPTION) { + TApplicationException x = TApplicationException.read(iprot_); + iprot_.readMessageEnd(); + throw x; + } + get_all_databases_result result = new get_all_databases_result(); + result.read(iprot_); + iprot_.readMessageEnd(); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_all_databases failed: unknown result"); + } + + public 
Type get_type(String name) throws MetaException, NoSuchObjectException, TException { send_get_type(name); return recv_get_type(); @@ -274,7 +317,7 @@ public class ThriftHiveMetastore { oprot_.getTransport().flush(); } - public Type recv_get_type() throws MetaException, TException + public Type recv_get_type() throws MetaException, NoSuchObjectException, TException { TMessage msg = iprot_.readMessageBegin(); if (msg.type == TMessageType.EXCEPTION) { @@ -288,6 +331,9 @@ public class ThriftHiveMetastore { if (result.isSetSuccess()) { return result.success; } + if (result.o1 != null) { + throw result.o1; + } if (result.o2 != null) { throw result.o2; } @@ -336,7 +382,7 @@ public class ThriftHiveMetastore { throw new TApplicationException(TApplicationException.MISSING_RESULT, "create_type failed: unknown result"); } - public boolean drop_type(String type) throws MetaException, TException + public boolean drop_type(String type) throws MetaException, NoSuchObjectException, TException { send_drop_type(type); return recv_drop_type(); @@ -352,7 +398,7 @@ public class ThriftHiveMetastore { oprot_.getTransport().flush(); } - public boolean recv_drop_type() throws MetaException, TException + public boolean recv_drop_type() throws MetaException, NoSuchObjectException, TException { TMessage msg = iprot_.readMessageBegin(); if (msg.type == TMessageType.EXCEPTION) { @@ -366,6 +412,9 @@ public class ThriftHiveMetastore { if (result.isSetSuccess()) { return result.success; } + if (result.o1 != null) { + throw result.o1; + } if (result.o2 != null) { throw result.o2; } @@ -611,6 +660,42 @@ public class ThriftHiveMetastore { throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_tables failed: unknown result"); } + public List get_all_tables(String db_name) throws MetaException, TException + { + send_get_all_tables(db_name); + return recv_get_all_tables(); + } + + public void send_get_all_tables(String db_name) throws TException + { + oprot_.writeMessageBegin(new TMessage("get_all_tables", TMessageType.CALL, seqid_)); + get_all_tables_args args = new get_all_tables_args(); + args.db_name = db_name; + args.write(oprot_); + oprot_.writeMessageEnd(); + oprot_.getTransport().flush(); + } + + public List recv_get_all_tables() throws MetaException, TException + { + TMessage msg = iprot_.readMessageBegin(); + if (msg.type == TMessageType.EXCEPTION) { + TApplicationException x = TApplicationException.read(iprot_); + iprot_.readMessageEnd(); + throw x; + } + get_all_tables_result result = new get_all_tables_result(); + result.read(iprot_); + iprot_.readMessageEnd(); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_all_tables failed: unknown result"); + } + public Table get_table(String dbname, String tbl_name) throws MetaException, NoSuchObjectException, TException { send_get_table(dbname, tbl_name); @@ -1505,6 +1590,7 @@ public class ThriftHiveMetastore { processMap_.put("get_database", new get_database()); processMap_.put("drop_database", new drop_database()); processMap_.put("get_databases", new get_databases()); + processMap_.put("get_all_databases", new get_all_databases()); processMap_.put("get_type", new get_type()); processMap_.put("create_type", new create_type()); processMap_.put("drop_type", new drop_type()); @@ -1514,6 +1600,7 @@ public class ThriftHiveMetastore { processMap_.put("create_table", new create_table()); processMap_.put("drop_table", new 
drop_table()); processMap_.put("get_tables", new get_tables()); + processMap_.put("get_all_tables", new get_all_tables()); processMap_.put("get_table", new get_table()); processMap_.put("alter_table", new alter_table()); processMap_.put("add_partition", new add_partition()); @@ -1566,12 +1653,13 @@ public class ThriftHiveMetastore { iprot.readMessageEnd(); create_database_result result = new create_database_result(); try { - result.success = iface_.create_database(args.name, args.description); - result.__isset.success = true; + iface_.create_database(args.database); } catch (AlreadyExistsException o1) { result.o1 = o1; - } catch (MetaException o2) { + } catch (InvalidObjectException o2) { result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; } catch (Throwable th) { LOGGER.error("Internal error processing create_database", th); TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing create_database"); @@ -1627,10 +1715,13 @@ public class ThriftHiveMetastore { iprot.readMessageEnd(); drop_database_result result = new drop_database_result(); try { - result.success = iface_.drop_database(args.name); - result.__isset.success = true; - } catch (MetaException o2) { + iface_.drop_database(args.name, args.deleteData); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidOperationException o2) { result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; } catch (Throwable th) { LOGGER.error("Internal error processing drop_database", th); TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing drop_database"); @@ -1656,7 +1747,7 @@ public class ThriftHiveMetastore { iprot.readMessageEnd(); get_databases_result result = new get_databases_result(); try { - result.success = iface_.get_databases(); + result.success = iface_.get_databases(args.pattern); } catch (MetaException o1) { result.o1 = o1; } catch (Throwable th) { @@ -1676,6 +1767,34 @@ public class ThriftHiveMetastore { } + private class get_all_databases implements ProcessFunction { + public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException + { + get_all_databases_args args = new get_all_databases_args(); + args.read(iprot); + iprot.readMessageEnd(); + get_all_databases_result result = new get_all_databases_result(); + try { + result.success = iface_.get_all_databases(); + } catch (MetaException o1) { + result.o1 = o1; + } catch (Throwable th) { + LOGGER.error("Internal error processing get_all_databases", th); + TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing get_all_databases"); + oprot.writeMessageBegin(new TMessage("get_all_databases", TMessageType.EXCEPTION, seqid)); + x.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + return; + } + oprot.writeMessageBegin(new TMessage("get_all_databases", TMessageType.REPLY, seqid)); + result.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + } + + } + private class get_type implements ProcessFunction { public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException { @@ -1685,7 +1804,9 @@ public class ThriftHiveMetastore { get_type_result result = new get_type_result(); try { result.success = iface_.get_type(args.name); - } catch (MetaException o2) { + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { result.o2 = o2; } catch (Throwable th) { 
LOGGER.error("Internal error processing get_type", th); @@ -1747,7 +1868,9 @@ public class ThriftHiveMetastore { try { result.success = iface_.drop_type(args.type); result.__isset.success = true; - } catch (MetaException o2) { + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { result.o2 = o2; } catch (Throwable th) { LOGGER.error("Internal error processing drop_type", th); @@ -1950,6 +2073,34 @@ public class ThriftHiveMetastore { } + private class get_all_tables implements ProcessFunction { + public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException + { + get_all_tables_args args = new get_all_tables_args(); + args.read(iprot); + iprot.readMessageEnd(); + get_all_tables_result result = new get_all_tables_result(); + try { + result.success = iface_.get_all_tables(args.db_name); + } catch (MetaException o1) { + result.o1 = o1; + } catch (Throwable th) { + LOGGER.error("Internal error processing get_all_tables", th); + TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing get_all_tables"); + oprot.writeMessageBegin(new TMessage("get_all_tables", TMessageType.EXCEPTION, seqid)); + x.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + return; + } + oprot.writeMessageBegin(new TMessage("get_all_tables", TMessageType.REPLY, seqid)); + result.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + } + + } + private class get_table implements ProcessFunction { public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException { @@ -2611,23 +2762,18 @@ public class ThriftHiveMetastore { public static class create_database_args implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("create_database_args"); - private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)1); - private static final TField DESCRIPTION_FIELD_DESC = new TField("description", TType.STRING, (short)2); + private static final TField DATABASE_FIELD_DESC = new TField("database", TType.STRUCT, (short)1); - private String name; - public static final int NAME = 1; - private String description; - public static final int DESCRIPTION = 2; + private Database database; + public static final int DATABASE = 1; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); - put(DESCRIPTION, new FieldMetaData("description", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); + put(DATABASE, new FieldMetaData("database", TFieldRequirementType.DEFAULT, + new StructMetaData(TType.STRUCT, Database.class))); }}); static { @@ -2638,23 +2784,18 @@ public class ThriftHiveMetastore { } public create_database_args( - String name, - String description) + Database database) { this(); - this.name = name; - this.description = description; + this.database = database; } /** * Performs a deep copy on other. 
*/ public create_database_args(create_database_args other) { - if (other.isSetName()) { - this.name = other.name; - } - if (other.isSetDescription()) { - this.description = other.description; + if (other.isSetDatabase()) { + this.database = new Database(other.database); } } @@ -2663,55 +2804,30 @@ public class ThriftHiveMetastore { return new create_database_args(this); } - public String getName() { - return this.name; - } - - public void setName(String name) { - this.name = name; - } - - public void unsetName() { - this.name = null; - } - - // Returns true if field name is set (has been asigned a value) and false otherwise - public boolean isSetName() { - return this.name != null; - } - - public String getDescription() { - return this.description; + public Database getDatabase() { + return this.database; } - public void setDescription(String description) { - this.description = description; + public void setDatabase(Database database) { + this.database = database; } - public void unsetDescription() { - this.description = null; + public void unsetDatabase() { + this.database = null; } - // Returns true if field description is set (has been asigned a value) and false otherwise - public boolean isSetDescription() { - return this.description != null; + // Returns true if field database is set (has been asigned a value) and false otherwise + public boolean isSetDatabase() { + return this.database != null; } public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case NAME: + case DATABASE: if (value == null) { - unsetName(); - } else { - setName((String)value); - } - break; - - case DESCRIPTION: - if (value == null) { - unsetDescription(); + unsetDatabase(); } else { - setDescription((String)value); + setDatabase((Database)value); } break; @@ -2722,11 +2838,8 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { - case NAME: - return getName(); - - case DESCRIPTION: - return getDescription(); + case DATABASE: + return getDatabase(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); @@ -2736,10 +2849,8 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case NAME: - return isSetName(); - case DESCRIPTION: - return isSetDescription(); + case DATABASE: + return isSetDatabase(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -2758,21 +2869,12 @@ public class ThriftHiveMetastore { if (that == null) return false; - boolean this_present_name = true && this.isSetName(); - boolean that_present_name = true && that.isSetName(); - if (this_present_name || that_present_name) { - if (!(this_present_name && that_present_name)) - return false; - if (!this.name.equals(that.name)) - return false; - } - - boolean this_present_description = true && this.isSetDescription(); - boolean that_present_description = true && that.isSetDescription(); - if (this_present_description || that_present_description) { - if (!(this_present_description && that_present_description)) + boolean this_present_database = true && this.isSetDatabase(); + boolean that_present_database = true && that.isSetDatabase(); + if (this_present_database || that_present_database) { + if (!(this_present_database && that_present_database)) return false; - if (!this.description.equals(that.description)) + if (!this.database.equals(that.database)) return false; } @@ 
-2795,16 +2897,10 @@ public class ThriftHiveMetastore { } switch (field.id) { - case NAME: - if (field.type == TType.STRING) { - this.name = iprot.readString(); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; - case DESCRIPTION: - if (field.type == TType.STRING) { - this.description = iprot.readString(); + case DATABASE: + if (field.type == TType.STRUCT) { + this.database = new Database(); + this.database.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } @@ -2824,14 +2920,9 @@ public class ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.name != null) { - oprot.writeFieldBegin(NAME_FIELD_DESC); - oprot.writeString(this.name); - oprot.writeFieldEnd(); - } - if (this.description != null) { - oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC); - oprot.writeString(this.description); + if (this.database != null) { + oprot.writeFieldBegin(DATABASE_FIELD_DESC); + this.database.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -2843,19 +2934,11 @@ public class ThriftHiveMetastore { StringBuilder sb = new StringBuilder("create_database_args("); boolean first = true; - sb.append("name:"); - if (this.name == null) { - sb.append("null"); - } else { - sb.append(this.name); - } - first = false; - if (!first) sb.append(", "); - sb.append("description:"); - if (this.description == null) { + sb.append("database:"); + if (this.database == null) { sb.append("null"); } else { - sb.append(this.description); + sb.append(this.database); } first = false; sb.append(")"); @@ -2871,29 +2954,28 @@ public class ThriftHiveMetastore { public static class create_database_result implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("create_database_result"); - private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.BOOL, (short)0); private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); + private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3); - private boolean success; - public static final int SUCCESS = 0; private AlreadyExistsException o1; public static final int O1 = 1; - private MetaException o2; + private InvalidObjectException o2; public static final int O2 = 2; + private MetaException o3; + public static final int O3 = 3; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { - public boolean success = false; } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.BOOL))); put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); + put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); }}); static { @@ -2904,28 +2986,28 @@ public class ThriftHiveMetastore { } public create_database_result( - boolean success, AlreadyExistsException o1, - MetaException o2) + InvalidObjectException o2, + MetaException o3) { this(); - this.success = success; - this.__isset.success = true; this.o1 = o1; this.o2 = o2; + this.o3 = o3; } /** * Performs a deep copy on other. 
*/ public create_database_result(create_database_result other) { - __isset.success = other.__isset.success; - this.success = other.success; if (other.isSetO1()) { this.o1 = new AlreadyExistsException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } @@ -2934,24 +3016,6 @@ public class ThriftHiveMetastore { return new create_database_result(this); } - public boolean isSuccess() { - return this.success; - } - - public void setSuccess(boolean success) { - this.success = success; - this.__isset.success = true; - } - - public void unsetSuccess() { - this.__isset.success = false; - } - - // Returns true if field success is set (has been asigned a value) and false otherwise - public boolean isSetSuccess() { - return this.__isset.success; - } - public AlreadyExistsException getO1() { return this.o1; } @@ -2969,11 +3033,11 @@ public class ThriftHiveMetastore { return this.o1 != null; } - public MetaException getO2() { + public InvalidObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(InvalidObjectException o2) { this.o2 = o2; } @@ -2986,16 +3050,25 @@ public class ThriftHiveMetastore { return this.o2 != null; } + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + // Returns true if field o3 is set (has been asigned a value) and false otherwise + public boolean isSetO3() { + return this.o3 != null; + } + public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((Boolean)value); - } - break; - case O1: if (value == null) { unsetO1(); @@ -3008,7 +3081,15 @@ public class ThriftHiveMetastore { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); } break; @@ -3019,15 +3100,15 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { - case SUCCESS: - return new Boolean(isSuccess()); - case O1: return getO1(); case O2: return getO2(); + case O3: + return getO3(); + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -3036,12 +3117,12 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case SUCCESS: - return isSetSuccess(); case O1: return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -3060,15 +3141,6 @@ public class ThriftHiveMetastore { if (that == null) return false; - boolean this_present_success = true; - boolean that_present_success = true; - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (this.success != that.success) - return false; - } - boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -3087,6 +3159,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean 
that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -3106,14 +3187,6 @@ public class ThriftHiveMetastore { } switch (field.id) { - case SUCCESS: - if (field.type == TType.BOOL) { - this.success = iprot.readBool(); - this.__isset.success = true; - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; case O1: if (field.type == TType.STRUCT) { this.o1 = new AlreadyExistsException(); @@ -3124,12 +3197,20 @@ public class ThriftHiveMetastore { break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new MetaException(); + this.o2 = new InvalidObjectException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; + case O3: + if (field.type == TType.STRUCT) { + this.o3 = new MetaException(); + this.o3.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -3144,11 +3225,7 @@ public class ThriftHiveMetastore { public void write(TProtocol oprot) throws TException { oprot.writeStructBegin(STRUCT_DESC); - if (this.isSetSuccess()) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(this.success); - oprot.writeFieldEnd(); - } else if (this.isSetO1()) { + if (this.isSetO1()) { oprot.writeFieldBegin(O1_FIELD_DESC); this.o1.write(oprot); oprot.writeFieldEnd(); @@ -3156,6 +3233,10 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(O2_FIELD_DESC); this.o2.write(oprot); oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -3166,10 +3247,6 @@ public class ThriftHiveMetastore { StringBuilder sb = new StringBuilder("create_database_result("); boolean first = true; - sb.append("success:"); - sb.append(this.success); - first = false; - if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -3185,6 +3262,14 @@ public class ThriftHiveMetastore { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -3719,17 +3804,23 @@ public class ThriftHiveMetastore { public static class drop_database_args implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("drop_database_args"); private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)1); + private static final TField DELETE_DATA_FIELD_DESC = new TField("deleteData", TType.BOOL, (short)2); private String name; public static final int NAME = 1; + private boolean deleteData; + public static final int DELETEDATA = 2; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { + public boolean deleteData = false; } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); + put(DELETEDATA, new FieldMetaData("deleteData", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.BOOL))); }}); static { @@ -3740,10 +3831,13 @@ public class ThriftHiveMetastore { } public drop_database_args( - String name) + String name, + boolean deleteData) { this(); this.name = 
name; + this.deleteData = deleteData; + this.__isset.deleteData = true; } /** @@ -3753,6 +3847,8 @@ public class ThriftHiveMetastore { if (other.isSetName()) { this.name = other.name; } + __isset.deleteData = other.__isset.deleteData; + this.deleteData = other.deleteData; } @Override @@ -3777,6 +3873,24 @@ public class ThriftHiveMetastore { return this.name != null; } + public boolean isDeleteData() { + return this.deleteData; + } + + public void setDeleteData(boolean deleteData) { + this.deleteData = deleteData; + this.__isset.deleteData = true; + } + + public void unsetDeleteData() { + this.__isset.deleteData = false; + } + + // Returns true if field deleteData is set (has been asigned a value) and false otherwise + public boolean isSetDeleteData() { + return this.__isset.deleteData; + } + public void setFieldValue(int fieldID, Object value) { switch (fieldID) { case NAME: @@ -3787,6 +3901,14 @@ public class ThriftHiveMetastore { } break; + case DELETEDATA: + if (value == null) { + unsetDeleteData(); + } else { + setDeleteData((Boolean)value); + } + break; + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -3797,6 +3919,9 @@ public class ThriftHiveMetastore { case NAME: return getName(); + case DELETEDATA: + return new Boolean(isDeleteData()); + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -3807,6 +3932,8 @@ public class ThriftHiveMetastore { switch (fieldID) { case NAME: return isSetName(); + case DELETEDATA: + return isSetDeleteData(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -3834,6 +3961,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_deleteData = true; + boolean that_present_deleteData = true; + if (this_present_deleteData || that_present_deleteData) { + if (!(this_present_deleteData && that_present_deleteData)) + return false; + if (this.deleteData != that.deleteData) + return false; + } + return true; } @@ -3860,6 +3996,14 @@ public class ThriftHiveMetastore { TProtocolUtil.skip(iprot, field.type); } break; + case DELETEDATA: + if (field.type == TType.BOOL) { + this.deleteData = iprot.readBool(); + this.__isset.deleteData = true; + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -3880,6 +4024,9 @@ public class ThriftHiveMetastore { oprot.writeString(this.name); oprot.writeFieldEnd(); } + oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); + oprot.writeBool(this.deleteData); + oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -3896,6 +4043,10 @@ public class ThriftHiveMetastore { sb.append(this.name); } first = false; + if (!first) sb.append(", "); + sb.append("deleteData:"); + sb.append(this.deleteData); + first = false; sb.append(")"); return sb.toString(); } @@ -3909,24 +4060,28 @@ public class ThriftHiveMetastore { public static class drop_database_result implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("drop_database_result"); - private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.BOOL, (short)0); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); + private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3); - private boolean success; - public static final int SUCCESS = 0; - private 
MetaException o2; + private NoSuchObjectException o1; + public static final int O1 = 1; + private InvalidOperationException o2; public static final int O2 = 2; + private MetaException o3; + public static final int O3 = 3; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { - public boolean success = false; } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.BOOL))); + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); + put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); }}); static { @@ -3937,23 +4092,28 @@ public class ThriftHiveMetastore { } public drop_database_result( - boolean success, - MetaException o2) + NoSuchObjectException o1, + InvalidOperationException o2, + MetaException o3) { this(); - this.success = success; - this.__isset.success = true; + this.o1 = o1; this.o2 = o2; + this.o3 = o3; } /** * Performs a deep copy on other. */ public drop_database_result(drop_database_result other) { - __isset.success = other.__isset.success; - this.success = other.success; + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new InvalidOperationException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } @@ -3962,29 +4122,28 @@ public class ThriftHiveMetastore { return new drop_database_result(this); } - public boolean isSuccess() { - return this.success; + public NoSuchObjectException getO1() { + return this.o1; } - public void setSuccess(boolean success) { - this.success = success; - this.__isset.success = true; + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; } - public void unsetSuccess() { - this.__isset.success = false; + public void unsetO1() { + this.o1 = null; } - // Returns true if field success is set (has been asigned a value) and false otherwise - public boolean isSetSuccess() { - return this.__isset.success; + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; } - public MetaException getO2() { + public InvalidOperationException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(InvalidOperationException o2) { this.o2 = o2; } @@ -3997,13 +4156,30 @@ public class ThriftHiveMetastore { return this.o2 != null; } + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + // Returns true if field o3 is set (has been asigned a value) and false otherwise + public boolean isSetO3() { + return this.o3 != null; + } + public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case SUCCESS: + case O1: if (value == null) { - unsetSuccess(); + unsetO1(); } else { - setSuccess((Boolean)value); + setO1((NoSuchObjectException)value); } break; @@ -4011,23 +4187,34 @@ public class ThriftHiveMetastore { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((InvalidOperationException)value); } break; - default: - throw new IllegalArgumentException("Field " + fieldID + " doesn't 
exist!"); - } - } + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } public Object getFieldValue(int fieldID) { switch (fieldID) { - case SUCCESS: - return new Boolean(isSuccess()); + case O1: + return getO1(); case O2: return getO2(); + case O3: + return getO3(); + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4036,10 +4223,12 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case SUCCESS: - return isSetSuccess(); + case O1: + return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4058,12 +4247,12 @@ public class ThriftHiveMetastore { if (that == null) return false; - boolean this_present_success = true; - boolean that_present_success = true; - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) return false; - if (this.success != that.success) + if (!this.o1.equals(that.o1)) return false; } @@ -4076,6 +4265,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -4095,22 +4293,30 @@ public class ThriftHiveMetastore { } switch (field.id) { - case SUCCESS: - if (field.type == TType.BOOL) { - this.success = iprot.readBool(); - this.__isset.success = true; + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new NoSuchObjectException(); + this.o1.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new MetaException(); + this.o2 = new InvalidOperationException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; + case O3: + if (field.type == TType.STRUCT) { + this.o3 = new MetaException(); + this.o3.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -4125,14 +4331,18 @@ public class ThriftHiveMetastore { public void write(TProtocol oprot) throws TException { oprot.writeStructBegin(STRUCT_DESC); - if (this.isSetSuccess()) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(this.success); + if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); oprot.writeFieldEnd(); } else if (this.isSetO2()) { oprot.writeFieldBegin(O2_FIELD_DESC); this.o2.write(oprot); oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -4143,8 +4353,12 @@ public class ThriftHiveMetastore { StringBuilder sb = new StringBuilder("drop_database_result("); boolean first = true; - sb.append("success:"); - sb.append(this.success); + sb.append("o1:"); + if (this.o1 == null) { + 
sb.append("null"); + } else { + sb.append(this.o1); + } first = false; if (!first) sb.append(", "); sb.append("o2:"); @@ -4154,6 +4368,14 @@ public class ThriftHiveMetastore { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -4167,8 +4389,18 @@ public class ThriftHiveMetastore { public static class get_databases_args implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("get_databases_args"); + private static final TField PATTERN_FIELD_DESC = new TField("pattern", TType.STRING, (short)1); + + private String pattern; + public static final int PATTERN = 1; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(PATTERN, new FieldMetaData("pattern", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); }}); static { @@ -4178,10 +4410,20 @@ public class ThriftHiveMetastore { public get_databases_args() { } + public get_databases_args( + String pattern) + { + this(); + this.pattern = pattern; + } + /** * Performs a deep copy on other. */ public get_databases_args(get_databases_args other) { + if (other.isSetPattern()) { + this.pattern = other.pattern; + } } @Override @@ -4189,8 +4431,33 @@ public class ThriftHiveMetastore { return new get_databases_args(this); } + public String getPattern() { + return this.pattern; + } + + public void setPattern(String pattern) { + this.pattern = pattern; + } + + public void unsetPattern() { + this.pattern = null; + } + + // Returns true if field pattern is set (has been asigned a value) and false otherwise + public boolean isSetPattern() { + return this.pattern != null; + } + public void setFieldValue(int fieldID, Object value) { switch (fieldID) { + case PATTERN: + if (value == null) { + unsetPattern(); + } else { + setPattern((String)value); + } + break; + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4198,6 +4465,9 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { + case PATTERN: + return getPattern(); + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4206,6 +4476,8 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { + case PATTERN: + return isSetPattern(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4224,6 +4496,15 @@ public class ThriftHiveMetastore { if (that == null) return false; + boolean this_present_pattern = true && this.isSetPattern(); + boolean that_present_pattern = true && that.isSetPattern(); + if (this_present_pattern || that_present_pattern) { + if (!(this_present_pattern && that_present_pattern)) + return false; + if (!this.pattern.equals(that.pattern)) + return false; + } + return true; } @@ -4243,6 +4524,13 @@ public class ThriftHiveMetastore { } switch (field.id) { + case PATTERN: + if (field.type == TType.STRING) { + this.pattern = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -4258,6 +4546,11 @@ public class 
ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); + if (this.pattern != null) { + oprot.writeFieldBegin(PATTERN_FIELD_DESC); + oprot.writeString(this.pattern); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -4267,6 +4560,13 @@ public class ThriftHiveMetastore { StringBuilder sb = new StringBuilder("get_databases_args("); boolean first = true; + sb.append("pattern:"); + if (this.pattern == null) { + sb.append("null"); + } else { + sb.append(this.pattern); + } + first = false; sb.append(")"); return sb.toString(); } @@ -4573,77 +4873,32 @@ public class ThriftHiveMetastore { } - public static class get_type_args implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_type_args"); - private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)1); - - private String name; - public static final int NAME = 1; - - private final Isset __isset = new Isset(); - private static final class Isset implements java.io.Serializable { - } + public static class get_all_databases_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_all_databases_args"); public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); }}); static { - FieldMetaData.addStructMetaDataMap(get_type_args.class, metaDataMap); - } - - public get_type_args() { + FieldMetaData.addStructMetaDataMap(get_all_databases_args.class, metaDataMap); } - public get_type_args( - String name) - { - this(); - this.name = name; + public get_all_databases_args() { } /** * Performs a deep copy on other. 
*/ - public get_type_args(get_type_args other) { - if (other.isSetName()) { - this.name = other.name; - } + public get_all_databases_args(get_all_databases_args other) { } @Override - public get_type_args clone() { - return new get_type_args(this); - } - - public String getName() { - return this.name; - } - - public void setName(String name) { - this.name = name; - } - - public void unsetName() { - this.name = null; - } - - // Returns true if field name is set (has been asigned a value) and false otherwise - public boolean isSetName() { - return this.name != null; + public get_all_databases_args clone() { + return new get_all_databases_args(this); } public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case NAME: - if (value == null) { - unsetName(); - } else { - setName((String)value); - } - break; - default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4651,9 +4906,6 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { - case NAME: - return getName(); - default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4662,8 +4914,6 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case NAME: - return isSetName(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4673,24 +4923,15 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_type_args) - return this.equals((get_type_args)that); + if (that instanceof get_all_databases_args) + return this.equals((get_all_databases_args)that); return false; } - public boolean equals(get_type_args that) { + public boolean equals(get_all_databases_args that) { if (that == null) return false; - boolean this_present_name = true && this.isSetName(); - boolean that_present_name = true && that.isSetName(); - if (this_present_name || that_present_name) { - if (!(this_present_name && that_present_name)) - return false; - if (!this.name.equals(that.name)) - return false; - } - return true; } @@ -4710,13 +4951,6 @@ public class ThriftHiveMetastore { } switch (field.id) { - case NAME: - if (field.type == TType.STRING) { - this.name = iprot.readString(); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -4732,27 +4966,15 @@ public class ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.name != null) { - oprot.writeFieldBegin(NAME_FIELD_DESC); - oprot.writeString(this.name); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override public String toString() { - StringBuilder sb = new StringBuilder("get_type_args("); + StringBuilder sb = new StringBuilder("get_all_databases_args("); boolean first = true; - sb.append("name:"); - if (this.name == null) { - sb.append("null"); - } else { - sb.append(this.name); - } - first = false; sb.append(")"); return sb.toString(); } @@ -4764,15 +4986,15 @@ public class ThriftHiveMetastore { } - public static class get_type_result implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_type_result"); - private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.STRUCT, (short)0); - private static final TField O2_FIELD_DESC = new 
TField("o2", TType.STRUCT, (short)1); - - private Type success; + public static class get_all_databases_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_all_databases_result"); + private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + + private List success; public static final int SUCCESS = 0; - private MetaException o2; - public static final int O2 = 1; + private MetaException o1; + public static final int O1 = 1; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { @@ -4780,49 +5002,69 @@ public class ThriftHiveMetastore { public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new StructMetaData(TType.STRUCT, Type.class))); - put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, + new FieldValueMetaData(TType.STRING)))); + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); static { - FieldMetaData.addStructMetaDataMap(get_type_result.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_all_databases_result.class, metaDataMap); } - public get_type_result() { + public get_all_databases_result() { } - public get_type_result( - Type success, - MetaException o2) + public get_all_databases_result( + List success, + MetaException o1) { this(); this.success = success; - this.o2 = o2; + this.o1 = o1; } /** * Performs a deep copy on other. */ - public get_type_result(get_type_result other) { + public get_all_databases_result(get_all_databases_result other) { if (other.isSetSuccess()) { - this.success = new Type(other.success); + List __this__success = new ArrayList(); + for (String other_element : other.success) { + __this__success.add(other_element); + } + this.success = __this__success; } - if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); } } @Override - public get_type_result clone() { - return new get_type_result(this); + public get_all_databases_result clone() { + return new get_all_databases_result(this); } - public Type getSuccess() { + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { return this.success; } - public void setSuccess(Type success) { + public void setSuccess(List success) { this.success = success; } @@ -4835,21 +5077,21 @@ public class ThriftHiveMetastore { return this.success != null; } - public MetaException getO2() { - return this.o2; + public MetaException getO1() { + return this.o1; } - public void setO2(MetaException o2) { - this.o2 = o2; + public void setO1(MetaException o1) { + this.o1 = o1; } - public void unsetO2() { - this.o2 = null; + public void unsetO1() { + this.o1 = null; } - // Returns true if field o2 is set (has been asigned a value) and false otherwise - public boolean isSetO2() { - return this.o2 != null; + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; } public void setFieldValue(int fieldID, Object value) { @@ -4858,15 +5100,15 @@ public class ThriftHiveMetastore { if (value == null) { unsetSuccess(); } else { - setSuccess((Type)value); + setSuccess((List)value); } break; - case O2: + case O1: if (value == null) { - unsetO2(); + unsetO1(); } else { - setO2((MetaException)value); + setO1((MetaException)value); } break; @@ -4880,8 +5122,8 @@ public class ThriftHiveMetastore { case SUCCESS: return getSuccess(); - case O2: - return getO2(); + case O1: + return getO1(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); @@ -4893,8 +5135,8 @@ public class ThriftHiveMetastore { switch (fieldID) { case SUCCESS: return isSetSuccess(); - case O2: - return isSetO2(); + case O1: + return isSetO1(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4904,12 +5146,12 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_type_result) - return this.equals((get_type_result)that); + if (that instanceof get_all_databases_result) + return this.equals((get_all_databases_result)that); return false; } - public boolean equals(get_type_result that) { + public boolean equals(get_all_databases_result that) { if (that == null) return false; @@ -4922,12 +5164,12 @@ public class ThriftHiveMetastore { return false; } - boolean this_present_o2 = true && this.isSetO2(); - boolean that_present_o2 = true && that.isSetO2(); - if (this_present_o2 || that_present_o2) { - if (!(this_present_o2 && that_present_o2)) + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) return false; - if (!this.o2.equals(that.o2)) + if (!this.o1.equals(that.o1)) return false; } @@ -4951,17 +5193,26 @@ public class ThriftHiveMetastore { switch (field.id) { case SUCCESS: - if (field.type == TType.STRUCT) { - this.success = new Type(); - this.success.read(iprot); + if (field.type == TType.LIST) { + { + TList _list62 = iprot.readListBegin(); + this.success = new ArrayList(_list62.size); + for (int _i63 = 0; _i63 < _list62.size; ++_i63) + { + String _elem64; + _elem64 = iprot.readString(); + this.success.add(_elem64); + } + iprot.readListEnd(); + } } else { TProtocolUtil.skip(iprot, field.type); } break; - case O2: + case O1: if (field.type == TType.STRUCT) { - this.o2 = new MetaException(); - this.o2.read(iprot); + 
this.o1 = new MetaException(); + this.o1.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } @@ -4982,11 +5233,17 @@ public class ThriftHiveMetastore { if (this.isSetSuccess()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - this.success.write(oprot); + { + oprot.writeListBegin(new TList(TType.STRING, this.success.size())); + for (String _iter65 : this.success) { + oprot.writeString(_iter65); + } + oprot.writeListEnd(); + } oprot.writeFieldEnd(); - } else if (this.isSetO2()) { - oprot.writeFieldBegin(O2_FIELD_DESC); - this.o2.write(oprot); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -4995,7 +5252,7 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("get_type_result("); + StringBuilder sb = new StringBuilder("get_all_databases_result("); boolean first = true; sb.append("success:"); @@ -5006,11 +5263,11 @@ public class ThriftHiveMetastore { } first = false; if (!first) sb.append(", "); - sb.append("o2:"); - if (this.o2 == null) { + sb.append("o1:"); + if (this.o1 == null) { sb.append("null"); } else { - sb.append(this.o2); + sb.append(this.o1); } first = false; sb.append(")"); @@ -5024,74 +5281,74 @@ public class ThriftHiveMetastore { } - public static class create_type_args implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("create_type_args"); - private static final TField TYPE_FIELD_DESC = new TField("type", TType.STRUCT, (short)1); + public static class get_type_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_type_args"); + private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)1); - private Type type; - public static final int TYPE = 1; + private String name; + public static final int NAME = 1; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TYPE, new FieldMetaData("type", TFieldRequirementType.DEFAULT, - new StructMetaData(TType.STRUCT, Type.class))); + put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); }}); static { - FieldMetaData.addStructMetaDataMap(create_type_args.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_type_args.class, metaDataMap); } - public create_type_args() { + public get_type_args() { } - public create_type_args( - Type type) + public get_type_args( + String name) { this(); - this.type = type; + this.name = name; } /** * Performs a deep copy on other. 
*/ - public create_type_args(create_type_args other) { - if (other.isSetType()) { - this.type = new Type(other.type); + public get_type_args(get_type_args other) { + if (other.isSetName()) { + this.name = other.name; } } @Override - public create_type_args clone() { - return new create_type_args(this); + public get_type_args clone() { + return new get_type_args(this); } - public Type getType() { - return this.type; + public String getName() { + return this.name; } - public void setType(Type type) { - this.type = type; + public void setName(String name) { + this.name = name; } - public void unsetType() { - this.type = null; + public void unsetName() { + this.name = null; } - // Returns true if field type is set (has been asigned a value) and false otherwise - public boolean isSetType() { - return this.type != null; + // Returns true if field name is set (has been asigned a value) and false otherwise + public boolean isSetName() { + return this.name != null; } public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case TYPE: + case NAME: if (value == null) { - unsetType(); + unsetName(); } else { - setType((Type)value); + setName((String)value); } break; @@ -5102,8 +5359,8 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { - case TYPE: - return getType(); + case NAME: + return getName(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); @@ -5113,8 +5370,8 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case TYPE: - return isSetType(); + case NAME: + return isSetName(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -5124,21 +5381,21 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof create_type_args) - return this.equals((create_type_args)that); + if (that instanceof get_type_args) + return this.equals((get_type_args)that); return false; } - public boolean equals(create_type_args that) { + public boolean equals(get_type_args that) { if (that == null) return false; - boolean this_present_type = true && this.isSetType(); - boolean that_present_type = true && that.isSetType(); - if (this_present_type || that_present_type) { - if (!(this_present_type && that_present_type)) + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) return false; - if (!this.type.equals(that.type)) + if (!this.name.equals(that.name)) return false; } @@ -5161,10 +5418,9 @@ public class ThriftHiveMetastore { } switch (field.id) { - case TYPE: - if (field.type == TType.STRUCT) { - this.type = new Type(); - this.type.read(iprot); + case NAME: + if (field.type == TType.STRING) { + this.name = iprot.readString(); } else { TProtocolUtil.skip(iprot, field.type); } @@ -5184,9 +5440,9 @@ public class ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.type != null) { - oprot.writeFieldBegin(TYPE_FIELD_DESC); - this.type.write(oprot); + if (this.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(this.name); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -5195,14 +5451,14 @@ public class ThriftHiveMetastore { @Override public String toString() { - 
StringBuilder sb = new StringBuilder("create_type_args("); + StringBuilder sb = new StringBuilder("get_type_args("); boolean first = true; - sb.append("type:"); - if (this.type == null) { + sb.append("name:"); + if (this.name == null) { sb.append("null"); } else { - sb.append(this.type); + sb.append(this.name); } first = false; sb.append(")"); @@ -5216,104 +5472,92 @@ public class ThriftHiveMetastore { } - public static class create_type_result implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("create_type_result"); - private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.BOOL, (short)0); + public static class get_type_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_type_result"); + private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.STRUCT, (short)0); private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); - private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3); - private boolean success; + private Type success; public static final int SUCCESS = 0; - private AlreadyExistsException o1; + private MetaException o1; public static final int O1 = 1; - private InvalidObjectException o2; + private NoSuchObjectException o2; public static final int O2 = 2; - private MetaException o3; - public static final int O3 = 3; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { - public boolean success = false; } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.BOOL))); + new StructMetaData(TType.STRUCT, Type.class))); put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRUCT))); }}); static { - FieldMetaData.addStructMetaDataMap(create_type_result.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_type_result.class, metaDataMap); } - public create_type_result() { + public get_type_result() { } - public create_type_result( - boolean success, - AlreadyExistsException o1, - InvalidObjectException o2, - MetaException o3) + public get_type_result( + Type success, + MetaException o1, + NoSuchObjectException o2) { this(); this.success = success; - this.__isset.success = true; this.o1 = o1; this.o2 = o2; - this.o3 = o3; } /** * Performs a deep copy on other. 
*/ - public create_type_result(create_type_result other) { - __isset.success = other.__isset.success; - this.success = other.success; + public get_type_result(get_type_result other) { + if (other.isSetSuccess()) { + this.success = new Type(other.success); + } if (other.isSetO1()) { - this.o1 = new AlreadyExistsException(other.o1); + this.o1 = new MetaException(other.o1); } if (other.isSetO2()) { - this.o2 = new InvalidObjectException(other.o2); - } - if (other.isSetO3()) { - this.o3 = new MetaException(other.o3); + this.o2 = new NoSuchObjectException(other.o2); } } @Override - public create_type_result clone() { - return new create_type_result(this); + public get_type_result clone() { + return new get_type_result(this); } - public boolean isSuccess() { + public Type getSuccess() { return this.success; } - public void setSuccess(boolean success) { + public void setSuccess(Type success) { this.success = success; - this.__isset.success = true; } public void unsetSuccess() { - this.__isset.success = false; + this.success = null; } // Returns true if field success is set (has been asigned a value) and false otherwise public boolean isSetSuccess() { - return this.__isset.success; + return this.success != null; } - public AlreadyExistsException getO1() { + public MetaException getO1() { return this.o1; } - public void setO1(AlreadyExistsException o1) { + public void setO1(MetaException o1) { this.o1 = o1; } @@ -5326,11 +5570,11 @@ public class ThriftHiveMetastore { return this.o1 != null; } - public InvalidObjectException getO2() { + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(InvalidObjectException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -5343,30 +5587,13 @@ public class ThriftHiveMetastore { return this.o2 != null; } - public MetaException getO3() { - return this.o3; - } - - public void setO3(MetaException o3) { - this.o3 = o3; - } - - public void unsetO3() { - this.o3 = null; - } - - // Returns true if field o3 is set (has been asigned a value) and false otherwise - public boolean isSetO3() { - return this.o3 != null; - } - public void setFieldValue(int fieldID, Object value) { switch (fieldID) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((Boolean)value); + setSuccess((Type)value); } break; @@ -5374,7 +5601,7 @@ public class ThriftHiveMetastore { if (value == null) { unsetO1(); } else { - setO1((AlreadyExistsException)value); + setO1((MetaException)value); } break; @@ -5382,15 +5609,7 @@ public class ThriftHiveMetastore { if (value == null) { unsetO2(); } else { - setO2((InvalidObjectException)value); - } - break; - - case O3: - if (value == null) { - unsetO3(); - } else { - setO3((MetaException)value); + setO2((NoSuchObjectException)value); } break; @@ -5402,7 +5621,7 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { case SUCCESS: - return new Boolean(isSuccess()); + return getSuccess(); case O1: return getO1(); @@ -5410,9 +5629,6 @@ public class ThriftHiveMetastore { case O2: return getO2(); - case O3: - return getO3(); - default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -5427,8 +5643,6 @@ public class ThriftHiveMetastore { return isSetO1(); case O2: return isSetO2(); - case O3: - return isSetO3(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -5438,21 +5652,21 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; 
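For orientation amid these regenerated stubs: the get_type hunks above replace the old boolean-shaped result with one whose success value is a Type struct, with MetaException as o1 and NoSuchObjectException as o2. A minimal caller sketch follows; it assumes the usual Thrift-generated ThriftHiveMetastore.Client wrapper and the org.apache.hadoop.hive.metastore.api package, neither of which appears in this hunk.

    import org.apache.thrift.TException;
    // Package assumed from the standard Hive metastore layout; not shown in this patch.
    import org.apache.hadoop.hive.metastore.api.*;

    public class GetTypeSketch {
      // client: an already-connected ThriftHiveMetastore.Client; transport setup omitted.
      static Type lookupType(ThriftHiveMetastore.Client client, String typeName)
          throws TException {
        try {
          // The result's success field is now a Type struct, not a boolean.
          return client.get_type(typeName);
        } catch (NoSuchObjectException e) {  // o2: no type registered under typeName
          return null;
        } catch (MetaException e) {          // o1: general metastore failure
          System.err.println("metastore error: " + e);
          return null;
        }
      }
    }

Although the rename-heavy diff obscures it, the practical change for callers is just this: a richer return type and a narrower, more specific exception pair.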
- if (that instanceof create_type_result) - return this.equals((create_type_result)that); + if (that instanceof get_type_result) + return this.equals((get_type_result)that); return false; } - public boolean equals(create_type_result that) { + public boolean equals(get_type_result that) { if (that == null) return false; - boolean this_present_success = true; - boolean that_present_success = true; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); if (this_present_success || that_present_success) { if (!(this_present_success && that_present_success)) return false; - if (this.success != that.success) + if (!this.success.equals(that.success)) return false; } @@ -5474,15 +5688,6 @@ public class ThriftHiveMetastore { return false; } - boolean this_present_o3 = true && this.isSetO3(); - boolean that_present_o3 = true && that.isSetO3(); - if (this_present_o3 || that_present_o3) { - if (!(this_present_o3 && that_present_o3)) - return false; - if (!this.o3.equals(that.o3)) - return false; - } - return true; } @@ -5503,16 +5708,16 @@ public class ThriftHiveMetastore { switch (field.id) { case SUCCESS: - if (field.type == TType.BOOL) { - this.success = iprot.readBool(); - this.__isset.success = true; + if (field.type == TType.STRUCT) { + this.success = new Type(); + this.success.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; case O1: if (field.type == TType.STRUCT) { - this.o1 = new AlreadyExistsException(); + this.o1 = new MetaException(); this.o1.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); @@ -5520,20 +5725,12 @@ public class ThriftHiveMetastore { break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new InvalidObjectException(); + this.o2 = new NoSuchObjectException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; - case O3: - if (field.type == TType.STRUCT) { - this.o3 = new MetaException(); - this.o3.read(iprot); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -5550,7 +5747,7 @@ public class ThriftHiveMetastore { if (this.isSetSuccess()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(this.success); + this.success.write(oprot); oprot.writeFieldEnd(); } else if (this.isSetO1()) { oprot.writeFieldBegin(O1_FIELD_DESC); @@ -5560,10 +5757,6 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(O2_FIELD_DESC); this.o2.write(oprot); oprot.writeFieldEnd(); - } else if (this.isSetO3()) { - oprot.writeFieldBegin(O3_FIELD_DESC); - this.o3.write(oprot); - oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -5571,11 +5764,15 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("create_type_result("); + StringBuilder sb = new StringBuilder("get_type_result("); boolean first = true; sb.append("success:"); - sb.append(this.success); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } first = false; if (!first) sb.append(", "); sb.append("o1:"); @@ -5593,14 +5790,6 @@ public class ThriftHiveMetastore { sb.append(this.o2); } first = false; - if (!first) sb.append(", "); - sb.append("o3:"); - if (this.o3 == null) { - sb.append("null"); - } else { - sb.append(this.o3); - } - first = false; sb.append(")"); return sb.toString(); } @@ -5612,11 +5801,11 @@ public class ThriftHiveMetastore { } - public static class drop_type_args implements TBase, 
java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("drop_type_args"); - private static final TField TYPE_FIELD_DESC = new TField("type", TType.STRING, (short)1); + public static class create_type_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("create_type_args"); + private static final TField TYPE_FIELD_DESC = new TField("type", TType.STRUCT, (short)1); - private String type; + private Type type; public static final int TYPE = 1; private final Isset __isset = new Isset(); @@ -5625,18 +5814,18 @@ public class ThriftHiveMetastore { public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(TYPE, new FieldMetaData("type", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); + new StructMetaData(TType.STRUCT, Type.class))); }}); static { - FieldMetaData.addStructMetaDataMap(drop_type_args.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(create_type_args.class, metaDataMap); } - public drop_type_args() { + public create_type_args() { } - public drop_type_args( - String type) + public create_type_args( + Type type) { this(); this.type = type; @@ -5645,22 +5834,22 @@ public class ThriftHiveMetastore { /** * Performs a deep copy on other. */ - public drop_type_args(drop_type_args other) { + public create_type_args(create_type_args other) { if (other.isSetType()) { - this.type = other.type; + this.type = new Type(other.type); } } @Override - public drop_type_args clone() { - return new drop_type_args(this); + public create_type_args clone() { + return new create_type_args(this); } - public String getType() { + public Type getType() { return this.type; } - public void setType(String type) { + public void setType(Type type) { this.type = type; } @@ -5679,7 +5868,7 @@ public class ThriftHiveMetastore { if (value == null) { unsetType(); } else { - setType((String)value); + setType((Type)value); } break; @@ -5712,12 +5901,12 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_type_args) - return this.equals((drop_type_args)that); + if (that instanceof create_type_args) + return this.equals((create_type_args)that); return false; } - public boolean equals(drop_type_args that) { + public boolean equals(create_type_args that) { if (that == null) return false; @@ -5750,8 +5939,9 @@ public class ThriftHiveMetastore { switch (field.id) { case TYPE: - if (field.type == TType.STRING) { - this.type = iprot.readString(); + if (field.type == TType.STRUCT) { + this.type = new Type(); + this.type.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } @@ -5773,7 +5963,7 @@ public class ThriftHiveMetastore { oprot.writeStructBegin(STRUCT_DESC); if (this.type != null) { oprot.writeFieldBegin(TYPE_FIELD_DESC); - oprot.writeString(this.type); + this.type.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -5782,7 +5972,7 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_type_args("); + StringBuilder sb = new StringBuilder("create_type_args("); boolean first = true; sb.append("type:"); @@ -5803,15 +5993,21 @@ public class ThriftHiveMetastore { } - public static class drop_type_result implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("drop_type_result"); + public static class create_type_result implements TBase, java.io.Serializable, 
Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("create_type_result"); private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.BOOL, (short)0); - private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)1); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); + private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3); private boolean success; public static final int SUCCESS = 0; - private MetaException o2; - public static final int O2 = 1; + private AlreadyExistsException o1; + public static final int O1 = 1; + private InvalidObjectException o2; + public static final int O2 = 2; + private MetaException o3; + public static final int O3 = 3; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { @@ -5821,41 +6017,55 @@ public class ThriftHiveMetastore { public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); + put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); }}); static { - FieldMetaData.addStructMetaDataMap(drop_type_result.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(create_type_result.class, metaDataMap); } - public drop_type_result() { + public create_type_result() { } - public drop_type_result( + public create_type_result( boolean success, - MetaException o2) + AlreadyExistsException o1, + InvalidObjectException o2, + MetaException o3) { this(); this.success = success; this.__isset.success = true; + this.o1 = o1; this.o2 = o2; + this.o3 = o3; } /** * Performs a deep copy on other. 
*/ - public drop_type_result(drop_type_result other) { + public create_type_result(create_type_result other) { __isset.success = other.__isset.success; this.success = other.success; + if (other.isSetO1()) { + this.o1 = new AlreadyExistsException(other.o1); + } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } @Override - public drop_type_result clone() { - return new drop_type_result(this); + public create_type_result clone() { + return new create_type_result(this); } public boolean isSuccess() { @@ -5876,23 +6086,57 @@ public class ThriftHiveMetastore { return this.__isset.success; } - public MetaException getO2() { - return this.o2; + public AlreadyExistsException getO1() { + return this.o1; } - public void setO2(MetaException o2) { - this.o2 = o2; + public void setO1(AlreadyExistsException o1) { + this.o1 = o1; } - public void unsetO2() { - this.o2 = null; - } + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } // Returns true if field o2 is set (has been asigned a value) and false otherwise public boolean isSetO2() { return this.o2 != null; } + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + // Returns true if field o3 is set (has been asigned a value) and false otherwise + public boolean isSetO3() { + return this.o3 != null; + } + public void setFieldValue(int fieldID, Object value) { switch (fieldID) { case SUCCESS: @@ -5903,11 +6147,27 @@ public class ThriftHiveMetastore { } break; + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((AlreadyExistsException)value); + } + break; + case O2: if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); } break; @@ -5921,9 +6181,15 @@ public class ThriftHiveMetastore { case SUCCESS: return new Boolean(isSuccess()); + case O1: + return getO1(); + case O2: return getO2(); + case O3: + return getO3(); + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -5934,8 +6200,12 @@ public class ThriftHiveMetastore { switch (fieldID) { case SUCCESS: return isSetSuccess(); + case O1: + return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -5945,12 +6215,12 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_type_result) - return this.equals((drop_type_result)that); + if (that instanceof create_type_result) + return this.equals((create_type_result)that); return false; } - public boolean equals(drop_type_result that) { + public boolean equals(create_type_result that) { if (that == null) return false; @@ -5963,6 +6233,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if 
(this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + boolean this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -5972,6 +6251,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -5999,14 +6287,30 @@ public class ThriftHiveMetastore { TProtocolUtil.skip(iprot, field.type); } break; + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new AlreadyExistsException(); + this.o1.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new MetaException(); + this.o2 = new InvalidObjectException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; + case O3: + if (field.type == TType.STRUCT) { + this.o3 = new MetaException(); + this.o3.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -6025,10 +6329,18 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); oprot.writeBool(this.success); oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); } else if (this.isSetO2()) { oprot.writeFieldBegin(O2_FIELD_DESC); this.o2.write(oprot); oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -6036,13 +6348,21 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_type_result("); + StringBuilder sb = new StringBuilder("create_type_result("); boolean first = true; sb.append("success:"); sb.append(this.success); first = false; if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); @@ -6050,6 +6370,14 @@ public class ThriftHiveMetastore { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -6061,74 +6389,74 @@ public class ThriftHiveMetastore { } - public static class get_type_all_args implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_type_all_args"); - private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)1); + public static class drop_type_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("drop_type_args"); + private static final TField TYPE_FIELD_DESC = new TField("type", TType.STRING, (short)1); - private String name; - public static final int NAME = 1; + private String type; + public static final int TYPE = 1; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { } 
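A second orienting sketch, extending the class above under the same assumptions: create_type_args now wraps a full Type struct (TType.STRUCT) rather than a string, create_type_result carries the AlreadyExistsException / InvalidObjectException / MetaException triple as o1-o3, and drop_type keeps its string argument while its result gains NoSuchObjectException alongside MetaException. Type's getName() accessor is assumed from its generated bean methods.

    // Hypothetical round trip: register a Type definition, then drop it by name.
    static void defineThenDrop(ThriftHiveMetastore.Client client, Type t)
        throws TException {
      try {
        boolean created = client.create_type(t);  // args now carry a Type struct
        System.out.println("created: " + created);
      } catch (AlreadyExistsException e) {        // o1
        System.err.println("type already registered: " + e);
      } catch (InvalidObjectException e) {        // o2
        System.err.println("malformed type definition: " + e);
      } catch (MetaException e) {                 // o3
        System.err.println("metastore error: " + e);
      }
      try {
        client.drop_type(t.getName());            // still keyed by the name string
      } catch (NoSuchObjectException e) {         // o2: nothing to drop
        System.err.println("unknown type: " + e);
      } catch (MetaException e) {                 // o1
        System.err.println("metastore error: " + e);
      }
    }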
public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, + put(TYPE, new FieldMetaData("type", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); static { - FieldMetaData.addStructMetaDataMap(get_type_all_args.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(drop_type_args.class, metaDataMap); } - public get_type_all_args() { + public drop_type_args() { } - public get_type_all_args( - String name) + public drop_type_args( + String type) { this(); - this.name = name; + this.type = type; } /** * Performs a deep copy on other. */ - public get_type_all_args(get_type_all_args other) { - if (other.isSetName()) { - this.name = other.name; + public drop_type_args(drop_type_args other) { + if (other.isSetType()) { + this.type = other.type; } } @Override - public get_type_all_args clone() { - return new get_type_all_args(this); + public drop_type_args clone() { + return new drop_type_args(this); } - public String getName() { - return this.name; + public String getType() { + return this.type; } - public void setName(String name) { - this.name = name; + public void setType(String type) { + this.type = type; } - public void unsetName() { - this.name = null; + public void unsetType() { + this.type = null; } - // Returns true if field name is set (has been asigned a value) and false otherwise - public boolean isSetName() { - return this.name != null; + // Returns true if field type is set (has been asigned a value) and false otherwise + public boolean isSetType() { + return this.type != null; } public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case NAME: + case TYPE: if (value == null) { - unsetName(); + unsetType(); } else { - setName((String)value); + setType((String)value); } break; @@ -6139,8 +6467,8 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { - case NAME: - return getName(); + case TYPE: + return getType(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); @@ -6150,8 +6478,8 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case NAME: - return isSetName(); + case TYPE: + return isSetType(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -6161,21 +6489,21 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_type_all_args) - return this.equals((get_type_all_args)that); + if (that instanceof drop_type_args) + return this.equals((drop_type_args)that); return false; } - public boolean equals(get_type_all_args that) { + public boolean equals(drop_type_args that) { if (that == null) return false; - boolean this_present_name = true && this.isSetName(); - boolean that_present_name = true && that.isSetName(); - if (this_present_name || that_present_name) { - if (!(this_present_name && that_present_name)) + boolean this_present_type = true && this.isSetType(); + boolean that_present_type = true && that.isSetType(); + if (this_present_type || that_present_type) { + if (!(this_present_type && that_present_type)) return false; - if (!this.name.equals(that.name)) + if (!this.type.equals(that.type)) return false; } @@ -6198,9 +6526,9 @@ public class ThriftHiveMetastore { } switch (field.id) { - case NAME: + 
case TYPE: if (field.type == TType.STRING) { - this.name = iprot.readString(); + this.type = iprot.readString(); } else { TProtocolUtil.skip(iprot, field.type); } @@ -6220,9 +6548,9 @@ public class ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.name != null) { - oprot.writeFieldBegin(NAME_FIELD_DESC); - oprot.writeString(this.name); + if (this.type != null) { + oprot.writeFieldBegin(TYPE_FIELD_DESC); + oprot.writeString(this.type); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -6231,14 +6559,14 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("get_type_all_args("); + StringBuilder sb = new StringBuilder("drop_type_args("); boolean first = true; - sb.append("name:"); - if (this.name == null) { + sb.append("type:"); + if (this.type == null) { sb.append("null"); } else { - sb.append(this.name); + sb.append(this.type); } first = false; sb.append(")"); @@ -6252,107 +6580,111 @@ public class ThriftHiveMetastore { } - public static class get_type_all_result implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_type_all_result"); - private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.MAP, (short)0); - private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)1); + public static class drop_type_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("drop_type_result"); + private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.BOOL, (short)0); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); - private Map success; + private boolean success; public static final int SUCCESS = 0; - private MetaException o2; - public static final int O2 = 1; + private MetaException o1; + public static final int O1 = 1; + private NoSuchObjectException o2; + public static final int O2 = 2; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { + public boolean success = false; } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new MapMetaData(TType.MAP, - new FieldValueMetaData(TType.STRING), - new StructMetaData(TType.STRUCT, Type.class)))); + new FieldValueMetaData(TType.BOOL))); + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); static { - FieldMetaData.addStructMetaDataMap(get_type_all_result.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(drop_type_result.class, metaDataMap); } - public get_type_all_result() { + public drop_type_result() { } - public get_type_all_result( - Map success, - MetaException o2) + public drop_type_result( + boolean success, + MetaException o1, + NoSuchObjectException o2) { this(); this.success = success; + this.__isset.success = true; + this.o1 = o1; this.o2 = o2; } /** * Performs a deep copy on other. 
*/ - public get_type_all_result(get_type_all_result other) { - if (other.isSetSuccess()) { - Map __this__success = new HashMap(); - for (Map.Entry other_element : other.success.entrySet()) { - - String other_element_key = other_element.getKey(); - Type other_element_value = other_element.getValue(); - - String __this__success_copy_key = other_element_key; - - Type __this__success_copy_value = new Type(other_element_value); - - __this__success.put(__this__success_copy_key, __this__success_copy_value); - } - this.success = __this__success; + public drop_type_result(drop_type_result other) { + __isset.success = other.__isset.success; + this.success = other.success; + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } } @Override - public get_type_all_result clone() { - return new get_type_all_result(this); - } - - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - public void putToSuccess(String key, Type val) { - if (this.success == null) { - this.success = new HashMap(); - } - this.success.put(key, val); + public drop_type_result clone() { + return new drop_type_result(this); } - public Map getSuccess() { + public boolean isSuccess() { return this.success; } - public void setSuccess(Map success) { + public void setSuccess(boolean success) { this.success = success; + this.__isset.success = true; } public void unsetSuccess() { - this.success = null; + this.__isset.success = false; } // Returns true if field success is set (has been asigned a value) and false otherwise public boolean isSetSuccess() { - return this.success != null; + return this.__isset.success; } - public MetaException getO2() { + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -6371,7 +6703,15 @@ public class ThriftHiveMetastore { if (value == null) { unsetSuccess(); } else { - setSuccess((Map)value); + setSuccess((Boolean)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); } break; @@ -6379,7 +6719,7 @@ public class ThriftHiveMetastore { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); } break; @@ -6391,7 +6731,10 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { case SUCCESS: - return getSuccess(); + return new Boolean(isSuccess()); + + case O1: + return getO1(); case O2: return getO2(); @@ -6406,6 +6749,8 @@ public class ThriftHiveMetastore { switch (fieldID) { case SUCCESS: return isSetSuccess(); + case O1: + return isSetO1(); case O2: return isSetO2(); default: @@ -6417,21 +6762,30 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_type_all_result) - return this.equals((get_type_all_result)that); + if (that instanceof drop_type_result) + return this.equals((drop_type_result)that); return false; } - public boolean equals(get_type_all_result that) { + public boolean 
equals(drop_type_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); + boolean this_present_success = true; + boolean that_present_success = true; if (this_present_success || that_present_success) { if (!(this_present_success && that_present_success)) return false; - if (!this.success.equals(that.success)) + if (this.success != that.success) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) return false; } @@ -6464,28 +6818,24 @@ public class ThriftHiveMetastore { switch (field.id) { case SUCCESS: - if (field.type == TType.MAP) { - { - TMap _map62 = iprot.readMapBegin(); - this.success = new HashMap(2*_map62.size); - for (int _i63 = 0; _i63 < _map62.size; ++_i63) - { - String _key64; - Type _val65; - _key64 = iprot.readString(); - _val65 = new Type(); - _val65.read(iprot); - this.success.put(_key64, _val65); - } - iprot.readMapEnd(); - } + if (field.type == TType.BOOL) { + this.success = iprot.readBool(); + this.__isset.success = true; + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new MetaException(); + this.o1.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new MetaException(); + this.o2 = new NoSuchObjectException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); @@ -6507,14 +6857,11 @@ public class ThriftHiveMetastore { if (this.isSetSuccess()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeMapBegin(new TMap(TType.STRING, TType.STRUCT, this.success.size())); - for (Map.Entry _iter66 : this.success.entrySet()) { - oprot.writeString(_iter66.getKey()); - _iter66.getValue().write(oprot); - } - oprot.writeMapEnd(); - } + oprot.writeBool(this.success); + oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); oprot.writeFieldEnd(); } else if (this.isSetO2()) { oprot.writeFieldBegin(O2_FIELD_DESC); @@ -6527,14 +6874,18 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("get_type_all_result("); + StringBuilder sb = new StringBuilder("drop_type_result("); boolean first = true; sb.append("success:"); - if (this.success == null) { + sb.append(this.success); + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { sb.append("null"); } else { - sb.append(this.success); + sb.append(this.o1); } first = false; if (!first) sb.append(", "); @@ -6556,109 +6907,74 @@ public class ThriftHiveMetastore { } - public static class get_fields_args implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_fields_args"); - private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); - private static final TField TABLE_NAME_FIELD_DESC = new TField("table_name", TType.STRING, (short)2); + public static class get_type_all_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_type_all_args"); + private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)1); - private 
String db_name; - public static final int DB_NAME = 1; - private String table_name; - public static final int TABLE_NAME = 2; + private String name; + public static final int NAME = 1; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); - put(TABLE_NAME, new FieldMetaData("table_name", TFieldRequirementType.DEFAULT, + put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); static { - FieldMetaData.addStructMetaDataMap(get_fields_args.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_type_all_args.class, metaDataMap); } - public get_fields_args() { + public get_type_all_args() { } - public get_fields_args( - String db_name, - String table_name) + public get_type_all_args( + String name) { this(); - this.db_name = db_name; - this.table_name = table_name; + this.name = name; } /** * Performs a deep copy on other. */ - public get_fields_args(get_fields_args other) { - if (other.isSetDb_name()) { - this.db_name = other.db_name; - } - if (other.isSetTable_name()) { - this.table_name = other.table_name; + public get_type_all_args(get_type_all_args other) { + if (other.isSetName()) { + this.name = other.name; } } @Override - public get_fields_args clone() { - return new get_fields_args(this); - } - - public String getDb_name() { - return this.db_name; - } - - public void setDb_name(String db_name) { - this.db_name = db_name; - } - - public void unsetDb_name() { - this.db_name = null; - } - - // Returns true if field db_name is set (has been asigned a value) and false otherwise - public boolean isSetDb_name() { - return this.db_name != null; + public get_type_all_args clone() { + return new get_type_all_args(this); } - public String getTable_name() { - return this.table_name; + public String getName() { + return this.name; } - public void setTable_name(String table_name) { - this.table_name = table_name; + public void setName(String name) { + this.name = name; } - public void unsetTable_name() { - this.table_name = null; + public void unsetName() { + this.name = null; } - // Returns true if field table_name is set (has been asigned a value) and false otherwise - public boolean isSetTable_name() { - return this.table_name != null; + // Returns true if field name is set (has been asigned a value) and false otherwise + public boolean isSetName() { + return this.name != null; } public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case DB_NAME: - if (value == null) { - unsetDb_name(); - } else { - setDb_name((String)value); - } - break; - - case TABLE_NAME: + case NAME: if (value == null) { - unsetTable_name(); + unsetName(); } else { - setTable_name((String)value); + setName((String)value); } break; @@ -6669,11 +6985,8 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { - case DB_NAME: - return getDb_name(); - - case TABLE_NAME: - return getTable_name(); + case NAME: + return getName(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); @@ -6683,10 +6996,8 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case DB_NAME: - return 
isSetDb_name(); - case TABLE_NAME: - return isSetTable_name(); + case NAME: + return isSetName(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -6696,30 +7007,21 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_fields_args) - return this.equals((get_fields_args)that); + if (that instanceof get_type_all_args) + return this.equals((get_type_all_args)that); return false; } - public boolean equals(get_fields_args that) { + public boolean equals(get_type_all_args that) { if (that == null) return false; - boolean this_present_db_name = true && this.isSetDb_name(); - boolean that_present_db_name = true && that.isSetDb_name(); - if (this_present_db_name || that_present_db_name) { - if (!(this_present_db_name && that_present_db_name)) - return false; - if (!this.db_name.equals(that.db_name)) - return false; - } - - boolean this_present_table_name = true && this.isSetTable_name(); - boolean that_present_table_name = true && that.isSetTable_name(); - if (this_present_table_name || that_present_table_name) { - if (!(this_present_table_name && that_present_table_name)) + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) return false; - if (!this.table_name.equals(that.table_name)) + if (!this.name.equals(that.name)) return false; } @@ -6742,16 +7044,9 @@ public class ThriftHiveMetastore { } switch (field.id) { - case DB_NAME: - if (field.type == TType.STRING) { - this.db_name = iprot.readString(); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; - case TABLE_NAME: + case NAME: if (field.type == TType.STRING) { - this.table_name = iprot.readString(); + this.name = iprot.readString(); } else { TProtocolUtil.skip(iprot, field.type); } @@ -6771,14 +7066,9 @@ public class ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.db_name != null) { - oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(this.db_name); - oprot.writeFieldEnd(); - } - if (this.table_name != null) { - oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - oprot.writeString(this.table_name); + if (this.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(this.name); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -6787,22 +7077,14 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("get_fields_args("); + StringBuilder sb = new StringBuilder("get_type_all_args("); boolean first = true; - sb.append("db_name:"); - if (this.db_name == null) { - sb.append("null"); - } else { - sb.append(this.db_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("table_name:"); - if (this.table_name == null) { + sb.append("name:"); + if (this.name == null) { sb.append("null"); } else { - sb.append(this.table_name); + sb.append(this.name); } first = false; sb.append(")"); @@ -6816,21 +7098,15 @@ public class ThriftHiveMetastore { } - public static class get_fields_result implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_fields_result"); - private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0); - private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); - private static final TField O2_FIELD_DESC 
= new TField("o2", TType.STRUCT, (short)2); - private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3); + public static class get_type_all_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_type_all_result"); + private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.MAP, (short)0); + private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)1); - private List success; + private Map success; public static final int SUCCESS = 0; - private MetaException o1; - public static final int O1 = 1; - private UnknownTableException o2; - public static final int O2 = 2; - private UnknownDBException o3; - public static final int O3 = 3; + private MetaException o2; + public static final int O2 = 1; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { @@ -6838,83 +7114,74 @@ public class ThriftHiveMetastore { public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, - new StructMetaData(TType.STRUCT, FieldSchema.class)))); - put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRUCT))); + new MapMetaData(TType.MAP, + new FieldValueMetaData(TType.STRING), + new StructMetaData(TType.STRUCT, Type.class)))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRUCT))); }}); static { - FieldMetaData.addStructMetaDataMap(get_fields_result.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_type_all_result.class, metaDataMap); } - public get_fields_result() { + public get_type_all_result() { } - public get_fields_result( - List success, - MetaException o1, - UnknownTableException o2, - UnknownDBException o3) + public get_type_all_result( + Map success, + MetaException o2) { this(); this.success = success; - this.o1 = o1; this.o2 = o2; - this.o3 = o3; } /** * Performs a deep copy on other. */ - public get_fields_result(get_fields_result other) { + public get_type_all_result(get_type_all_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(); - for (FieldSchema other_element : other.success) { - __this__success.add(new FieldSchema(other_element)); + Map __this__success = new HashMap(); + for (Map.Entry other_element : other.success.entrySet()) { + + String other_element_key = other_element.getKey(); + Type other_element_value = other_element.getValue(); + + String __this__success_copy_key = other_element_key; + + Type __this__success_copy_value = new Type(other_element_value); + + __this__success.put(__this__success_copy_key, __this__success_copy_value); } this.success = __this__success; } - if (other.isSetO1()) { - this.o1 = new MetaException(other.o1); - } if (other.isSetO2()) { - this.o2 = new UnknownTableException(other.o2); - } - if (other.isSetO3()) { - this.o3 = new UnknownDBException(other.o3); + this.o2 = new MetaException(other.o2); } } @Override - public get_fields_result clone() { - return new get_fields_result(this); + public get_type_all_result clone() { + return new get_type_all_result(this); } public int getSuccessSize() { return (this.success == null) ? 
0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? null : this.success.iterator(); - } - - public void addToSuccess(FieldSchema elem) { + public void putToSuccess(String key, Type val) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new HashMap(); } - this.success.add(elem); + this.success.put(key, val); } - public List getSuccess() { + public Map getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(Map success) { this.success = success; } @@ -6927,28 +7194,11 @@ public class ThriftHiveMetastore { return this.success != null; } - public MetaException getO1() { - return this.o1; - } - - public void setO1(MetaException o1) { - this.o1 = o1; - } - - public void unsetO1() { - this.o1 = null; - } - - // Returns true if field o1 is set (has been asigned a value) and false otherwise - public boolean isSetO1() { - return this.o1 != null; - } - - public UnknownTableException getO2() { + public MetaException getO2() { return this.o2; } - public void setO2(UnknownTableException o2) { + public void setO2(MetaException o2) { this.o2 = o2; } @@ -6961,38 +7211,13 @@ public class ThriftHiveMetastore { return this.o2 != null; } - public UnknownDBException getO3() { - return this.o3; - } - - public void setO3(UnknownDBException o3) { - this.o3 = o3; - } - - public void unsetO3() { - this.o3 = null; - } - - // Returns true if field o3 is set (has been asigned a value) and false otherwise - public boolean isSetO3() { - return this.o3 != null; - } - public void setFieldValue(int fieldID, Object value) { switch (fieldID) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); - } - break; - - case O1: - if (value == null) { - unsetO1(); - } else { - setO1((MetaException)value); + setSuccess((Map)value); } break; @@ -7000,15 +7225,7 @@ public class ThriftHiveMetastore { if (value == null) { unsetO2(); } else { - setO2((UnknownTableException)value); - } - break; - - case O3: - if (value == null) { - unsetO3(); - } else { - setO3((UnknownDBException)value); + setO2((MetaException)value); } break; @@ -7022,15 +7239,9 @@ public class ThriftHiveMetastore { case SUCCESS: return getSuccess(); - case O1: - return getO1(); - case O2: return getO2(); - case O3: - return getO3(); - default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -7041,12 +7252,8 @@ public class ThriftHiveMetastore { switch (fieldID) { case SUCCESS: return isSetSuccess(); - case O1: - return isSetO1(); case O2: return isSetO2(); - case O3: - return isSetO3(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -7056,12 +7263,12 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_fields_result) - return this.equals((get_fields_result)that); + if (that instanceof get_type_all_result) + return this.equals((get_type_all_result)that); return false; } - public boolean equals(get_fields_result that) { + public boolean equals(get_type_all_result that) { if (that == null) return false; @@ -7074,15 +7281,6 @@ public class ThriftHiveMetastore { return false; } - boolean this_present_o1 = true && this.isSetO1(); - boolean that_present_o1 = true && that.isSetO1(); - if (this_present_o1 || that_present_o1) { - if (!(this_present_o1 && that_present_o1)) - return false; - if (!this.o1.equals(that.o1)) - return false; - } - boolean 
this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -7092,15 +7290,6 @@ public class ThriftHiveMetastore { return false; } - boolean this_present_o3 = true && this.isSetO3(); - boolean that_present_o3 = true && that.isSetO3(); - if (this_present_o3 || that_present_o3) { - if (!(this_present_o3 && that_present_o3)) - return false; - if (!this.o3.equals(that.o3)) - return false; - } - return true; } @@ -7121,43 +7310,29 @@ public class ThriftHiveMetastore { switch (field.id) { case SUCCESS: - if (field.type == TType.LIST) { + if (field.type == TType.MAP) { { - TList _list67 = iprot.readListBegin(); - this.success = new ArrayList(_list67.size); - for (int _i68 = 0; _i68 < _list67.size; ++_i68) + TMap _map66 = iprot.readMapBegin(); + this.success = new HashMap(2*_map66.size); + for (int _i67 = 0; _i67 < _map66.size; ++_i67) { - FieldSchema _elem69; - _elem69 = new FieldSchema(); - _elem69.read(iprot); - this.success.add(_elem69); + String _key68; + Type _val69; + _key68 = iprot.readString(); + _val69 = new Type(); + _val69.read(iprot); + this.success.put(_key68, _val69); } - iprot.readListEnd(); + iprot.readMapEnd(); } } else { TProtocolUtil.skip(iprot, field.type); } break; - case O1: + case O2: if (field.type == TType.STRUCT) { - this.o1 = new MetaException(); - this.o1.read(iprot); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; - case O2: - if (field.type == TType.STRUCT) { - this.o2 = new UnknownTableException(); - this.o2.read(iprot); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; - case O3: - if (field.type == TType.STRUCT) { - this.o3 = new UnknownDBException(); - this.o3.read(iprot); + this.o2 = new MetaException(); + this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } @@ -7179,25 +7354,18 @@ public class ThriftHiveMetastore { if (this.isSetSuccess()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { - oprot.writeListBegin(new TList(TType.STRUCT, this.success.size())); - for (FieldSchema _iter70 : this.success) { - _iter70.write(oprot); + oprot.writeMapBegin(new TMap(TType.STRING, TType.STRUCT, this.success.size())); + for (Map.Entry _iter70 : this.success.entrySet()) { + oprot.writeString(_iter70.getKey()); + _iter70.getValue().write(oprot); } - oprot.writeListEnd(); + oprot.writeMapEnd(); } oprot.writeFieldEnd(); - } else if (this.isSetO1()) { - oprot.writeFieldBegin(O1_FIELD_DESC); - this.o1.write(oprot); - oprot.writeFieldEnd(); } else if (this.isSetO2()) { oprot.writeFieldBegin(O2_FIELD_DESC); this.o2.write(oprot); oprot.writeFieldEnd(); - } else if (this.isSetO3()) { - oprot.writeFieldBegin(O3_FIELD_DESC); - this.o3.write(oprot); - oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -7205,7 +7373,7 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("get_fields_result("); + StringBuilder sb = new StringBuilder("get_type_all_result("); boolean first = true; sb.append("success:"); @@ -7216,14 +7384,6 @@ public class ThriftHiveMetastore { } first = false; if (!first) sb.append(", "); - sb.append("o1:"); - if (this.o1 == null) { - sb.append("null"); - } else { - sb.append(this.o1); - } - first = false; - if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); @@ -7231,14 +7391,6 @@ public class ThriftHiveMetastore { sb.append(this.o2); } first = false; - if (!first) sb.append(", "); - sb.append("o3:"); - if (this.o3 == null) { - 
sb.append("null"); - } else { - sb.append(this.o3); - } - first = false; sb.append(")"); return sb.toString(); } @@ -7250,8 +7402,8 @@ public class ThriftHiveMetastore { } - public static class get_schema_args implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_schema_args"); + public static class get_fields_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_fields_args"); private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); private static final TField TABLE_NAME_FIELD_DESC = new TField("table_name", TType.STRING, (short)2); @@ -7272,13 +7424,13 @@ public class ThriftHiveMetastore { }}); static { - FieldMetaData.addStructMetaDataMap(get_schema_args.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_fields_args.class, metaDataMap); } - public get_schema_args() { + public get_fields_args() { } - public get_schema_args( + public get_fields_args( String db_name, String table_name) { @@ -7290,7 +7442,7 @@ public class ThriftHiveMetastore { /** * Performs a deep copy on other. */ - public get_schema_args(get_schema_args other) { + public get_fields_args(get_fields_args other) { if (other.isSetDb_name()) { this.db_name = other.db_name; } @@ -7300,8 +7452,8 @@ public class ThriftHiveMetastore { } @Override - public get_schema_args clone() { - return new get_schema_args(this); + public get_fields_args clone() { + return new get_fields_args(this); } public String getDb_name() { @@ -7390,12 +7542,12 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_schema_args) - return this.equals((get_schema_args)that); + if (that instanceof get_fields_args) + return this.equals((get_fields_args)that); return false; } - public boolean equals(get_schema_args that) { + public boolean equals(get_fields_args that) { if (that == null) return false; @@ -7481,7 +7633,7 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("get_schema_args("); + StringBuilder sb = new StringBuilder("get_fields_args("); boolean first = true; sb.append("db_name:"); @@ -7510,8 +7662,8 @@ public class ThriftHiveMetastore { } - public static class get_schema_result implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_schema_result"); + public static class get_fields_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_fields_result"); private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0); private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); @@ -7543,13 +7695,13 @@ public class ThriftHiveMetastore { }}); static { - FieldMetaData.addStructMetaDataMap(get_schema_result.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_fields_result.class, metaDataMap); } - public get_schema_result() { + public get_fields_result() { } - public get_schema_result( + public get_fields_result( List success, MetaException o1, UnknownTableException o2, @@ -7565,7 +7717,7 @@ public class ThriftHiveMetastore { /** * Performs a deep copy on other. 
*/ - public get_schema_result(get_schema_result other) { + public get_fields_result(get_fields_result other) { if (other.isSetSuccess()) { List __this__success = new ArrayList(); for (FieldSchema other_element : other.success) { @@ -7585,8 +7737,8 @@ public class ThriftHiveMetastore { } @Override - public get_schema_result clone() { - return new get_schema_result(this); + public get_fields_result clone() { + return new get_fields_result(this); } public int getSuccessSize() { @@ -7750,12 +7902,12 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_schema_result) - return this.equals((get_schema_result)that); + if (that instanceof get_fields_result) + return this.equals((get_fields_result)that); return false; } - public boolean equals(get_schema_result that) { + public boolean equals(get_fields_result that) { if (that == null) return false; @@ -7899,7 +8051,7 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("get_schema_result("); + StringBuilder sb = new StringBuilder("get_fields_result("); boolean first = true; sb.append("success:"); @@ -7944,74 +8096,109 @@ public class ThriftHiveMetastore { } - public static class create_table_args implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("create_table_args"); - private static final TField TBL_FIELD_DESC = new TField("tbl", TType.STRUCT, (short)1); + public static class get_schema_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_schema_args"); + private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); + private static final TField TABLE_NAME_FIELD_DESC = new TField("table_name", TType.STRING, (short)2); - private Table tbl; - public static final int TBL = 1; + private String db_name; + public static final int DB_NAME = 1; + private String table_name; + public static final int TABLE_NAME = 2; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TBL, new FieldMetaData("tbl", TFieldRequirementType.DEFAULT, - new StructMetaData(TType.STRUCT, Table.class))); + put(DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(TABLE_NAME, new FieldMetaData("table_name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); }}); static { - FieldMetaData.addStructMetaDataMap(create_table_args.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_schema_args.class, metaDataMap); } - public create_table_args() { + public get_schema_args() { } - public create_table_args( - Table tbl) + public get_schema_args( + String db_name, + String table_name) { this(); - this.tbl = tbl; + this.db_name = db_name; + this.table_name = table_name; } /** * Performs a deep copy on other. 
*/ - public create_table_args(create_table_args other) { - if (other.isSetTbl()) { - this.tbl = new Table(other.tbl); + public get_schema_args(get_schema_args other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTable_name()) { + this.table_name = other.table_name; } } @Override - public create_table_args clone() { - return new create_table_args(this); + public get_schema_args clone() { + return new get_schema_args(this); } - public Table getTbl() { - return this.tbl; + public String getDb_name() { + return this.db_name; } - public void setTbl(Table tbl) { - this.tbl = tbl; + public void setDb_name(String db_name) { + this.db_name = db_name; } - public void unsetTbl() { - this.tbl = null; + public void unsetDb_name() { + this.db_name = null; } - // Returns true if field tbl is set (has been asigned a value) and false otherwise - public boolean isSetTbl() { - return this.tbl != null; + // Returns true if field db_name is set (has been asigned a value) and false otherwise + public boolean isSetDb_name() { + return this.db_name != null; + } + + public String getTable_name() { + return this.table_name; + } + + public void setTable_name(String table_name) { + this.table_name = table_name; + } + + public void unsetTable_name() { + this.table_name = null; + } + + // Returns true if field table_name is set (has been asigned a value) and false otherwise + public boolean isSetTable_name() { + return this.table_name != null; } public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case TBL: + case DB_NAME: if (value == null) { - unsetTbl(); + unsetDb_name(); } else { - setTbl((Table)value); + setDb_name((String)value); + } + break; + + case TABLE_NAME: + if (value == null) { + unsetTable_name(); + } else { + setTable_name((String)value); } break; @@ -8022,8 +8209,11 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { - case TBL: - return getTbl(); + case DB_NAME: + return getDb_name(); + + case TABLE_NAME: + return getTable_name(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); @@ -8033,8 +8223,10 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case TBL: - return isSetTbl(); + case DB_NAME: + return isSetDb_name(); + case TABLE_NAME: + return isSetTable_name(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -8044,21 +8236,30 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof create_table_args) - return this.equals((create_table_args)that); + if (that instanceof get_schema_args) + return this.equals((get_schema_args)that); return false; } - public boolean equals(create_table_args that) { + public boolean equals(get_schema_args that) { if (that == null) return false; - boolean this_present_tbl = true && this.isSetTbl(); - boolean that_present_tbl = true && that.isSetTbl(); - if (this_present_tbl || that_present_tbl) { - if (!(this_present_tbl && that_present_tbl)) + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) return false; - if (!this.tbl.equals(that.tbl)) + if (!this.db_name.equals(that.db_name)) + return 
false; + } + + boolean this_present_table_name = true && this.isSetTable_name(); + boolean that_present_table_name = true && that.isSetTable_name(); + if (this_present_table_name || that_present_table_name) { + if (!(this_present_table_name && that_present_table_name)) + return false; + if (!this.table_name.equals(that.table_name)) return false; } @@ -8081,10 +8282,16 @@ public class ThriftHiveMetastore { } switch (field.id) { - case TBL: - if (field.type == TType.STRUCT) { - this.tbl = new Table(); - this.tbl.read(iprot); + case DB_NAME: + if (field.type == TType.STRING) { + this.db_name = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case TABLE_NAME: + if (field.type == TType.STRING) { + this.table_name = iprot.readString(); } else { TProtocolUtil.skip(iprot, field.type); } @@ -8104,9 +8311,14 @@ public class ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.tbl != null) { - oprot.writeFieldBegin(TBL_FIELD_DESC); - this.tbl.write(oprot); + if (this.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(this.db_name); + oprot.writeFieldEnd(); + } + if (this.table_name != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(this.table_name); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -8115,14 +8327,22 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("create_table_args("); + StringBuilder sb = new StringBuilder("get_schema_args("); boolean first = true; - sb.append("tbl:"); - if (this.tbl == null) { + sb.append("db_name:"); + if (this.db_name == null) { sb.append("null"); } else { - sb.append(this.tbl); + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("table_name:"); + if (this.table_name == null) { + sb.append("null"); + } else { + sb.append(this.table_name); } first = false; sb.append(")"); @@ -8136,85 +8356,122 @@ public class ThriftHiveMetastore { } - public static class create_table_result implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("create_table_result"); + public static class get_schema_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_schema_result"); + private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0); private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3); - private static final TField O4_FIELD_DESC = new TField("o4", TType.STRUCT, (short)4); - private AlreadyExistsException o1; + private List success; + public static final int SUCCESS = 0; + private MetaException o1; public static final int O1 = 1; - private InvalidObjectException o2; + private UnknownTableException o2; public static final int O2 = 2; - private MetaException o3; + private UnknownDBException o3; public static final int O3 = 3; - private NoSuchObjectException o4; - public static final int O4 = 4; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, + new 
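/*
 * Editor's note, illustrative only: every generated read() in this file follows
 * the same defensive loop visible above: read a field header, dispatch on
 * field.id, and fall back to TProtocolUtil.skip() when the id or wire type is
 * unexpected. That skip is what lets an old reader tolerate fields added by a
 * newer writer. Condensed sketch of the pattern, using the same types this
 * file uses:
 *
 *   TField field;
 *   iprot.readStructBegin();
 *   while (true) {
 *     field = iprot.readFieldBegin();
 *     if (field.type == TType.STOP) break;
 *     switch (field.id) {
 *       case DB_NAME:
 *         if (field.type == TType.STRING) this.db_name = iprot.readString();
 *         else TProtocolUtil.skip(iprot, field.type);  // wrong wire type: ignore
 *         break;
 *       default:
 *         TProtocolUtil.skip(iprot, field.type);        // unknown field id: ignore
 *     }
 *     iprot.readFieldEnd();
 *   }
 *   iprot.readStructEnd();
 */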
StructMetaData(TType.STRUCT, FieldSchema.class)))); put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(O4, new FieldMetaData("o4", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRUCT))); }}); static { - FieldMetaData.addStructMetaDataMap(create_table_result.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_schema_result.class, metaDataMap); } - public create_table_result() { + public get_schema_result() { } - public create_table_result( - AlreadyExistsException o1, - InvalidObjectException o2, - MetaException o3, - NoSuchObjectException o4) + public get_schema_result( + List success, + MetaException o1, + UnknownTableException o2, + UnknownDBException o3) { this(); + this.success = success; this.o1 = o1; this.o2 = o2; this.o3 = o3; - this.o4 = o4; } /** * Performs a deep copy on other. */ - public create_table_result(create_table_result other) { + public get_schema_result(get_schema_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(); + for (FieldSchema other_element : other.success) { + __this__success.add(new FieldSchema(other_element)); + } + this.success = __this__success; + } if (other.isSetO1()) { - this.o1 = new AlreadyExistsException(other.o1); + this.o1 = new MetaException(other.o1); } if (other.isSetO2()) { - this.o2 = new InvalidObjectException(other.o2); + this.o2 = new UnknownTableException(other.o2); } if (other.isSetO3()) { - this.o3 = new MetaException(other.o3); - } - if (other.isSetO4()) { - this.o4 = new NoSuchObjectException(other.o4); + this.o3 = new UnknownDBException(other.o3); } } @Override - public create_table_result clone() { - return new create_table_result(this); + public get_schema_result clone() { + return new get_schema_result(this); } - public AlreadyExistsException getO1() { + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(FieldSchema elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + // Returns true if field success is set (has been asigned a value) and false otherwise + public boolean isSetSuccess() { + return this.success != null; + } + + public MetaException getO1() { return this.o1; } - public void setO1(AlreadyExistsException o1) { + public void setO1(MetaException o1) { this.o1 = o1; } @@ -8227,11 +8484,11 @@ public class ThriftHiveMetastore { return this.o1 != null; } - public InvalidObjectException getO2() { + public UnknownTableException getO2() { return this.o2; } - public void setO2(InvalidObjectException o2) { + public void setO2(UnknownTableException o2) { this.o2 = o2; } @@ -8244,11 +8501,11 @@ public class ThriftHiveMetastore { return this.o2 != null; } - public MetaException getO3() { + public UnknownDBException getO3() { return this.o3; } - public void setO3(MetaException o3) { + public void setO3(UnknownDBException o3) { this.o3 = o3; } @@ -8261,30 +8518,21 @@ public class ThriftHiveMetastore { return this.o3 != null; } - public NoSuchObjectException getO4() { - return this.o4; - } - - public void setO4(NoSuchObjectException o4) { - this.o4 = o4; - } - - public void unsetO4() { - this.o4 = null; - } - - // Returns true if field o4 is set (has been asigned a value) and false otherwise - public boolean isSetO4() { - return this.o4 != null; - } - public void setFieldValue(int fieldID, Object value) { switch (fieldID) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + case O1: if (value == null) { unsetO1(); } else { - setO1((AlreadyExistsException)value); + setO1((MetaException)value); } break; @@ -8292,7 +8540,7 @@ public class ThriftHiveMetastore { if (value == null) { unsetO2(); } else { - setO2((InvalidObjectException)value); + setO2((UnknownTableException)value); } break; @@ -8300,15 +8548,7 @@ public class ThriftHiveMetastore { if (value == null) { unsetO3(); } else { - setO3((MetaException)value); - } - break; - - case O4: - if (value == null) { - unsetO4(); - } else { - setO4((NoSuchObjectException)value); + setO3((UnknownDBException)value); } break; @@ -8319,6 +8559,9 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { + case SUCCESS: + return getSuccess(); + case O1: return getO1(); @@ -8328,9 +8571,6 @@ public class ThriftHiveMetastore { case O3: return getO3(); - case O4: - return getO4(); - default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -8339,14 +8579,14 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { + case SUCCESS: + return isSetSuccess(); case O1: return isSetO1(); case O2: return isSetO2(); case O3: return isSetO3(); - case O4: - return isSetO4(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -8356,15 +8596,24 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof create_table_result) - return this.equals((create_table_result)that); + if (that instanceof 
get_schema_result) + return this.equals((get_schema_result)that); return false; } - public boolean equals(create_table_result that) { + public boolean equals(get_schema_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -8392,15 +8641,6 @@ public class ThriftHiveMetastore { return false; } - boolean this_present_o4 = true && this.isSetO4(); - boolean that_present_o4 = true && that.isSetO4(); - if (this_present_o4 || that_present_o4) { - if (!(this_present_o4 && that_present_o4)) - return false; - if (!this.o4.equals(that.o4)) - return false; - } - return true; } @@ -8420,9 +8660,27 @@ public class ThriftHiveMetastore { } switch (field.id) { + case SUCCESS: + if (field.type == TType.LIST) { + { + TList _list75 = iprot.readListBegin(); + this.success = new ArrayList(_list75.size); + for (int _i76 = 0; _i76 < _list75.size; ++_i76) + { + FieldSchema _elem77; + _elem77 = new FieldSchema(); + _elem77.read(iprot); + this.success.add(_elem77); + } + iprot.readListEnd(); + } + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; case O1: if (field.type == TType.STRUCT) { - this.o1 = new AlreadyExistsException(); + this.o1 = new MetaException(); this.o1.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); @@ -8430,7 +8688,7 @@ public class ThriftHiveMetastore { break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new InvalidObjectException(); + this.o2 = new UnknownTableException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); @@ -8438,20 +8696,12 @@ public class ThriftHiveMetastore { break; case O3: if (field.type == TType.STRUCT) { - this.o3 = new MetaException(); + this.o3 = new UnknownDBException(); this.o3.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; - case O4: - if (field.type == TType.STRUCT) { - this.o4 = new NoSuchObjectException(); - this.o4.read(iprot); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -8466,7 +8716,17 @@ public class ThriftHiveMetastore { public void write(TProtocol oprot) throws TException { oprot.writeStructBegin(STRUCT_DESC); - if (this.isSetO1()) { + if (this.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new TList(TType.STRUCT, this.success.size())); + for (FieldSchema _iter78 : this.success) { + _iter78.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } else if (this.isSetO1()) { oprot.writeFieldBegin(O1_FIELD_DESC); this.o1.write(oprot); oprot.writeFieldEnd(); @@ -8478,10 +8738,6 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(O3_FIELD_DESC); this.o3.write(oprot); oprot.writeFieldEnd(); - } else if (this.isSetO4()) { - oprot.writeFieldBegin(O4_FIELD_DESC); - this.o4.write(oprot); - oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -8489,9 +8745,17 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("create_table_result("); + StringBuilder sb = new StringBuilder("get_schema_result("); boolean first = true; + 
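/*
 * Editor's note, illustrative only: get_fields_args and get_schema_args are
 * wire-identical (db_name, table_name), and both result structs carry a
 * List<FieldSchema> plus the same three exceptions. In Hive, get_schema is the
 * variant that also folds in partition columns (stated here as background, not
 * something this hunk shows). Hypothetical client sketch:
 *
 *   List<FieldSchema> cols   = client.get_fields("default", "page_view");
 *   List<FieldSchema> schema = client.get_schema("default", "page_view");
 *   // schema.size() >= cols.size() when the table is partitioned
 */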
sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -8515,14 +8779,6 @@ public class ThriftHiveMetastore { sb.append(this.o3); } first = false; - if (!first) sb.append(", "); - sb.append("o4:"); - if (this.o4 == null) { - sb.append("null"); - } else { - sb.append(this.o4); - } - first = false; sb.append(")"); return sb.toString(); } @@ -8534,146 +8790,1285 @@ public class ThriftHiveMetastore { } - public static class drop_table_args implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("drop_table_args"); - private static final TField DBNAME_FIELD_DESC = new TField("dbname", TType.STRING, (short)1); - private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)2); - private static final TField DELETE_DATA_FIELD_DESC = new TField("deleteData", TType.BOOL, (short)3); + public static class create_table_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("create_table_args"); + private static final TField TBL_FIELD_DESC = new TField("tbl", TType.STRUCT, (short)1); - private String dbname; - public static final int DBNAME = 1; - private String name; - public static final int NAME = 2; - private boolean deleteData; - public static final int DELETEDATA = 3; + private Table tbl; + public static final int TBL = 1; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { - public boolean deleteData = false; } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(DBNAME, new FieldMetaData("dbname", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); - put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); - put(DELETEDATA, new FieldMetaData("deleteData", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.BOOL))); + put(TBL, new FieldMetaData("tbl", TFieldRequirementType.DEFAULT, + new StructMetaData(TType.STRUCT, Table.class))); }}); static { - FieldMetaData.addStructMetaDataMap(drop_table_args.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(create_table_args.class, metaDataMap); } - public drop_table_args() { + public create_table_args() { } - public drop_table_args( - String dbname, - String name, - boolean deleteData) + public create_table_args( + Table tbl) { this(); - this.dbname = dbname; - this.name = name; - this.deleteData = deleteData; - this.__isset.deleteData = true; + this.tbl = tbl; } /** * Performs a deep copy on other. 
*/ - public drop_table_args(drop_table_args other) { - if (other.isSetDbname()) { - this.dbname = other.dbname; + public create_table_args(create_table_args other) { + if (other.isSetTbl()) { + this.tbl = new Table(other.tbl); } - if (other.isSetName()) { - this.name = other.name; + } + + @Override + public create_table_args clone() { + return new create_table_args(this); + } + + public Table getTbl() { + return this.tbl; + } + + public void setTbl(Table tbl) { + this.tbl = tbl; + } + + public void unsetTbl() { + this.tbl = null; + } + + // Returns true if field tbl is set (has been asigned a value) and false otherwise + public boolean isSetTbl() { + return this.tbl != null; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case TBL: + if (value == null) { + unsetTbl(); + } else { + setTbl((Table)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case TBL: + return getTbl(); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case TBL: + return isSetTbl(); + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } - __isset.deleteData = other.__isset.deleteData; - this.deleteData = other.deleteData; } @Override - public drop_table_args clone() { - return new drop_table_args(this); + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_table_args) + return this.equals((create_table_args)that); + return false; } - public String getDbname() { - return this.dbname; + public boolean equals(create_table_args that) { + if (that == null) + return false; + + boolean this_present_tbl = true && this.isSetTbl(); + boolean that_present_tbl = true && that.isSetTbl(); + if (this_present_tbl || that_present_tbl) { + if (!(this_present_tbl && that_present_tbl)) + return false; + if (!this.tbl.equals(that.tbl)) + return false; + } + + return true; } - public void setDbname(String dbname) { - this.dbname = dbname; + @Override + public int hashCode() { + return 0; } - public void unsetDbname() { - this.dbname = null; + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case TBL: + if (field.type == TType.STRUCT) { + this.tbl = new Table(); + this.tbl.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); } - // Returns true if field dbname is set (has been asigned a value) and false otherwise - public boolean isSetDbname() { - return this.dbname != null; + public void write(TProtocol oprot) throws TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.tbl != null) { + oprot.writeFieldBegin(TBL_FIELD_DESC); + this.tbl.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); } - public String getName() { - return this.name; + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_table_args("); + boolean first = true; + + 
sb.append("tbl:"); + if (this.tbl == null) { + sb.append("null"); + } else { + sb.append(this.tbl); + } + first = false; + sb.append(")"); + return sb.toString(); } - public void setName(String name) { - this.name = name; + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values } - public void unsetName() { - this.name = null; + } + + public static class create_table_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("create_table_result"); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); + private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3); + private static final TField O4_FIELD_DESC = new TField("o4", TType.STRUCT, (short)4); + + private AlreadyExistsException o1; + public static final int O1 = 1; + private InvalidObjectException o2; + public static final int O2 = 2; + private MetaException o3; + public static final int O3 = 3; + private NoSuchObjectException o4; + public static final int O4 = 4; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { } - // Returns true if field name is set (has been asigned a value) and false otherwise - public boolean isSetName() { - return this.name != null; + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + put(O4, new FieldMetaData("o4", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(create_table_result.class, metaDataMap); } - public boolean isDeleteData() { - return this.deleteData; + public create_table_result() { } - public void setDeleteData(boolean deleteData) { - this.deleteData = deleteData; - this.__isset.deleteData = true; + public create_table_result( + AlreadyExistsException o1, + InvalidObjectException o2, + MetaException o3, + NoSuchObjectException o4) + { + this(); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + this.o4 = o4; } - public void unsetDeleteData() { - this.__isset.deleteData = false; + /** + * Performs a deep copy on other. 
+ */ + public create_table_result(create_table_result other) { + if (other.isSetO1()) { + this.o1 = new AlreadyExistsException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new NoSuchObjectException(other.o4); + } + } + + @Override + public create_table_result clone() { + return new create_table_result(this); + } + + public AlreadyExistsException getO1() { + return this.o1; + } + + public void setO1(AlreadyExistsException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + // Returns true if field o2 is set (has been asigned a value) and false otherwise + public boolean isSetO2() { + return this.o2 != null; + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + // Returns true if field o3 is set (has been asigned a value) and false otherwise + public boolean isSetO3() { + return this.o3 != null; + } + + public NoSuchObjectException getO4() { + return this.o4; + } + + public void setO4(NoSuchObjectException o4) { + this.o4 = o4; + } + + public void unsetO4() { + this.o4 = null; + } + + // Returns true if field o4 is set (has been asigned a value) and false otherwise + public boolean isSetO4() { + return this.o4 != null; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((AlreadyExistsException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + case O4: + if (value == null) { + unsetO4(); + } else { + setO4((NoSuchObjectException)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + case O4: + return getO4(); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + case O4: + return isSetO4(); + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_table_result) + return this.equals((create_table_result)that); + return false; + } + + public boolean equals(create_table_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && 
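/*
 * Editor's note, illustrative only: each generated struct pairs a copy
 * constructor ("Performs a deep copy on other.") with clone(), deep-copying
 * struct-typed members such as Table while sharing immutable Strings. Sketch,
 * where tbl is a hypothetical populated Table:
 *
 *   create_table_args a = new create_table_args(tbl);
 *   create_table_args b = a.clone();
 *   assert a.getTbl() != b.getTbl();  // distinct Table instances
 *   assert a.equals(b);               // but field-wise equal
 */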
that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new AlreadyExistsException(); + this.o1.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case O2: + if (field.type == TType.STRUCT) { + this.o2 = new InvalidObjectException(); + this.o2.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case O3: + if (field.type == TType.STRUCT) { + this.o3 = new MetaException(); + this.o3.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case O4: + if (field.type == TType.STRUCT) { + this.o4 = new NoSuchObjectException(); + this.o4.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); + } + + public void write(TProtocol oprot) throws TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO2()) { + oprot.writeFieldBegin(O2_FIELD_DESC); + this.o2.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO4()) { + oprot.writeFieldBegin(O4_FIELD_DESC); + this.o4.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_table_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values + } + + } + + public 
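/*
 * Editor's note, illustrative only: create_table_result above behaves like a
 * union; write() serializes exactly one of o1..o4, whichever isSet* matches
 * first (create_table returns void, so there is no success field). The
 * generated client stub turns the returned field back into a thrown exception.
 * Hedged usage sketch (client and tbl are hypothetical):
 *
 *   try {
 *     client.create_table(tbl);
 *   } catch (AlreadyExistsException e) {   // o1: table already exists
 *   } catch (InvalidObjectException e) {   // o2: malformed Table
 *   } catch (MetaException e) {            // o3: generic metastore failure
 *   } catch (NoSuchObjectException e) {    // o4: e.g. missing database
 *   }
 */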
static class drop_table_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("drop_table_args"); + private static final TField DBNAME_FIELD_DESC = new TField("dbname", TType.STRING, (short)1); + private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)2); + private static final TField DELETE_DATA_FIELD_DESC = new TField("deleteData", TType.BOOL, (short)3); + + private String dbname; + public static final int DBNAME = 1; + private String name; + public static final int NAME = 2; + private boolean deleteData; + public static final int DELETEDATA = 3; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + public boolean deleteData = false; + } + + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(DBNAME, new FieldMetaData("dbname", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(DELETEDATA, new FieldMetaData("deleteData", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.BOOL))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(drop_table_args.class, metaDataMap); + } + + public drop_table_args() { + } + + public drop_table_args( + String dbname, + String name, + boolean deleteData) + { + this(); + this.dbname = dbname; + this.name = name; + this.deleteData = deleteData; + this.__isset.deleteData = true; + } + + /** + * Performs a deep copy on other. + */ + public drop_table_args(drop_table_args other) { + if (other.isSetDbname()) { + this.dbname = other.dbname; + } + if (other.isSetName()) { + this.name = other.name; + } + __isset.deleteData = other.__isset.deleteData; + this.deleteData = other.deleteData; + } + + @Override + public drop_table_args clone() { + return new drop_table_args(this); + } + + public String getDbname() { + return this.dbname; + } + + public void setDbname(String dbname) { + this.dbname = dbname; + } + + public void unsetDbname() { + this.dbname = null; + } + + // Returns true if field dbname is set (has been asigned a value) and false otherwise + public boolean isSetDbname() { + return this.dbname != null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + // Returns true if field name is set (has been asigned a value) and false otherwise + public boolean isSetName() { + return this.name != null; + } + + public boolean isDeleteData() { + return this.deleteData; + } + + public void setDeleteData(boolean deleteData) { + this.deleteData = deleteData; + this.__isset.deleteData = true; + } + + public void unsetDeleteData() { + this.__isset.deleteData = false; + } + + // Returns true if field deleteData is set (has been asigned a value) and false otherwise + public boolean isSetDeleteData() { + return this.__isset.deleteData; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case DBNAME: + if (value == null) { + unsetDbname(); + } else { + setDbname((String)value); + } + break; + + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + case DELETEDATA: + if (value == null) { + unsetDeleteData(); + } else { + setDeleteData((Boolean)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + 
fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case DBNAME: + return getDbname(); + + case NAME: + return getName(); + + case DELETEDATA: + return new Boolean(isDeleteData()); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case DBNAME: + return isSetDbname(); + case NAME: + return isSetName(); + case DELETEDATA: + return isSetDeleteData(); + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_table_args) + return this.equals((drop_table_args)that); + return false; + } + + public boolean equals(drop_table_args that) { + if (that == null) + return false; + + boolean this_present_dbname = true && this.isSetDbname(); + boolean that_present_dbname = true && that.isSetDbname(); + if (this_present_dbname || that_present_dbname) { + if (!(this_present_dbname && that_present_dbname)) + return false; + if (!this.dbname.equals(that.dbname)) + return false; + } + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + boolean this_present_deleteData = true; + boolean that_present_deleteData = true; + if (this_present_deleteData || that_present_deleteData) { + if (!(this_present_deleteData && that_present_deleteData)) + return false; + if (this.deleteData != that.deleteData) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case DBNAME: + if (field.type == TType.STRING) { + this.dbname = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case NAME: + if (field.type == TType.STRING) { + this.name = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case DELETEDATA: + if (field.type == TType.BOOL) { + this.deleteData = iprot.readBool(); + this.__isset.deleteData = true; + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); + } + + public void write(TProtocol oprot) throws TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.dbname != null) { + oprot.writeFieldBegin(DBNAME_FIELD_DESC); + oprot.writeString(this.dbname); + oprot.writeFieldEnd(); + } + if (this.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(this.name); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); + oprot.writeBool(this.deleteData); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_table_args("); + boolean first = true; + + sb.append("dbname:"); + if (this.dbname == null) { + sb.append("null"); + } 
else { + sb.append(this.dbname); + } + first = false; + if (!first) sb.append(", "); + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + if (!first) sb.append(", "); + sb.append("deleteData:"); + sb.append(this.deleteData); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values + } + + } + + public static class drop_table_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("drop_table_result"); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)2); + + private NoSuchObjectException o1; + public static final int O1 = 1; + private MetaException o3; + public static final int O3 = 2; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + } + + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(drop_table_result.class, metaDataMap); + } + + public drop_table_result() { + } + + public drop_table_result( + NoSuchObjectException o1, + MetaException o3) + { + this(); + this.o1 = o1; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. + */ + public drop_table_result(drop_table_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + } + + @Override + public drop_table_result clone() { + return new drop_table_result(this); + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + // Returns true if field o3 is set (has been asigned a value) and false otherwise + public boolean isSetO3() { + return this.o3 != null; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case O1: + return getO1(); + + case O3: + return getO3(); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case O1: + return isSetO1(); + case O3: + return isSetO3(); + default: + throw new 
IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_table_result) + return this.equals((drop_table_result)that); + return false; + } + + public boolean equals(drop_table_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new NoSuchObjectException(); + this.o1.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case O3: + if (field.type == TType.STRUCT) { + this.o3 = new MetaException(); + this.o3.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); + } + + public void write(TProtocol oprot) throws TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_table_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values + } + + } + + public static class get_tables_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_tables_args"); + private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); + private static final TField PATTERN_FIELD_DESC = new TField("pattern", TType.STRING, (short)2); + + private String db_name; + public static final int DB_NAME = 1; + private String pattern; + public static final int PATTERN = 2; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + } + + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(PATTERN, new FieldMetaData("pattern", TFieldRequirementType.DEFAULT, + new 
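/*
 * Editor's note, illustrative only: unlike the nullable String fields, the
 * primitive boolean deleteData in drop_table_args cannot use null as its
 * "unset" marker, so the generated code tracks it through the nested Isset
 * holder (__isset.deleteData), flipped by the constructor, setDeleteData() and
 * read(). Sketch:
 *
 *   drop_table_args args = new drop_table_args();
 *   boolean before = args.isSetDeleteData();  // false: nothing assigned yet
 *   args.setDeleteData(true);
 *   boolean after = args.isSetDeleteData();   // true, despite being primitive
 */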
FieldValueMetaData(TType.STRING))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(get_tables_args.class, metaDataMap); + } + + public get_tables_args() { + } + + public get_tables_args( + String db_name, + String pattern) + { + this(); + this.db_name = db_name; + this.pattern = pattern; + } + + /** + * Performs a deep copy on other. + */ + public get_tables_args(get_tables_args other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetPattern()) { + this.pattern = other.pattern; + } + } + + @Override + public get_tables_args clone() { + return new get_tables_args(this); + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + // Returns true if field db_name is set (has been asigned a value) and false otherwise + public boolean isSetDb_name() { + return this.db_name != null; + } + + public String getPattern() { + return this.pattern; + } + + public void setPattern(String pattern) { + this.pattern = pattern; + } + + public void unsetPattern() { + this.pattern = null; } - // Returns true if field deleteData is set (has been asigned a value) and false otherwise - public boolean isSetDeleteData() { - return this.__isset.deleteData; + // Returns true if field pattern is set (has been asigned a value) and false otherwise + public boolean isSetPattern() { + return this.pattern != null; } public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case DBNAME: - if (value == null) { - unsetDbname(); - } else { - setDbname((String)value); - } - break; - - case NAME: + case DB_NAME: if (value == null) { - unsetName(); + unsetDb_name(); } else { - setName((String)value); + setDb_name((String)value); } break; - case DELETEDATA: + case PATTERN: if (value == null) { - unsetDeleteData(); + unsetPattern(); } else { - setDeleteData((Boolean)value); + setPattern((String)value); } break; @@ -8684,14 +10079,11 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { - case DBNAME: - return getDbname(); - - case NAME: - return getName(); + case DB_NAME: + return getDb_name(); - case DELETEDATA: - return new Boolean(isDeleteData()); + case PATTERN: + return getPattern(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); @@ -8701,12 +10093,10 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case DBNAME: - return isSetDbname(); - case NAME: - return isSetName(); - case DELETEDATA: - return isSetDeleteData(); + case DB_NAME: + return isSetDb_name(); + case PATTERN: + return isSetPattern(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -8716,39 +10106,30 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_table_args) - return this.equals((drop_table_args)that); + if (that instanceof get_tables_args) + return this.equals((get_tables_args)that); return false; } - public boolean equals(drop_table_args that) { + public boolean equals(get_tables_args that) { if (that == null) return false; - boolean this_present_dbname = true && this.isSetDbname(); - boolean that_present_dbname = true && that.isSetDbname(); - if (this_present_dbname || that_present_dbname) { - if 
(!(this_present_dbname && that_present_dbname)) - return false; - if (!this.dbname.equals(that.dbname)) - return false; - } - - boolean this_present_name = true && this.isSetName(); - boolean that_present_name = true && that.isSetName(); - if (this_present_name || that_present_name) { - if (!(this_present_name && that_present_name)) + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) return false; - if (!this.name.equals(that.name)) + if (!this.db_name.equals(that.db_name)) return false; } - boolean this_present_deleteData = true; - boolean that_present_deleteData = true; - if (this_present_deleteData || that_present_deleteData) { - if (!(this_present_deleteData && that_present_deleteData)) + boolean this_present_pattern = true && this.isSetPattern(); + boolean that_present_pattern = true && that.isSetPattern(); + if (this_present_pattern || that_present_pattern) { + if (!(this_present_pattern && that_present_pattern)) return false; - if (this.deleteData != that.deleteData) + if (!this.pattern.equals(that.pattern)) return false; } @@ -8771,24 +10152,16 @@ public class ThriftHiveMetastore { } switch (field.id) { - case DBNAME: + case DB_NAME: if (field.type == TType.STRING) { - this.dbname = iprot.readString(); + this.db_name = iprot.readString(); } else { TProtocolUtil.skip(iprot, field.type); } break; - case NAME: + case PATTERN: if (field.type == TType.STRING) { - this.name = iprot.readString(); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; - case DELETEDATA: - if (field.type == TType.BOOL) { - this.deleteData = iprot.readBool(); - this.__isset.deleteData = true; + this.pattern = iprot.readString(); } else { TProtocolUtil.skip(iprot, field.type); } @@ -8808,47 +10181,40 @@ public class ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.dbname != null) { - oprot.writeFieldBegin(DBNAME_FIELD_DESC); - oprot.writeString(this.dbname); + if (this.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(this.db_name); oprot.writeFieldEnd(); } - if (this.name != null) { - oprot.writeFieldBegin(NAME_FIELD_DESC); - oprot.writeString(this.name); + if (this.pattern != null) { + oprot.writeFieldBegin(PATTERN_FIELD_DESC); + oprot.writeString(this.pattern); oprot.writeFieldEnd(); } - oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); - oprot.writeBool(this.deleteData); - oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_table_args("); + StringBuilder sb = new StringBuilder("get_tables_args("); boolean first = true; - sb.append("dbname:"); - if (this.dbname == null) { + sb.append("db_name:"); + if (this.db_name == null) { sb.append("null"); } else { - sb.append(this.dbname); + sb.append(this.db_name); } first = false; if (!first) sb.append(", "); - sb.append("name:"); - if (this.name == null) { + sb.append("pattern:"); + if (this.pattern == null) { sb.append("null"); } else { - sb.append(this.name); + sb.append(this.pattern); } first = false; - if (!first) sb.append(", "); - sb.append("deleteData:"); - sb.append(this.deleteData); - first = false; sb.append(")"); return sb.toString(); } @@ -8860,109 +10226,129 @@ public class ThriftHiveMetastore { } - public static class drop_table_result implements TBase, java.io.Serializable, Cloneable { - private 
static final TStruct STRUCT_DESC = new TStruct("drop_table_result"); + public static class get_tables_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_tables_result"); + private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0); private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); - private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)2); - private NoSuchObjectException o1; + private List success; + public static final int SUCCESS = 0; + private MetaException o1; public static final int O1 = 1; - private MetaException o3; - public static final int O3 = 2; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, + new FieldValueMetaData(TType.STRING)))); put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRUCT))); }}); static { - FieldMetaData.addStructMetaDataMap(drop_table_result.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_tables_result.class, metaDataMap); } - public drop_table_result() { + public get_tables_result() { } - public drop_table_result( - NoSuchObjectException o1, - MetaException o3) + public get_tables_result( + List success, + MetaException o1) { this(); + this.success = success; this.o1 = o1; - this.o3 = o3; } /** * Performs a deep copy on other. */ - public drop_table_result(drop_table_result other) { - if (other.isSetO1()) { - this.o1 = new NoSuchObjectException(other.o1); + public get_tables_result(get_tables_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(); + for (String other_element : other.success) { + __this__success.add(other_element); + } + this.success = __this__success; } - if (other.isSetO3()) { - this.o3 = new MetaException(other.o3); + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); } } @Override - public drop_table_result clone() { - return new drop_table_result(this); + public get_tables_result clone() { + return new get_tables_result(this); } - public NoSuchObjectException getO1() { - return this.o1; + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); } - public void setO1(NoSuchObjectException o1) { - this.o1 = o1; + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); } - public void unsetO1() { - this.o1 = null; + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); } - // Returns true if field o1 is set (has been asigned a value) and false otherwise - public boolean isSetO1() { - return this.o1 != null; + public List getSuccess() { + return this.success; } - public MetaException getO3() { - return this.o3; + public void setSuccess(List success) { + this.success = success; } - public void setO3(MetaException o3) { - this.o3 = o3; + public void unsetSuccess() { + this.success = null; } - public void unsetO3() { - this.o3 = null; + // Returns true if field success is set (has been asigned a value) and false otherwise + public boolean isSetSuccess() { + return this.success != null; } - // Returns true if field o3 is set (has been asigned a value) and false otherwise - public boolean isSetO3() { - return this.o3 != null; + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; } public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case O1: + case SUCCESS: if (value == null) { - unsetO1(); + unsetSuccess(); } else { - setO1((NoSuchObjectException)value); + setSuccess((List)value); } break; - case O3: + case O1: if (value == null) { - unsetO3(); + unsetO1(); } else { - setO3((MetaException)value); + setO1((MetaException)value); } break; @@ -8973,12 +10359,12 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { + case SUCCESS: + return getSuccess(); + case O1: return getO1(); - case O3: - return getO3(); - default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -8987,10 +10373,10 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { + case SUCCESS: + return isSetSuccess(); case O1: return isSetO1(); - case O3: - return isSetO3(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -9000,15 +10386,24 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_table_result) - return this.equals((drop_table_result)that); + if (that instanceof get_tables_result) + return this.equals((get_tables_result)that); return false; } - public boolean equals(drop_table_result that) { + public boolean equals(get_tables_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -9018,15 +10413,6 @@ public class ThriftHiveMetastore { return false; } - boolean this_present_o3 = true && this.isSetO3(); - boolean that_present_o3 = true && that.isSetO3(); - if (this_present_o3 || that_present_o3) { - if (!(this_present_o3 && 
that_present_o3)) - return false; - if (!this.o3.equals(that.o3)) - return false; - } - return true; } @@ -9046,18 +10432,27 @@ public class ThriftHiveMetastore { } switch (field.id) { - case O1: - if (field.type == TType.STRUCT) { - this.o1 = new NoSuchObjectException(); - this.o1.read(iprot); + case SUCCESS: + if (field.type == TType.LIST) { + { + TList _list79 = iprot.readListBegin(); + this.success = new ArrayList(_list79.size); + for (int _i80 = 0; _i80 < _list79.size; ++_i80) + { + String _elem81; + _elem81 = iprot.readString(); + this.success.add(_elem81); + } + iprot.readListEnd(); + } } else { TProtocolUtil.skip(iprot, field.type); } break; - case O3: + case O1: if (field.type == TType.STRUCT) { - this.o3 = new MetaException(); - this.o3.read(iprot); + this.o1 = new MetaException(); + this.o1.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } @@ -9076,14 +10471,20 @@ public class ThriftHiveMetastore { public void write(TProtocol oprot) throws TException { oprot.writeStructBegin(STRUCT_DESC); - if (this.isSetO1()) { + if (this.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new TList(TType.STRING, this.success.size())); + for (String _iter82 : this.success) { + oprot.writeString(_iter82); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } else if (this.isSetO1()) { oprot.writeFieldBegin(O1_FIELD_DESC); this.o1.write(oprot); oprot.writeFieldEnd(); - } else if (this.isSetO3()) { - oprot.writeFieldBegin(O3_FIELD_DESC); - this.o3.write(oprot); - oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -9091,22 +10492,22 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_table_result("); + StringBuilder sb = new StringBuilder("get_tables_result("); boolean first = true; - sb.append("o1:"); - if (this.o1 == null) { + sb.append("success:"); + if (this.success == null) { sb.append("null"); } else { - sb.append(this.o1); + sb.append(this.success); } first = false; if (!first) sb.append(", "); - sb.append("o3:"); - if (this.o3 == null) { + sb.append("o1:"); + if (this.o1 == null) { sb.append("null"); } else { - sb.append(this.o3); + sb.append(this.o1); } first = false; sb.append(")"); @@ -9120,15 +10521,12 @@ public class ThriftHiveMetastore { } - public static class get_tables_args implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_tables_args"); + public static class get_all_tables_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_all_tables_args"); private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); - private static final TField PATTERN_FIELD_DESC = new TField("pattern", TType.STRING, (short)2); private String db_name; public static final int DB_NAME = 1; - private String pattern; - public static final int PATTERN = 2; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { @@ -9137,41 +10535,34 @@ public class ThriftHiveMetastore { public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(PATTERN, new FieldMetaData("pattern", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); }}); static { - FieldMetaData.addStructMetaDataMap(get_tables_args.class, 
metaDataMap); + FieldMetaData.addStructMetaDataMap(get_all_tables_args.class, metaDataMap); } - public get_tables_args() { + public get_all_tables_args() { } - public get_tables_args( - String db_name, - String pattern) + public get_all_tables_args( + String db_name) { this(); this.db_name = db_name; - this.pattern = pattern; } /** * Performs a deep copy on other. */ - public get_tables_args(get_tables_args other) { + public get_all_tables_args(get_all_tables_args other) { if (other.isSetDb_name()) { this.db_name = other.db_name; } - if (other.isSetPattern()) { - this.pattern = other.pattern; - } } @Override - public get_tables_args clone() { - return new get_tables_args(this); + public get_all_tables_args clone() { + return new get_all_tables_args(this); } public String getDb_name() { @@ -9191,23 +10582,6 @@ public class ThriftHiveMetastore { return this.db_name != null; } - public String getPattern() { - return this.pattern; - } - - public void setPattern(String pattern) { - this.pattern = pattern; - } - - public void unsetPattern() { - this.pattern = null; - } - - // Returns true if field pattern is set (has been asigned a value) and false otherwise - public boolean isSetPattern() { - return this.pattern != null; - } - public void setFieldValue(int fieldID, Object value) { switch (fieldID) { case DB_NAME: @@ -9218,14 +10592,6 @@ public class ThriftHiveMetastore { } break; - case PATTERN: - if (value == null) { - unsetPattern(); - } else { - setPattern((String)value); - } - break; - default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -9236,9 +10602,6 @@ public class ThriftHiveMetastore { case DB_NAME: return getDb_name(); - case PATTERN: - return getPattern(); - default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -9249,8 +10612,6 @@ public class ThriftHiveMetastore { switch (fieldID) { case DB_NAME: return isSetDb_name(); - case PATTERN: - return isSetPattern(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -9260,12 +10621,12 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_tables_args) - return this.equals((get_tables_args)that); + if (that instanceof get_all_tables_args) + return this.equals((get_all_tables_args)that); return false; } - public boolean equals(get_tables_args that) { + public boolean equals(get_all_tables_args that) { if (that == null) return false; @@ -9278,15 +10639,6 @@ public class ThriftHiveMetastore { return false; } - boolean this_present_pattern = true && this.isSetPattern(); - boolean that_present_pattern = true && that.isSetPattern(); - if (this_present_pattern || that_present_pattern) { - if (!(this_present_pattern && that_present_pattern)) - return false; - if (!this.pattern.equals(that.pattern)) - return false; - } - return true; } @@ -9313,13 +10665,6 @@ public class ThriftHiveMetastore { TProtocolUtil.skip(iprot, field.type); } break; - case PATTERN: - if (field.type == TType.STRING) { - this.pattern = iprot.readString(); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -9340,18 +10685,13 @@ public class ThriftHiveMetastore { oprot.writeString(this.db_name); oprot.writeFieldEnd(); } - if (this.pattern != null) { - oprot.writeFieldBegin(PATTERN_FIELD_DESC); - oprot.writeString(this.pattern); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override 
public String toString() { - StringBuilder sb = new StringBuilder("get_tables_args("); + StringBuilder sb = new StringBuilder("get_all_tables_args("); boolean first = true; sb.append("db_name:"); @@ -9361,14 +10701,6 @@ public class ThriftHiveMetastore { sb.append(this.db_name); } first = false; - if (!first) sb.append(", "); - sb.append("pattern:"); - if (this.pattern == null) { - sb.append("null"); - } else { - sb.append(this.pattern); - } - first = false; sb.append(")"); return sb.toString(); } @@ -9380,8 +10712,8 @@ public class ThriftHiveMetastore { } - public static class get_tables_result implements TBase, java.io.Serializable, Cloneable { - private static final TStruct STRUCT_DESC = new TStruct("get_tables_result"); + public static class get_all_tables_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_all_tables_result"); private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0); private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); @@ -9403,13 +10735,13 @@ public class ThriftHiveMetastore { }}); static { - FieldMetaData.addStructMetaDataMap(get_tables_result.class, metaDataMap); + FieldMetaData.addStructMetaDataMap(get_all_tables_result.class, metaDataMap); } - public get_tables_result() { + public get_all_tables_result() { } - public get_tables_result( + public get_all_tables_result( List success, MetaException o1) { @@ -9421,7 +10753,7 @@ public class ThriftHiveMetastore { /** * Performs a deep copy on other. */ - public get_tables_result(get_tables_result other) { + public get_all_tables_result(get_all_tables_result other) { if (other.isSetSuccess()) { List __this__success = new ArrayList(); for (String other_element : other.success) { @@ -9435,8 +10767,8 @@ public class ThriftHiveMetastore { } @Override - public get_tables_result clone() { - return new get_tables_result(this); + public get_all_tables_result clone() { + return new get_all_tables_result(this); } public int getSuccessSize() { @@ -9540,12 +10872,12 @@ public class ThriftHiveMetastore { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_tables_result) - return this.equals((get_tables_result)that); + if (that instanceof get_all_tables_result) + return this.equals((get_all_tables_result)that); return false; } - public boolean equals(get_tables_result that) { + public boolean equals(get_all_tables_result that) { if (that == null) return false; @@ -9589,13 +10921,13 @@ public class ThriftHiveMetastore { case SUCCESS: if (field.type == TType.LIST) { { - TList _list75 = iprot.readListBegin(); - this.success = new ArrayList(_list75.size); - for (int _i76 = 0; _i76 < _list75.size; ++_i76) + TList _list83 = iprot.readListBegin(); + this.success = new ArrayList(_list83.size); + for (int _i84 = 0; _i84 < _list83.size; ++_i84) { - String _elem77; - _elem77 = iprot.readString(); - this.success.add(_elem77); + String _elem85; + _elem85 = iprot.readString(); + this.success.add(_elem85); } iprot.readListEnd(); } @@ -9629,8 +10961,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.success.size())); - for (String _iter78 : this.success) { - oprot.writeString(_iter78); + for (String _iter86 : this.success) { + oprot.writeString(_iter86); } oprot.writeListEnd(); } @@ -9646,7 +10978,7 @@ public class ThriftHiveMetastore { @Override public String toString() { - StringBuilder 
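
Taken together, the structs above back a pair of listing calls on the generated Java client: the existing get_tables(db_name, pattern), which filters server-side, and the new get_all_tables(db_name), which skips pattern matching entirely. A minimal usage sketch, assuming the legacy com.facebook.thrift client packages this generated file uses and a metastore at a placeholder host/port:

    import java.util.List;
    import com.facebook.thrift.protocol.TBinaryProtocol;
    import com.facebook.thrift.transport.TSocket;
    import com.facebook.thrift.transport.TTransport;

    public class ListTables {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9083); // placeholder endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        List<String> matching = client.get_tables("default", "src*"); // pattern-filtered
        List<String> all = client.get_all_tables("default");          // full listing
        transport.close();
      }
    }
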
sb = new StringBuilder("get_tables_result("); + StringBuilder sb = new StringBuilder("get_all_tables_result("); boolean first = true; sb.append("success:"); @@ -11716,13 +13048,13 @@ public class ThriftHiveMetastore { case PART_VALS: if (field.type == TType.LIST) { { - TList _list79 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list79.size); - for (int _i80 = 0; _i80 < _list79.size; ++_i80) + TList _list87 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list87.size); + for (int _i88 = 0; _i88 < _list87.size; ++_i88) { - String _elem81; - _elem81 = iprot.readString(); - this.part_vals.add(_elem81); + String _elem89; + _elem89 = iprot.readString(); + this.part_vals.add(_elem89); } iprot.readListEnd(); } @@ -11759,8 +13091,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.part_vals.size())); - for (String _iter82 : this.part_vals) { - oprot.writeString(_iter82); + for (String _iter90 : this.part_vals) { + oprot.writeString(_iter90); } oprot.writeListEnd(); } @@ -13257,13 +14589,13 @@ public class ThriftHiveMetastore { case PART_VALS: if (field.type == TType.LIST) { { - TList _list83 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list83.size); - for (int _i84 = 0; _i84 < _list83.size; ++_i84) + TList _list91 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list91.size); + for (int _i92 = 0; _i92 < _list91.size; ++_i92) { - String _elem85; - _elem85 = iprot.readString(); - this.part_vals.add(_elem85); + String _elem93; + _elem93 = iprot.readString(); + this.part_vals.add(_elem93); } iprot.readListEnd(); } @@ -13308,8 +14640,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.part_vals.size())); - for (String _iter86 : this.part_vals) { - oprot.writeString(_iter86); + for (String _iter94 : this.part_vals) { + oprot.writeString(_iter94); } oprot.writeListEnd(); } @@ -14686,13 +16018,13 @@ public class ThriftHiveMetastore { case PART_VALS: if (field.type == TType.LIST) { { - TList _list87 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list87.size); - for (int _i88 = 0; _i88 < _list87.size; ++_i88) + TList _list95 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list95.size); + for (int _i96 = 0; _i96 < _list95.size; ++_i96) { - String _elem89; - _elem89 = iprot.readString(); - this.part_vals.add(_elem89); + String _elem97; + _elem97 = iprot.readString(); + this.part_vals.add(_elem97); } iprot.readListEnd(); } @@ -14729,8 +16061,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.part_vals.size())); - for (String _iter90 : this.part_vals) { - oprot.writeString(_iter90); + for (String _iter98 : this.part_vals) { + oprot.writeString(_iter98); } oprot.writeListEnd(); } @@ -16352,14 +17684,14 @@ public class ThriftHiveMetastore { case SUCCESS: if (field.type == TType.LIST) { { - TList _list91 = iprot.readListBegin(); - this.success = new ArrayList(_list91.size); - for (int _i92 = 0; _i92 < _list91.size; ++_i92) + TList _list99 = iprot.readListBegin(); + this.success = new ArrayList(_list99.size); + for (int _i100 = 0; _i100 < _list99.size; ++_i100) { - Partition _elem93; - _elem93 = new Partition(); - _elem93.read(iprot); - this.success.add(_elem93); + Partition _elem101; + _elem101 = new Partition(); + _elem101.read(iprot); + this.success.add(_elem101); } iprot.readListEnd(); } @@ 
-16401,8 +17733,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRUCT, this.success.size())); - for (Partition _iter94 : this.success) { - _iter94.write(oprot); + for (Partition _iter102 : this.success) { + _iter102.write(oprot); } oprot.writeListEnd(); } @@ -16996,13 +18328,13 @@ public class ThriftHiveMetastore { case SUCCESS: if (field.type == TType.LIST) { { - TList _list95 = iprot.readListBegin(); - this.success = new ArrayList(_list95.size); - for (int _i96 = 0; _i96 < _list95.size; ++_i96) + TList _list103 = iprot.readListBegin(); + this.success = new ArrayList(_list103.size); + for (int _i104 = 0; _i104 < _list103.size; ++_i104) { - String _elem97; - _elem97 = iprot.readString(); - this.success.add(_elem97); + String _elem105; + _elem105 = iprot.readString(); + this.success.add(_elem105); } iprot.readListEnd(); } @@ -17036,8 +18368,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.success.size())); - for (String _iter98 : this.success) { - oprot.writeString(_iter98); + for (String _iter106 : this.success) { + oprot.writeString(_iter106); } oprot.writeListEnd(); } @@ -17407,13 +18739,13 @@ public class ThriftHiveMetastore { case PART_VALS: if (field.type == TType.LIST) { { - TList _list99 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list99.size); - for (int _i100 = 0; _i100 < _list99.size; ++_i100) + TList _list107 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list107.size); + for (int _i108 = 0; _i108 < _list107.size; ++_i108) { - String _elem101; - _elem101 = iprot.readString(); - this.part_vals.add(_elem101); + String _elem109; + _elem109 = iprot.readString(); + this.part_vals.add(_elem109); } iprot.readListEnd(); } @@ -17458,8 +18790,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.part_vals.size())); - for (String _iter102 : this.part_vals) { - oprot.writeString(_iter102); + for (String _iter110 : this.part_vals) { + oprot.writeString(_iter110); } oprot.writeListEnd(); } @@ -17724,14 +19056,14 @@ public class ThriftHiveMetastore { case SUCCESS: if (field.type == TType.LIST) { { - TList _list103 = iprot.readListBegin(); - this.success = new ArrayList(_list103.size); - for (int _i104 = 0; _i104 < _list103.size; ++_i104) + TList _list111 = iprot.readListBegin(); + this.success = new ArrayList(_list111.size); + for (int _i112 = 0; _i112 < _list111.size; ++_i112) { - Partition _elem105; - _elem105 = new Partition(); - _elem105.read(iprot); - this.success.add(_elem105); + Partition _elem113; + _elem113 = new Partition(); + _elem113.read(iprot); + this.success.add(_elem113); } iprot.readListEnd(); } @@ -17765,8 +19097,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRUCT, this.success.size())); - for (Partition _iter106 : this.success) { - _iter106.write(oprot); + for (Partition _iter114 : this.success) { + _iter114.write(oprot); } oprot.writeListEnd(); } @@ -18136,13 +19468,13 @@ public class ThriftHiveMetastore { case PART_VALS: if (field.type == TType.LIST) { { - TList _list107 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list107.size); - for (int _i108 = 0; _i108 < _list107.size; ++_i108) + TList _list115 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list115.size); + for (int _i116 = 0; _i116 < _list115.size; 
++_i116) { - String _elem109; - _elem109 = iprot.readString(); - this.part_vals.add(_elem109); + String _elem117; + _elem117 = iprot.readString(); + this.part_vals.add(_elem117); } iprot.readListEnd(); } @@ -18187,8 +19519,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.part_vals.size())); - for (String _iter110 : this.part_vals) { - oprot.writeString(_iter110); + for (String _iter118 : this.part_vals) { + oprot.writeString(_iter118); } oprot.writeListEnd(); } @@ -18453,13 +19785,13 @@ public class ThriftHiveMetastore { case SUCCESS: if (field.type == TType.LIST) { { - TList _list111 = iprot.readListBegin(); - this.success = new ArrayList(_list111.size); - for (int _i112 = 0; _i112 < _list111.size; ++_i112) + TList _list119 = iprot.readListBegin(); + this.success = new ArrayList(_list119.size); + for (int _i120 = 0; _i120 < _list119.size; ++_i120) { - String _elem113; - _elem113 = iprot.readString(); - this.success.add(_elem113); + String _elem121; + _elem121 = iprot.readString(); + this.success.add(_elem121); } iprot.readListEnd(); } @@ -18493,8 +19825,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.success.size())); - for (String _iter114 : this.success) { - oprot.writeString(_iter114); + for (String _iter122 : this.success) { + oprot.writeString(_iter122); } oprot.writeListEnd(); } @@ -20048,13 +21380,13 @@ public class ThriftHiveMetastore { case SUCCESS: if (field.type == TType.LIST) { { - TList _list115 = iprot.readListBegin(); - this.success = new ArrayList(_list115.size); - for (int _i116 = 0; _i116 < _list115.size; ++_i116) + TList _list123 = iprot.readListBegin(); + this.success = new ArrayList(_list123.size); + for (int _i124 = 0; _i124 < _list123.size; ++_i124) { - String _elem117; - _elem117 = iprot.readString(); - this.success.add(_elem117); + String _elem125; + _elem125 = iprot.readString(); + this.success.add(_elem125); } iprot.readListEnd(); } @@ -20088,8 +21420,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.success.size())); - for (String _iter118 : this.success) { - oprot.writeString(_iter118); + for (String _iter126 : this.success) { + oprot.writeString(_iter126); } oprot.writeListEnd(); } @@ -20539,15 +21871,15 @@ public class ThriftHiveMetastore { case SUCCESS: if (field.type == TType.MAP) { { - TMap _map119 = iprot.readMapBegin(); - this.success = new HashMap(2*_map119.size); - for (int _i120 = 0; _i120 < _map119.size; ++_i120) + TMap _map127 = iprot.readMapBegin(); + this.success = new HashMap(2*_map127.size); + for (int _i128 = 0; _i128 < _map127.size; ++_i128) { - String _key121; - String _val122; - _key121 = iprot.readString(); - _val122 = iprot.readString(); - this.success.put(_key121, _val122); + String _key129; + String _val130; + _key129 = iprot.readString(); + _val130 = iprot.readString(); + this.success.put(_key129, _val130); } iprot.readMapEnd(); } @@ -20581,9 +21913,9 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new TMap(TType.STRING, TType.STRING, this.success.size())); - for (Map.Entry _iter123 : this.success.entrySet()) { - oprot.writeString(_iter123.getKey()); - oprot.writeString(_iter123.getValue()); + for (Map.Entry _iter131 : this.success.entrySet()) { + oprot.writeString(_iter131.getKey()); + oprot.writeString(_iter131.getValue()); 
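
Every remaining Java hunk in this file is mechanical: the Thrift generator numbers its scratch variables (_list, _elem, _iter, _map, _key, _val) sequentially across the whole file, so inserting the get_tables/get_all_tables structs shifts each index by eight without changing behavior. Each list hunk is an instance of one decode pattern, shown here as a hypothetical standalone helper (names are illustrative; the protocol classes are the same ones this file already relies on):

    import java.util.ArrayList;
    import java.util.List;
    import com.facebook.thrift.TException;
    import com.facebook.thrift.protocol.TList;
    import com.facebook.thrift.protocol.TProtocol;

    final class ListDecode {
      // Equivalent of the generated loops: read the element count,
      // then deserialize that many strings in order.
      static List<String> readStringList(TProtocol iprot) throws TException {
        TList header = iprot.readListBegin();
        List<String> out = new ArrayList<String>(header.size);
        for (int i = 0; i < header.size; ++i) {
          out.add(iprot.readString());
        }
        iprot.readListEnd();
        return out;
      }
    }
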
} oprot.writeMapEnd(); } @@ -23254,14 +24586,14 @@ public class ThriftHiveMetastore { case SUCCESS: if (field.type == TType.LIST) { { - TList _list124 = iprot.readListBegin(); - this.success = new ArrayList(_list124.size); - for (int _i125 = 0; _i125 < _list124.size; ++_i125) + TList _list132 = iprot.readListBegin(); + this.success = new ArrayList(_list132.size); + for (int _i133 = 0; _i133 < _list132.size; ++_i133) { - Index _elem126; - _elem126 = new Index(); - _elem126.read(iprot); - this.success.add(_elem126); + Index _elem134; + _elem134 = new Index(); + _elem134.read(iprot); + this.success.add(_elem134); } iprot.readListEnd(); } @@ -23303,8 +24635,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRUCT, this.success.size())); - for (Index _iter127 : this.success) { - _iter127.write(oprot); + for (Index _iter135 : this.success) { + _iter135.write(oprot); } oprot.writeListEnd(); } @@ -23898,13 +25230,13 @@ public class ThriftHiveMetastore { case SUCCESS: if (field.type == TType.LIST) { { - TList _list128 = iprot.readListBegin(); - this.success = new ArrayList(_list128.size); - for (int _i129 = 0; _i129 < _list128.size; ++_i129) + TList _list136 = iprot.readListBegin(); + this.success = new ArrayList(_list136.size); + for (int _i137 = 0; _i137 < _list136.size; ++_i137) { - String _elem130; - _elem130 = iprot.readString(); - this.success.add(_elem130); + String _elem138; + _elem138 = iprot.readString(); + this.success.add(_elem138); } iprot.readListEnd(); } @@ -23938,8 +25270,8 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.success.size())); - for (String _iter131 : this.success) { - oprot.writeString(_iter131); + for (String _iter139 : this.success) { + oprot.writeString(_iter139); } oprot.writeListEnd(); } diff --git metastore/src/gen-php/ThriftHiveMetastore.php metastore/src/gen-php/ThriftHiveMetastore.php index ea4add5..dfb68b1 100644 --- metastore/src/gen-php/ThriftHiveMetastore.php +++ metastore/src/gen-php/ThriftHiveMetastore.php @@ -10,10 +10,11 @@ include_once $GLOBALS['THRIFT_ROOT'].'/packages/hive_metastore/hive_metastore_ty include_once $GLOBALS['THRIFT_ROOT'].'/packages/fb303/FacebookService.php'; interface ThriftHiveMetastoreIf extends FacebookServiceIf { - public function create_database($name, $description); + public function create_database($database); public function get_database($name); - public function drop_database($name); - public function get_databases(); + public function drop_database($name, $deleteData); + public function get_databases($pattern); + public function get_all_databases(); public function get_type($name); public function create_type($type); public function drop_type($type); @@ -23,6 +24,7 @@ interface ThriftHiveMetastoreIf extends FacebookServiceIf { public function create_table($tbl); public function drop_table($dbname, $name, $deleteData); public function get_tables($db_name, $pattern); + public function get_all_tables($db_name); public function get_table($dbname, $tbl_name); public function alter_table($dbname, $tbl_name, $new_tbl); public function add_partition($new_part); @@ -52,17 +54,16 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH parent::__construct($input, $output); } - public function create_database($name, $description) + public function create_database($database) { - $this->send_create_database($name, $description); - return 
$this->recv_create_database(); + $this->send_create_database($database); + $this->recv_create_database(); } - public function send_create_database($name, $description) + public function send_create_database($database) { $args = new metastore_ThriftHiveMetastore_create_database_args(); - $args->name = $name; - $args->description = $description; + $args->database = $database; $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -98,16 +99,16 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH $result->read($this->input_); $this->input_->readMessageEnd(); } - if ($result->success !== null) { - return $result->success; - } if ($result->o1 !== null) { throw $result->o1; } if ($result->o2 !== null) { throw $result->o2; } - throw new Exception("create_database failed: unknown result"); + if ($result->o3 !== null) { + throw $result->o3; + } + return; } public function get_database($name) @@ -167,16 +168,17 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH throw new Exception("get_database failed: unknown result"); } - public function drop_database($name) + public function drop_database($name, $deleteData) { - $this->send_drop_database($name); - return $this->recv_drop_database(); + $this->send_drop_database($name, $deleteData); + $this->recv_drop_database(); } - public function send_drop_database($name) + public function send_drop_database($name, $deleteData) { $args = new metastore_ThriftHiveMetastore_drop_database_args(); $args->name = $name; + $args->deleteData = $deleteData; $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -212,24 +214,28 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH $result->read($this->input_); $this->input_->readMessageEnd(); } - if ($result->success !== null) { - return $result->success; + if ($result->o1 !== null) { + throw $result->o1; } if ($result->o2 !== null) { throw $result->o2; } - throw new Exception("drop_database failed: unknown result"); + if ($result->o3 !== null) { + throw $result->o3; + } + return; } - public function get_databases() + public function get_databases($pattern) { - $this->send_get_databases(); + $this->send_get_databases($pattern); return $this->recv_get_databases(); } - public function send_get_databases() + public function send_get_databases($pattern) { $args = new metastore_ThriftHiveMetastore_get_databases_args(); + $args->pattern = $pattern; $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -274,6 +280,59 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH throw new Exception("get_databases failed: unknown result"); } + public function get_all_databases() + { + $this->send_get_all_databases(); + return $this->recv_get_all_databases(); + } + + public function send_get_all_databases() + { + $args = new metastore_ThriftHiveMetastore_get_all_databases_args(); + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_all_databases', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + 
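
These client changes mirror the IDL: create_database now ships a whole Database struct (name, description, and the new locationUri) and returns void, surfacing failures purely as exceptions, while drop_database gains a deleteData flag controlling whether the database's contents are removed along with its metadata. A sketch of the equivalent calls on the generated Java client, continuing the client handle from the listing sketch above (field values are placeholders; setters follow the usual Thrift bean pattern):

    Database db = new Database();
    db.setName("analytics");                                    // placeholder names
    db.setDescription("reporting tables");
    db.setLocationUri("hdfs://nn:8020/warehouse/analytics.db"); // new in this patch

    // Throws AlreadyExistsException, InvalidObjectException, or MetaException.
    client.create_database(db);

    // deleteData=true drops the stored data too; false keeps it in place.
    // Throws NoSuchObjectException, InvalidOperationException, or MetaException.
    client.drop_database("analytics", true);
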
$this->output_->writeMessageBegin('get_all_databases', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_all_databases() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'metastore_ThriftHiveMetastore_get_all_databases_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new metastore_ThriftHiveMetastore_get_all_databases_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + throw new Exception("get_all_databases failed: unknown result"); + } + public function get_type($name) { $this->send_get_type($name); @@ -322,6 +381,9 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH if ($result->success !== null) { return $result->success; } + if ($result->o1 !== null) { + throw $result->o1; + } if ($result->o2 !== null) { throw $result->o2; } @@ -436,6 +498,9 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH if ($result->success !== null) { return $result->success; } + if ($result->o1 !== null) { + throw $result->o1; + } if ($result->o2 !== null) { throw $result->o2; } @@ -789,6 +854,60 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH throw new Exception("get_tables failed: unknown result"); } + public function get_all_tables($db_name) + { + $this->send_get_all_tables($db_name); + return $this->recv_get_all_tables(); + } + + public function send_get_all_tables($db_name) + { + $args = new metastore_ThriftHiveMetastore_get_all_tables_args(); + $args->db_name = $db_name; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_all_tables', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_all_tables', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_all_tables() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'metastore_ThriftHiveMetastore_get_all_tables_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new metastore_ThriftHiveMetastore_get_all_tables_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if 
($result->o1 !== null) { + throw $result->o1; + } + throw new Exception("get_all_tables failed: unknown result"); + } + public function get_table($dbname, $tbl_name) { $this->send_get_table($dbname, $tbl_name); @@ -2075,28 +2194,21 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH class metastore_ThriftHiveMetastore_create_database_args { static $_TSPEC; - public $name = null; - public $description = null; + public $database = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'name', - 'type' => TType::STRING, - ), - 2 => array( - 'var' => 'description', - 'type' => TType::STRING, + 'var' => 'database', + 'type' => TType::STRUCT, + 'class' => 'metastore_Database', ), ); } if (is_array($vals)) { - if (isset($vals['name'])) { - $this->name = $vals['name']; - } - if (isset($vals['description'])) { - $this->description = $vals['description']; + if (isset($vals['database'])) { + $this->database = $vals['database']; } } } @@ -2121,15 +2233,9 @@ class metastore_ThriftHiveMetastore_create_database_args { switch ($fid) { case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->name); - } else { - $xfer += $input->skip($ftype); - } - break; - case 2: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->description); + if ($ftype == TType::STRUCT) { + $this->database = new metastore_Database(); + $xfer += $this->database->read($input); } else { $xfer += $input->skip($ftype); } @@ -2147,14 +2253,12 @@ class metastore_ThriftHiveMetastore_create_database_args { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_args'); - if ($this->name !== null) { - $xfer += $output->writeFieldBegin('name', TType::STRING, 1); - $xfer += $output->writeString($this->name); - $xfer += $output->writeFieldEnd(); - } - if ($this->description !== null) { - $xfer += $output->writeFieldBegin('description', TType::STRING, 2); - $xfer += $output->writeString($this->description); + if ($this->database !== null) { + if (!is_object($this->database)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('database', TType::STRUCT, 1); + $xfer += $this->database->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -2167,17 +2271,13 @@ class metastore_ThriftHiveMetastore_create_database_args { class metastore_ThriftHiveMetastore_create_database_result { static $_TSPEC; - public $success = null; public $o1 = null; public $o2 = null; + public $o3 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::BOOL, - ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, @@ -2186,20 +2286,25 @@ class metastore_ThriftHiveMetastore_create_database_result { 2 => array( 'var' => 'o2', 'type' => TType::STRUCT, + 'class' => 'metastore_InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, 'class' => 'metastore_MetaException', ), ); } if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } if (isset($vals['o1'])) { $this->o1 = $vals['o1']; } if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } } } @@ -2222,13 +2327,6 @@ class metastore_ThriftHiveMetastore_create_database_result { } switch ($fid) { - 
case 0: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->success); - } else { - $xfer += $input->skip($ftype); - } - break; case 1: if ($ftype == TType::STRUCT) { $this->o1 = new metastore_AlreadyExistsException(); @@ -2239,12 +2337,20 @@ class metastore_ThriftHiveMetastore_create_database_result { break; case 2: if ($ftype == TType::STRUCT) { - $this->o2 = new metastore_MetaException(); + $this->o2 = new metastore_InvalidObjectException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new metastore_MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2258,11 +2364,6 @@ class metastore_ThriftHiveMetastore_create_database_result { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_result'); - if ($this->success !== null) { - $xfer += $output->writeFieldBegin('success', TType::BOOL, 0); - $xfer += $output->writeBool($this->success); - $xfer += $output->writeFieldEnd(); - } if ($this->o1 !== null) { $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); $xfer += $this->o1->write($output); @@ -2273,6 +2374,11 @@ class metastore_ThriftHiveMetastore_create_database_result { $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -2477,6 +2583,7 @@ class metastore_ThriftHiveMetastore_drop_database_args { static $_TSPEC; public $name = null; + public $deleteData = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -2485,12 +2592,19 @@ class metastore_ThriftHiveMetastore_drop_database_args { 'var' => 'name', 'type' => TType::STRING, ), + 2 => array( + 'var' => 'deleteData', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { if (isset($vals['name'])) { $this->name = $vals['name']; } + if (isset($vals['deleteData'])) { + $this->deleteData = $vals['deleteData']; + } } } @@ -2520,6 +2634,13 @@ class metastore_ThriftHiveMetastore_drop_database_args { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->deleteData); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2538,6 +2659,11 @@ class metastore_ThriftHiveMetastore_drop_database_args { $xfer += $output->writeString($this->name); $xfer += $output->writeFieldEnd(); } + if ($this->deleteData !== null) { + $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 2); + $xfer += $output->writeBool($this->deleteData); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -2548,30 +2674,40 @@ class metastore_ThriftHiveMetastore_drop_database_args { class metastore_ThriftHiveMetastore_drop_database_result { static $_TSPEC; - public $success = null; + public $o1 = null; public $o2 = null; + public $o3 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::BOOL, + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'metastore_NoSuchObjectException', ), 2 => 
array( 'var' => 'o2', 'type' => TType::STRUCT, + 'class' => 'metastore_InvalidOperationException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, 'class' => 'metastore_MetaException', ), ); } if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; } if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } } } @@ -2594,21 +2730,30 @@ class metastore_ThriftHiveMetastore_drop_database_result { } switch ($fid) { - case 0: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->success); + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new metastore_NoSuchObjectException(); + $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRUCT) { - $this->o2 = new metastore_MetaException(); + $this->o2 = new metastore_InvalidOperationException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new metastore_MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2622,9 +2767,9 @@ class metastore_ThriftHiveMetastore_drop_database_result { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_database_result'); - if ($this->success !== null) { - $xfer += $output->writeFieldBegin('success', TType::BOOL, 0); - $xfer += $output->writeBool($this->success); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); $xfer += $output->writeFieldEnd(); } if ($this->o2 !== null) { @@ -2632,6 +2777,11 @@ class metastore_ThriftHiveMetastore_drop_database_result { $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -2642,13 +2792,23 @@ class metastore_ThriftHiveMetastore_drop_database_result { class metastore_ThriftHiveMetastore_get_databases_args { static $_TSPEC; + public $pattern = null; - public function __construct() { + public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( + 1 => array( + 'var' => 'pattern', + 'type' => TType::STRING, + ), ); } - } + if (is_array($vals)) { + if (isset($vals['pattern'])) { + $this->pattern = $vals['pattern']; + } + } + } public function getName() { return 'ThriftHiveMetastore_get_databases_args'; @@ -2669,6 +2829,13 @@ class metastore_ThriftHiveMetastore_get_databases_args { } switch ($fid) { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->pattern); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2682,6 +2849,11 @@ class metastore_ThriftHiveMetastore_get_databases_args { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_databases_args'); + if ($this->pattern !== null) { + $xfer += $output->writeFieldBegin('pattern', TType::STRING, 1); + $xfer += $output->writeString($this->pattern); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += 
$output->writeStructEnd(); return $xfer; @@ -2809,6 +2981,176 @@ class metastore_ThriftHiveMetastore_get_databases_result { } +class metastore_ThriftHiveMetastore_get_all_databases_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_all_databases_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_databases_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class metastore_ThriftHiveMetastore_get_all_databases_result { + static $_TSPEC; + + public $success = null; + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'metastore_MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_all_databases_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size110 = 0; + $_etype113 = 0; + $xfer += $input->readListBegin($_etype113, $_size110); + for ($_i114 = 0; $_i114 < $_size110; ++$_i114) + { + $elem115 = null; + $xfer += $input->readString($elem115); + $this->success []= $elem115; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new metastore_MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_databases_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRING, count($this->success)); + { + foreach ($this->success as $iter116) + { + $xfer += $output->writeString($iter116); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += 
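
The database-listing calls now split the same way the table-listing ones do: get_databases takes a server-side pattern, and the new get_all_databases returns the unfiltered list. Continuing the same client handle (the pattern literal is a placeholder):

    List<String> testDbs = client.get_databases("test*"); // pattern-filtered, throws MetaException
    List<String> allDbs  = client.get_all_databases();    // complete listing, throws MetaException
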
$this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class metastore_ThriftHiveMetastore_get_type_args { static $_TSPEC; @@ -2885,6 +3227,7 @@ class metastore_ThriftHiveMetastore_get_type_result { static $_TSPEC; public $success = null; + public $o1 = null; public $o2 = null; public function __construct($vals=null) { @@ -2896,16 +3239,24 @@ class metastore_ThriftHiveMetastore_get_type_result { 'class' => 'metastore_Type', ), 1 => array( - 'var' => 'o2', + 'var' => 'o1', 'type' => TType::STRUCT, 'class' => 'metastore_MetaException', ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'metastore_NoSuchObjectException', + ), ); } if (is_array($vals)) { if (isset($vals['success'])) { $this->success = $vals['success']; } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } @@ -2941,7 +3292,15 @@ class metastore_ThriftHiveMetastore_get_type_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o2 = new metastore_MetaException(); + $this->o1 = new metastore_MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new metastore_NoSuchObjectException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); @@ -2968,8 +3327,13 @@ class metastore_ThriftHiveMetastore_get_type_result { $xfer += $this->success->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } if ($this->o2 !== null) { - $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 1); + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } @@ -3271,6 +3635,7 @@ class metastore_ThriftHiveMetastore_drop_type_result { static $_TSPEC; public $success = null; + public $o1 = null; public $o2 = null; public function __construct($vals=null) { @@ -3281,16 +3646,24 @@ class metastore_ThriftHiveMetastore_drop_type_result { 'type' => TType::BOOL, ), 1 => array( - 'var' => 'o2', + 'var' => 'o1', 'type' => TType::STRUCT, 'class' => 'metastore_MetaException', ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'metastore_NoSuchObjectException', + ), ); } if (is_array($vals)) { if (isset($vals['success'])) { $this->success = $vals['success']; } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } @@ -3325,7 +3698,15 @@ class metastore_ThriftHiveMetastore_drop_type_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o2 = new metastore_MetaException(); + $this->o1 = new metastore_MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new metastore_NoSuchObjectException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); @@ -3349,8 +3730,13 @@ class metastore_ThriftHiveMetastore_drop_type_result { $xfer += $output->writeBool($this->success); $xfer += $output->writeFieldEnd(); } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } if ($this->o2 
!== null) { - $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 1); + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } @@ -3494,18 +3880,18 @@ class metastore_ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size110 = 0; - $_ktype111 = 0; - $_vtype112 = 0; - $xfer += $input->readMapBegin($_ktype111, $_vtype112, $_size110); - for ($_i114 = 0; $_i114 < $_size110; ++$_i114) + $_size117 = 0; + $_ktype118 = 0; + $_vtype119 = 0; + $xfer += $input->readMapBegin($_ktype118, $_vtype119, $_size117); + for ($_i121 = 0; $_i121 < $_size117; ++$_i121) { - $key115 = ''; - $val116 = new metastore_Type(); - $xfer += $input->readString($key115); - $val116 = new metastore_Type(); - $xfer += $val116->read($input); - $this->success[$key115] = $val116; + $key122 = ''; + $val123 = new metastore_Type(); + $xfer += $input->readString($key122); + $val123 = new metastore_Type(); + $xfer += $val123->read($input); + $this->success[$key122] = $val123; } $xfer += $input->readMapEnd(); } else { @@ -3541,10 +3927,10 @@ class metastore_ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter117 => $viter118) + foreach ($this->success as $kiter124 => $viter125) { - $xfer += $output->writeString($kiter117); - $xfer += $viter118->write($output); + $xfer += $output->writeString($kiter124); + $xfer += $viter125->write($output); } } $output->writeMapEnd(); @@ -3730,15 +4116,15 @@ class metastore_ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size119 = 0; - $_etype122 = 0; - $xfer += $input->readListBegin($_etype122, $_size119); - for ($_i123 = 0; $_i123 < $_size119; ++$_i123) + $_size126 = 0; + $_etype129 = 0; + $xfer += $input->readListBegin($_etype129, $_size126); + for ($_i130 = 0; $_i130 < $_size126; ++$_i130) { - $elem124 = null; - $elem124 = new metastore_FieldSchema(); - $xfer += $elem124->read($input); - $this->success []= $elem124; + $elem131 = null; + $elem131 = new metastore_FieldSchema(); + $xfer += $elem131->read($input); + $this->success []= $elem131; } $xfer += $input->readListEnd(); } else { @@ -3790,9 +4176,9 @@ class metastore_ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter125) + foreach ($this->success as $iter132) { - $xfer += $iter125->write($output); + $xfer += $iter132->write($output); } } $output->writeListEnd(); @@ -3988,15 +4374,15 @@ class metastore_ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size126 = 0; - $_etype129 = 0; - $xfer += $input->readListBegin($_etype129, $_size126); - for ($_i130 = 0; $_i130 < $_size126; ++$_i130) + $_size133 = 0; + $_etype136 = 0; + $xfer += $input->readListBegin($_etype136, $_size133); + for ($_i137 = 0; $_i137 < $_size133; ++$_i137) { - $elem131 = null; - $elem131 = new metastore_FieldSchema(); - $xfer += $elem131->read($input); - $this->success []= $elem131; + $elem138 = null; + $elem138 = new metastore_FieldSchema(); + $xfer += $elem138->read($input); + $this->success []= $elem138; } $xfer += $input->readListEnd(); } else { @@ -4048,9 +4434,9 @@ class metastore_ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter132) + 
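
get_type and drop_type previously reported every failure through a single MetaException in field 1; after this change MetaException moves to o1 and a distinct NoSuchObjectException arrives as o2, so callers can tell a missing type apart from a metastore fault. On the Java side the distinction looks like this (a sketch; the type name is a placeholder):

    try {
      Type t = client.get_type("complex");
    } catch (NoSuchObjectException e) {
      // no such type registered -- recoverable for the caller
    } catch (MetaException e) {
      // metastore-side failure
    }
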
foreach ($this->success as $iter139) { - $xfer += $iter132->write($output); + $xfer += $iter139->write($output); } } $output->writeListEnd(); @@ -4396,9 +4782,197 @@ class metastore_ThriftHiveMetastore_drop_table_args { $xfer += $output->writeString($this->name); $xfer += $output->writeFieldEnd(); } - if ($this->deleteData !== null) { - $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 3); - $xfer += $output->writeBool($this->deleteData); + if ($this->deleteData !== null) { + $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 3); + $xfer += $output->writeBool($this->deleteData); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class metastore_ThriftHiveMetastore_drop_table_result { + static $_TSPEC; + + public $o1 = null; + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'metastore_NoSuchObjectException', + ), + 2 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => 'metastore_MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_table_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new metastore_NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o3 = new metastore_MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_table_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 2); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class metastore_ThriftHiveMetastore_get_tables_args { + static $_TSPEC; + + public $db_name = null; + public $pattern = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'pattern', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; + } + if (isset($vals['pattern'])) { + $this->pattern = $vals['pattern']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_tables_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += 
$input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->pattern); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_tables_args'); + if ($this->db_name !== null) { + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeString($this->db_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->pattern !== null) { + $xfer += $output->writeFieldBegin('pattern', TType::STRING, 2); + $xfer += $output->writeString($this->pattern); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -4408,39 +4982,42 @@ class metastore_ThriftHiveMetastore_drop_table_args { } -class metastore_ThriftHiveMetastore_drop_table_result { +class metastore_ThriftHiveMetastore_get_tables_result { static $_TSPEC; + public $success = null; public $o1 = null; - public $o3 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, - 'class' => 'metastore_NoSuchObjectException', - ), - 2 => array( - 'var' => 'o3', - 'type' => TType::STRUCT, 'class' => 'metastore_MetaException', ), ); } if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } if (isset($vals['o1'])) { $this->o1 = $vals['o1']; } - if (isset($vals['o3'])) { - $this->o3 = $vals['o3']; - } } } public function getName() { - return 'ThriftHiveMetastore_drop_table_result'; + return 'ThriftHiveMetastore_get_tables_result'; } public function read($input) @@ -4458,18 +5035,27 @@ class metastore_ThriftHiveMetastore_drop_table_result { } switch ($fid) { - case 1: - if ($ftype == TType::STRUCT) { - $this->o1 = new metastore_NoSuchObjectException(); - $xfer += $this->o1->read($input); + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size140 = 0; + $_etype143 = 0; + $xfer += $input->readListBegin($_etype143, $_size140); + for ($_i144 = 0; $_i144 < $_size140; ++$_i144) + { + $elem145 = null; + $xfer += $input->readString($elem145); + $this->success []= $elem145; + } + $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; - case 2: + case 1: if ($ftype == TType::STRUCT) { - $this->o3 = new metastore_MetaException(); - $xfer += $this->o3->read($input); + $this->o1 = new metastore_MetaException(); + $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); } @@ -4486,17 +5072,29 @@ class metastore_ThriftHiveMetastore_drop_table_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_table_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_tables_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', 
TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRING, count($this->success)); + { + foreach ($this->success as $iter146) + { + $xfer += $output->writeString($iter146); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } if ($this->o1 !== null) { $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); $xfer += $this->o1->write($output); $xfer += $output->writeFieldEnd(); } - if ($this->o3 !== null) { - $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 2); - $xfer += $this->o3->write($output); - $xfer += $output->writeFieldEnd(); - } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -4504,11 +5102,10 @@ class metastore_ThriftHiveMetastore_drop_table_result { } -class metastore_ThriftHiveMetastore_get_tables_args { +class metastore_ThriftHiveMetastore_get_all_tables_args { static $_TSPEC; public $db_name = null; - public $pattern = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -4517,24 +5114,17 @@ class metastore_ThriftHiveMetastore_get_tables_args { 'var' => 'db_name', 'type' => TType::STRING, ), - 2 => array( - 'var' => 'pattern', - 'type' => TType::STRING, - ), ); } if (is_array($vals)) { if (isset($vals['db_name'])) { $this->db_name = $vals['db_name']; } - if (isset($vals['pattern'])) { - $this->pattern = $vals['pattern']; - } } } public function getName() { - return 'ThriftHiveMetastore_get_tables_args'; + return 'ThriftHiveMetastore_get_all_tables_args'; } public function read($input) @@ -4559,13 +5149,6 @@ class metastore_ThriftHiveMetastore_get_tables_args { $xfer += $input->skip($ftype); } break; - case 2: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->pattern); - } else { - $xfer += $input->skip($ftype); - } - break; default: $xfer += $input->skip($ftype); break; @@ -4578,17 +5161,12 @@ class metastore_ThriftHiveMetastore_get_tables_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_tables_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_tables_args'); if ($this->db_name !== null) { $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); $xfer += $output->writeString($this->db_name); $xfer += $output->writeFieldEnd(); } - if ($this->pattern !== null) { - $xfer += $output->writeFieldBegin('pattern', TType::STRING, 2); - $xfer += $output->writeString($this->pattern); - $xfer += $output->writeFieldEnd(); - } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -4596,7 +5174,7 @@ class metastore_ThriftHiveMetastore_get_tables_args { } -class metastore_ThriftHiveMetastore_get_tables_result { +class metastore_ThriftHiveMetastore_get_all_tables_result { static $_TSPEC; public $success = null; @@ -4631,7 +5209,7 @@ class metastore_ThriftHiveMetastore_get_tables_result { } public function getName() { - return 'ThriftHiveMetastore_get_tables_result'; + return 'ThriftHiveMetastore_get_all_tables_result'; } public function read($input) @@ -4652,14 +5230,14 @@ class metastore_ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size133 = 0; - $_etype136 = 0; - $xfer += $input->readListBegin($_etype136, $_size133); - for ($_i137 = 0; $_i137 < $_size133; ++$_i137) + $_size147 = 0; + $_etype150 = 0; + $xfer += $input->readListBegin($_etype150, $_size147); + for ($_i151 = 0; $_i151 < $_size147; 
++$_i151) { - $elem138 = null; - $xfer += $input->readString($elem138); - $this->success []= $elem138; + $elem152 = null; + $xfer += $input->readString($elem152); + $this->success []= $elem152; } $xfer += $input->readListEnd(); } else { @@ -4686,7 +5264,7 @@ class metastore_ThriftHiveMetastore_get_tables_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_tables_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_tables_result'); if ($this->success !== null) { if (!is_array($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -4695,9 +5273,9 @@ class metastore_ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter139) + foreach ($this->success as $iter153) { - $xfer += $output->writeString($iter139); + $xfer += $output->writeString($iter153); } } $output->writeListEnd(); @@ -5439,14 +6017,14 @@ class metastore_ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size140 = 0; - $_etype143 = 0; - $xfer += $input->readListBegin($_etype143, $_size140); - for ($_i144 = 0; $_i144 < $_size140; ++$_i144) + $_size154 = 0; + $_etype157 = 0; + $xfer += $input->readListBegin($_etype157, $_size154); + for ($_i158 = 0; $_i158 < $_size154; ++$_i158) { - $elem145 = null; - $xfer += $input->readString($elem145); - $this->part_vals []= $elem145; + $elem159 = null; + $xfer += $input->readString($elem159); + $this->part_vals []= $elem159; } $xfer += $input->readListEnd(); } else { @@ -5484,9 +6062,9 @@ class metastore_ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter146) + foreach ($this->part_vals as $iter160) { - $xfer += $output->writeString($iter146); + $xfer += $output->writeString($iter160); } } $output->writeListEnd(); @@ -5983,14 +6561,14 @@ class metastore_ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size147 = 0; - $_etype150 = 0; - $xfer += $input->readListBegin($_etype150, $_size147); - for ($_i151 = 0; $_i151 < $_size147; ++$_i151) + $_size161 = 0; + $_etype164 = 0; + $xfer += $input->readListBegin($_etype164, $_size161); + for ($_i165 = 0; $_i165 < $_size161; ++$_i165) { - $elem152 = null; - $xfer += $input->readString($elem152); - $this->part_vals []= $elem152; + $elem166 = null; + $xfer += $input->readString($elem166); + $this->part_vals []= $elem166; } $xfer += $input->readListEnd(); } else { @@ -6035,9 +6613,9 @@ class metastore_ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter153) + foreach ($this->part_vals as $iter167) { - $xfer += $output->writeString($iter153); + $xfer += $output->writeString($iter167); } } $output->writeListEnd(); @@ -6497,14 +7075,14 @@ class metastore_ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size154 = 0; - $_etype157 = 0; - $xfer += $input->readListBegin($_etype157, $_size154); - for ($_i158 = 0; $_i158 < $_size154; ++$_i158) + $_size168 = 0; + $_etype171 = 0; + $xfer += $input->readListBegin($_etype171, $_size168); + for ($_i172 = 0; $_i172 < $_size168; ++$_i172) { - $elem159 = null; - $xfer += $input->readString($elem159); - $this->part_vals []= $elem159; 
+ $elem173 = null; + $xfer += $input->readString($elem173); + $this->part_vals []= $elem173; } $xfer += $input->readListEnd(); } else { @@ -6542,9 +7120,9 @@ class metastore_ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter160) + foreach ($this->part_vals as $iter174) { - $xfer += $output->writeString($iter160); + $xfer += $output->writeString($iter174); } } $output->writeListEnd(); @@ -7090,15 +7668,15 @@ class metastore_ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size161 = 0; - $_etype164 = 0; - $xfer += $input->readListBegin($_etype164, $_size161); - for ($_i165 = 0; $_i165 < $_size161; ++$_i165) + $_size175 = 0; + $_etype178 = 0; + $xfer += $input->readListBegin($_etype178, $_size175); + for ($_i179 = 0; $_i179 < $_size175; ++$_i179) { - $elem166 = null; - $elem166 = new metastore_Partition(); - $xfer += $elem166->read($input); - $this->success []= $elem166; + $elem180 = null; + $elem180 = new metastore_Partition(); + $xfer += $elem180->read($input); + $this->success []= $elem180; } $xfer += $input->readListEnd(); } else { @@ -7142,9 +7720,9 @@ class metastore_ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter167) + foreach ($this->success as $iter181) { - $xfer += $iter167->write($output); + $xfer += $iter181->write($output); } } $output->writeListEnd(); @@ -7336,14 +7914,14 @@ class metastore_ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size168 = 0; - $_etype171 = 0; - $xfer += $input->readListBegin($_etype171, $_size168); - for ($_i172 = 0; $_i172 < $_size168; ++$_i172) + $_size182 = 0; + $_etype185 = 0; + $xfer += $input->readListBegin($_etype185, $_size182); + for ($_i186 = 0; $_i186 < $_size182; ++$_i186) { - $elem173 = null; - $xfer += $input->readString($elem173); - $this->success []= $elem173; + $elem187 = null; + $xfer += $input->readString($elem187); + $this->success []= $elem187; } $xfer += $input->readListEnd(); } else { @@ -7379,9 +7957,9 @@ class metastore_ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter174) + foreach ($this->success as $iter188) { - $xfer += $output->writeString($iter174); + $xfer += $output->writeString($iter188); } } $output->writeListEnd(); @@ -7485,14 +8063,14 @@ class metastore_ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size175 = 0; - $_etype178 = 0; - $xfer += $input->readListBegin($_etype178, $_size175); - for ($_i179 = 0; $_i179 < $_size175; ++$_i179) + $_size189 = 0; + $_etype192 = 0; + $xfer += $input->readListBegin($_etype192, $_size189); + for ($_i193 = 0; $_i193 < $_size189; ++$_i193) { - $elem180 = null; - $xfer += $input->readString($elem180); - $this->part_vals []= $elem180; + $elem194 = null; + $xfer += $input->readString($elem194); + $this->part_vals []= $elem194; } $xfer += $input->readListEnd(); } else { @@ -7537,9 +8115,9 @@ class metastore_ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter181) + foreach ($this->part_vals as $iter195) { - $xfer += $output->writeString($iter181); + $xfer += $output->writeString($iter195); } } 
$output->writeListEnd(); @@ -7615,15 +8193,15 @@ class metastore_ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size182 = 0; - $_etype185 = 0; - $xfer += $input->readListBegin($_etype185, $_size182); - for ($_i186 = 0; $_i186 < $_size182; ++$_i186) + $_size196 = 0; + $_etype199 = 0; + $xfer += $input->readListBegin($_etype199, $_size196); + for ($_i200 = 0; $_i200 < $_size196; ++$_i200) { - $elem187 = null; - $elem187 = new metastore_Partition(); - $xfer += $elem187->read($input); - $this->success []= $elem187; + $elem201 = null; + $elem201 = new metastore_Partition(); + $xfer += $elem201->read($input); + $this->success []= $elem201; } $xfer += $input->readListEnd(); } else { @@ -7659,9 +8237,9 @@ class metastore_ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter188) + foreach ($this->success as $iter202) { - $xfer += $iter188->write($output); + $xfer += $iter202->write($output); } } $output->writeListEnd(); @@ -7765,14 +8343,14 @@ class metastore_ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size189 = 0; - $_etype192 = 0; - $xfer += $input->readListBegin($_etype192, $_size189); - for ($_i193 = 0; $_i193 < $_size189; ++$_i193) + $_size203 = 0; + $_etype206 = 0; + $xfer += $input->readListBegin($_etype206, $_size203); + for ($_i207 = 0; $_i207 < $_size203; ++$_i207) { - $elem194 = null; - $xfer += $input->readString($elem194); - $this->part_vals []= $elem194; + $elem208 = null; + $xfer += $input->readString($elem208); + $this->part_vals []= $elem208; } $xfer += $input->readListEnd(); } else { @@ -7817,9 +8395,9 @@ class metastore_ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter195) + foreach ($this->part_vals as $iter209) { - $xfer += $output->writeString($iter195); + $xfer += $output->writeString($iter209); } } $output->writeListEnd(); @@ -7894,14 +8472,14 @@ class metastore_ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size196 = 0; - $_etype199 = 0; - $xfer += $input->readListBegin($_etype199, $_size196); - for ($_i200 = 0; $_i200 < $_size196; ++$_i200) + $_size210 = 0; + $_etype213 = 0; + $xfer += $input->readListBegin($_etype213, $_size210); + for ($_i214 = 0; $_i214 < $_size210; ++$_i214) { - $elem201 = null; - $xfer += $input->readString($elem201); - $this->success []= $elem201; + $elem215 = null; + $xfer += $input->readString($elem215); + $this->success []= $elem215; } $xfer += $input->readListEnd(); } else { @@ -7937,9 +8515,9 @@ class metastore_ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter202) + foreach ($this->success as $iter216) { - $xfer += $output->writeString($iter202); + $xfer += $output->writeString($iter216); } } $output->writeListEnd(); @@ -8485,14 +9063,14 @@ class metastore_ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size203 = 0; - $_etype206 = 0; - $xfer += $input->readListBegin($_etype206, $_size203); - for ($_i207 = 0; $_i207 < $_size203; ++$_i207) + $_size217 = 0; + $_etype220 = 0; + $xfer += $input->readListBegin($_etype220, $_size217); + for ($_i221 = 0; $_i221 < $_size217; 
++$_i221) { - $elem208 = null; - $xfer += $input->readString($elem208); - $this->success []= $elem208; + $elem222 = null; + $xfer += $input->readString($elem222); + $this->success []= $elem222; } $xfer += $input->readListEnd(); } else { @@ -8528,9 +9106,9 @@ class metastore_ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter209) + foreach ($this->success as $iter223) { - $xfer += $output->writeString($iter209); + $xfer += $output->writeString($iter223); } } $output->writeListEnd(); @@ -8681,17 +9259,17 @@ class metastore_ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size210 = 0; - $_ktype211 = 0; - $_vtype212 = 0; - $xfer += $input->readMapBegin($_ktype211, $_vtype212, $_size210); - for ($_i214 = 0; $_i214 < $_size210; ++$_i214) + $_size224 = 0; + $_ktype225 = 0; + $_vtype226 = 0; + $xfer += $input->readMapBegin($_ktype225, $_vtype226, $_size224); + for ($_i228 = 0; $_i228 < $_size224; ++$_i228) { - $key215 = ''; - $val216 = ''; - $xfer += $input->readString($key215); - $xfer += $input->readString($val216); - $this->success[$key215] = $val216; + $key229 = ''; + $val230 = ''; + $xfer += $input->readString($key229); + $xfer += $input->readString($val230); + $this->success[$key229] = $val230; } $xfer += $input->readMapEnd(); } else { @@ -8727,10 +9305,10 @@ class metastore_ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter217 => $viter218) + foreach ($this->success as $kiter231 => $viter232) { - $xfer += $output->writeString($kiter217); - $xfer += $output->writeString($viter218); + $xfer += $output->writeString($kiter231); + $xfer += $output->writeString($viter232); } } $output->writeMapEnd(); @@ -9653,15 +10231,15 @@ class metastore_ThriftHiveMetastore_get_indexes_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size219 = 0; - $_etype222 = 0; - $xfer += $input->readListBegin($_etype222, $_size219); - for ($_i223 = 0; $_i223 < $_size219; ++$_i223) + $_size233 = 0; + $_etype236 = 0; + $xfer += $input->readListBegin($_etype236, $_size233); + for ($_i237 = 0; $_i237 < $_size233; ++$_i237) { - $elem224 = null; - $elem224 = new metastore_Index(); - $xfer += $elem224->read($input); - $this->success []= $elem224; + $elem238 = null; + $elem238 = new metastore_Index(); + $xfer += $elem238->read($input); + $this->success []= $elem238; } $xfer += $input->readListEnd(); } else { @@ -9705,9 +10283,9 @@ class metastore_ThriftHiveMetastore_get_indexes_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter225) + foreach ($this->success as $iter239) { - $xfer += $iter225->write($output); + $xfer += $iter239->write($output); } } $output->writeListEnd(); @@ -9899,14 +10477,14 @@ class metastore_ThriftHiveMetastore_get_index_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size226 = 0; - $_etype229 = 0; - $xfer += $input->readListBegin($_etype229, $_size226); - for ($_i230 = 0; $_i230 < $_size226; ++$_i230) + $_size240 = 0; + $_etype243 = 0; + $xfer += $input->readListBegin($_etype243, $_size240); + for ($_i244 = 0; $_i244 < $_size240; ++$_i244) { - $elem231 = null; - $xfer += $input->readString($elem231); - $this->success []= $elem231; + $elem245 = null; + $xfer += $input->readString($elem245); + $this->success 
[]= $elem245; } $xfer += $input->readListEnd(); } else { @@ -9942,9 +10520,9 @@ class metastore_ThriftHiveMetastore_get_index_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter232) + foreach ($this->success as $iter246) { - $xfer += $output->writeString($iter232); + $xfer += $output->writeString($iter246); } } $output->writeListEnd(); diff --git metastore/src/gen-php/hive_metastore_types.php metastore/src/gen-php/hive_metastore_types.php index 61872a0..1d5f4b4 100644 --- metastore/src/gen-php/hive_metastore_types.php +++ metastore/src/gen-php/hive_metastore_types.php @@ -377,6 +377,7 @@ class metastore_Database { public $name = null; public $description = null; + public $locationUri = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -389,6 +390,10 @@ class metastore_Database { 'var' => 'description', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'locationUri', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -398,6 +403,9 @@ class metastore_Database { if (isset($vals['description'])) { $this->description = $vals['description']; } + if (isset($vals['locationUri'])) { + $this->locationUri = $vals['locationUri']; + } } } @@ -434,6 +442,13 @@ class metastore_Database { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->locationUri); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -457,6 +472,11 @@ class metastore_Database { $xfer += $output->writeString($this->description); $xfer += $output->writeFieldEnd(); } + if ($this->locationUri !== null) { + $xfer += $output->writeFieldBegin('locationUri', TType::STRING, 3); + $xfer += $output->writeString($this->locationUri); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote old mode 100644 new mode 100755 index fc06cba..06ea9cc --- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -21,10 +21,11 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print 'Usage: ' + sys.argv[0] + ' [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]' print '' print 'Functions:' - print ' bool create_database(string name, string description)' + print ' void create_database(Database database)' print ' Database get_database(string name)' - print ' bool drop_database(string name)' - print ' get_databases()' + print ' void drop_database(string name, bool deleteData)' + print ' get_databases(string pattern)' + print ' get_all_databases()' print ' Type get_type(string name)' print ' bool create_type(Type type)' print ' bool drop_type(string type)' @@ -34,6 +35,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print ' void create_table(Table tbl)' print ' void drop_table(string dbname, string name, bool deleteData)' print ' get_tables(string db_name, string pattern)' + print ' get_all_tables(string db_name)' print ' Table get_table(string dbname, string tbl_name)' print ' void alter_table(string dbname, string tbl_name, Table new_tbl)' print ' Partition add_partition(Partition new_part)' @@ -105,10 +107,10 @@ client = ThriftHiveMetastore.Client(protocol) transport.open() if cmd == 'create_database': - if len(args) != 2: - print 
'create_database requires 2 args' + if len(args) != 1: + print 'create_database requires 1 args' sys.exit(1) - pp.pprint(client.create_database(args[0],args[1],)) + pp.pprint(client.create_database(eval(args[0]),)) elif cmd == 'get_database': if len(args) != 1: @@ -117,16 +119,22 @@ elif cmd == 'get_database': pp.pprint(client.get_database(args[0],)) elif cmd == 'drop_database': - if len(args) != 1: - print 'drop_database requires 1 args' + if len(args) != 2: + print 'drop_database requires 2 args' sys.exit(1) - pp.pprint(client.drop_database(args[0],)) + pp.pprint(client.drop_database(args[0],eval(args[1]),)) elif cmd == 'get_databases': + if len(args) != 1: + print 'get_databases requires 1 args' + sys.exit(1) + pp.pprint(client.get_databases(args[0],)) + +elif cmd == 'get_all_databases': if len(args) != 0: - print 'get_databases requires 0 args' + print 'get_all_databases requires 0 args' sys.exit(1) - pp.pprint(client.get_databases()) + pp.pprint(client.get_all_databases()) elif cmd == 'get_type': if len(args) != 1: @@ -182,6 +190,12 @@ elif cmd == 'get_tables': sys.exit(1) pp.pprint(client.get_tables(args[0],args[1],)) +elif cmd == 'get_all_tables': + if len(args) != 1: + print 'get_all_tables requires 1 args' + sys.exit(1) + pp.pprint(client.get_all_tables(args[0],)) + elif cmd == 'get_table': if len(args) != 2: print 'get_table requires 2 args' diff --git metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py index 4a0bc67..1ff1dbd 100644 --- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py +++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -20,11 +20,10 @@ class Iface(fb303.FacebookService.Iface): """ This interface is live. """ - def create_database(self, name, description): + def create_database(self, database): """ Parameters: - - name - - description + - database """ pass @@ -35,14 +34,22 @@ class Iface(fb303.FacebookService.Iface): """ pass - def drop_database(self, name): + def drop_database(self, name, deleteData): """ Parameters: - name + - deleteData """ pass - def get_databases(self, ): + def get_databases(self, pattern): + """ + Parameters: + - pattern + """ + pass + + def get_all_databases(self, ): pass def get_type(self, name): @@ -113,6 +120,13 @@ class Iface(fb303.FacebookService.Iface): """ pass + def get_all_tables(self, db_name): + """ + Parameters: + - db_name + """ + pass + def get_table(self, dbname, tbl_name): """ Parameters: @@ -315,20 +329,18 @@ class Client(fb303.FacebookService.Client, Iface): def __init__(self, iprot, oprot=None): fb303.FacebookService.Client.__init__(self, iprot, oprot) - def create_database(self, name, description): + def create_database(self, database): """ Parameters: - - name - - description + - database """ - self.send_create_database(name, description) - return self.recv_create_database() + self.send_create_database(database) + self.recv_create_database() - def send_create_database(self, name, description): + def send_create_database(self, database): self._oprot.writeMessageBegin('create_database', TMessageType.CALL, self._seqid) args = create_database_args() - args.name = name - args.description = description + args.database = database args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -343,13 +355,13 @@ class Client(fb303.FacebookService.Client, Iface): result = create_database_result() result.read(self._iprot) self._iprot.readMessageEnd() - if result.success != None: - return result.success if result.o1 != 
None: raise result.o1 if result.o2 != None: raise result.o2 - raise TApplicationException(TApplicationException.MISSING_RESULT, "create_database failed: unknown result"); + if result.o3 != None: + raise result.o3 + return def get_database(self, name): """ @@ -385,18 +397,20 @@ class Client(fb303.FacebookService.Client, Iface): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_database failed: unknown result"); - def drop_database(self, name): + def drop_database(self, name, deleteData): """ Parameters: - name + - deleteData """ - self.send_drop_database(name) - return self.recv_drop_database() + self.send_drop_database(name, deleteData) + self.recv_drop_database() - def send_drop_database(self, name): + def send_drop_database(self, name, deleteData): self._oprot.writeMessageBegin('drop_database', TMessageType.CALL, self._seqid) args = drop_database_args() args.name = name + args.deleteData = deleteData args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -411,19 +425,26 @@ class Client(fb303.FacebookService.Client, Iface): result = drop_database_result() result.read(self._iprot) self._iprot.readMessageEnd() - if result.success != None: - return result.success + if result.o1 != None: + raise result.o1 if result.o2 != None: raise result.o2 - raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_database failed: unknown result"); + if result.o3 != None: + raise result.o3 + return - def get_databases(self, ): - self.send_get_databases() + def get_databases(self, pattern): + """ + Parameters: + - pattern + """ + self.send_get_databases(pattern) return self.recv_get_databases() - def send_get_databases(self, ): + def send_get_databases(self, pattern): self._oprot.writeMessageBegin('get_databases', TMessageType.CALL, self._seqid) args = get_databases_args() + args.pattern = pattern args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -444,6 +465,33 @@ class Client(fb303.FacebookService.Client, Iface): raise result.o1 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_databases failed: unknown result"); + def get_all_databases(self, ): + self.send_get_all_databases() + return self.recv_get_all_databases() + + def send_get_all_databases(self, ): + self._oprot.writeMessageBegin('get_all_databases', TMessageType.CALL, self._seqid) + args = get_all_databases_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_databases(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = get_all_databases_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success != None: + return result.success + if result.o1 != None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_databases failed: unknown result"); + def get_type(self, name): """ Parameters: @@ -472,6 +520,8 @@ class Client(fb303.FacebookService.Client, Iface): self._iprot.readMessageEnd() if result.success != None: return result.success + if result.o1 != None: + raise result.o1 if result.o2 != None: raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_type failed: unknown result"); @@ -540,6 +590,8 @@ class Client(fb303.FacebookService.Client, Iface): self._iprot.readMessageEnd() if result.success != None: 
return result.success + if result.o1 != None: + raise result.o1 if result.o2 != None: raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_type failed: unknown result"); @@ -758,6 +810,38 @@ class Client(fb303.FacebookService.Client, Iface): raise result.o1 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_tables failed: unknown result"); + def get_all_tables(self, db_name): + """ + Parameters: + - db_name + """ + self.send_get_all_tables(db_name) + return self.recv_get_all_tables() + + def send_get_all_tables(self, db_name): + self._oprot.writeMessageBegin('get_all_tables', TMessageType.CALL, self._seqid) + args = get_all_tables_args() + args.db_name = db_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_tables(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = get_all_tables_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success != None: + return result.success + if result.o1 != None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_tables failed: unknown result"); + def get_table(self, dbname, tbl_name): """ Parameters: @@ -1584,6 +1668,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): self._processMap["get_database"] = Processor.process_get_database self._processMap["drop_database"] = Processor.process_drop_database self._processMap["get_databases"] = Processor.process_get_databases + self._processMap["get_all_databases"] = Processor.process_get_all_databases self._processMap["get_type"] = Processor.process_get_type self._processMap["create_type"] = Processor.process_create_type self._processMap["drop_type"] = Processor.process_drop_type @@ -1593,6 +1678,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): self._processMap["create_table"] = Processor.process_create_table self._processMap["drop_table"] = Processor.process_drop_table self._processMap["get_tables"] = Processor.process_get_tables + self._processMap["get_all_tables"] = Processor.process_get_all_tables self._processMap["get_table"] = Processor.process_get_table self._processMap["alter_table"] = Processor.process_alter_table self._processMap["add_partition"] = Processor.process_add_partition @@ -1637,11 +1723,13 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): iprot.readMessageEnd() result = create_database_result() try: - result.success = self._handler.create_database(args.name, args.description) + self._handler.create_database(args.database) except AlreadyExistsException, o1: result.o1 = o1 - except MetaException, o2: + except InvalidObjectException, o2: result.o2 = o2 + except MetaException, o3: + result.o3 = o3 oprot.writeMessageBegin("create_database", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() @@ -1669,9 +1757,13 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): iprot.readMessageEnd() result = drop_database_result() try: - result.success = self._handler.drop_database(args.name) - except MetaException, o2: + self._handler.drop_database(args.name, args.deleteData) + except NoSuchObjectException, o1: + result.o1 = o1 + except InvalidOperationException, o2: result.o2 = o2 + except MetaException, o3: + result.o3 = o3 oprot.writeMessageBegin("drop_database", 
TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() @@ -1683,7 +1775,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): iprot.readMessageEnd() result = get_databases_result() try: - result.success = self._handler.get_databases() + result.success = self._handler.get_databases(args.pattern) except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_databases", TMessageType.REPLY, seqid) @@ -1691,6 +1783,20 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): oprot.writeMessageEnd() oprot.trans.flush() + def process_get_all_databases(self, seqid, iprot, oprot): + args = get_all_databases_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_databases_result() + try: + result.success = self._handler.get_all_databases() + except MetaException, o1: + result.o1 = o1 + oprot.writeMessageBegin("get_all_databases", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_get_type(self, seqid, iprot, oprot): args = get_type_args() args.read(iprot) @@ -1698,7 +1804,9 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): result = get_type_result() try: result.success = self._handler.get_type(args.name) - except MetaException, o2: + except MetaException, o1: + result.o1 = o1 + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_type", TMessageType.REPLY, seqid) result.write(oprot) @@ -1730,7 +1838,9 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): result = drop_type_result() try: result.success = self._handler.drop_type(args.type) - except MetaException, o2: + except MetaException, o1: + result.o1 = o1 + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("drop_type", TMessageType.REPLY, seqid) result.write(oprot) @@ -1837,6 +1947,20 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): oprot.writeMessageEnd() oprot.trans.flush() + def process_get_all_tables(self, seqid, iprot, oprot): + args = get_all_tables_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_tables_result() + try: + result.success = self._handler.get_all_tables(args.db_name) + except MetaException, o1: + result.o1 = o1 + oprot.writeMessageBegin("get_all_tables", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_get_table(self, seqid, iprot, oprot): args = get_table_args() args.read(iprot) @@ -2189,19 +2313,16 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): class create_database_args: """ Attributes: - - name - - description + - database """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'name', None, None, ), # 1 - (2, TType.STRING, 'description', None, None, ), # 2 + (1, TType.STRUCT, 'database', (Database, Database.thrift_spec), None, ), # 1 ) - def __init__(self, name=None, description=None,): - self.name = name - self.description = description + def __init__(self, database=None,): + self.database = database def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2213,13 +2334,9 @@ class create_database_args: if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.name = iprot.readString(); - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.description = iprot.readString(); + if ftype 
== TType.STRUCT: + self.database = Database() + self.database.read(iprot) else: iprot.skip(ftype) else: @@ -2232,13 +2349,9 @@ class create_database_args: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('create_database_args') - if self.name != None: - oprot.writeFieldBegin('name', TType.STRING, 1) - oprot.writeString(self.name) - oprot.writeFieldEnd() - if self.description != None: - oprot.writeFieldBegin('description', TType.STRING, 2) - oprot.writeString(self.description) + if self.database != None: + oprot.writeFieldBegin('database', TType.STRUCT, 1) + self.database.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2257,21 +2370,22 @@ class create_database_args: class create_database_result: """ Attributes: - - success - o1 - o2 + - o3 """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + None, # 0 (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None, o2=None,): - self.success = success + def __init__(self, o1=None, o2=None, o3=None,): self.o1 = o1 self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2282,12 +2396,7 @@ class create_database_result: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool(); - else: - iprot.skip(ftype) - elif fid == 1: + if fid == 1: if ftype == TType.STRUCT: self.o1 = AlreadyExistsException() self.o1.read(iprot) @@ -2295,10 +2404,16 @@ class create_database_result: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = InvalidObjectException() self.o2.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2309,10 +2424,6 @@ class create_database_result: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('create_database_result') - if self.success != None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) - oprot.writeFieldEnd() if self.o1 != None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -2321,6 +2432,10 @@ class create_database_result: oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() + if self.o3 != None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2477,15 +2592,18 @@ class drop_database_args: """ Attributes: - name + - deleteData """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.BOOL, 'deleteData', None, None, ), # 2 ) - def __init__(self, name=None,): + def __init__(self, name=None, deleteData=None,): self.name = name + self.deleteData = deleteData def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2501,6 +2619,11 @@ class drop_database_args: self.name = iprot.readString(); else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool(); + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2515,6 +2638,10 @@ class drop_database_args: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name) oprot.writeFieldEnd() + if self.deleteData != None: + oprot.writeFieldBegin('deleteData', TType.BOOL, 2) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2532,19 +2659,22 @@ class drop_database_args: class drop_database_result: """ Attributes: - - success + - o1 - o2 + - o3 """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 - None, # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o2=None,): - self.success = success + def __init__(self, o1=None, o2=None, o3=None,): + self.o1 = o1 self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2555,17 +2685,24 @@ class drop_database_result: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool(); + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = InvalidOperationException() self.o2.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2576,14 +2713,18 @@ class drop_database_result: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('drop_database_result') - if self.success != None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) oprot.writeFieldEnd() if self.o2 != None: oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() + if self.o3 != None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2599,10 +2740,19 @@ class drop_database_result: return not (self == other) class get_databases_args: + """ + Attributes: + - pattern + """ thrift_spec = ( + None, # 0 + (1, TType.STRING, 'pattern', None, None, ), # 1 ) + def __init__(self, pattern=None,): + self.pattern = pattern + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: 
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -2612,6 +2762,11 @@ class get_databases_args: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRING: + self.pattern = iprot.readString(); + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2622,6 +2777,10 @@ class get_databases_args: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('get_databases_args') + if self.pattern != None: + oprot.writeFieldBegin('pattern', TType.STRING, 1) + oprot.writeString(self.pattern) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2712,6 +2871,120 @@ class get_databases_result: def __ne__(self, other): return not (self == other) +class get_all_databases_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_all_databases_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_all_databases_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype113, _size110) = iprot.readListBegin() + for _i114 in xrange(_size110): + _elem115 = iprot.readString(); + self.success.append(_elem115) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('get_all_databases_result') + if self.success != None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter116 in self.success: + oprot.writeString(iter116) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class get_type_args: """ Attributes: @@ -2772,16 +3045,19 @@ class get_type_result: """ Attributes: - success + - o1 - o2 """ thrift_spec = ( (0, TType.STRUCT, 'success', (Type, Type.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success + self.o1 = o1 self.o2 = o2 def read(self, iprot): @@ -2801,7 +3077,13 @@ class get_type_result: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() self.o2.read(iprot) else: iprot.skip(ftype) @@ -2819,8 +3101,12 @@ class get_type_result: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() if self.o2 != None: - oprot.writeFieldBegin('o2', TType.STRUCT, 1) + oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -3048,16 +3334,19 @@ class drop_type_result: """ Attributes: - success + - o1 - o2 """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success + self.o1 = o1 self.o2 = o2 def read(self, iprot): @@ -3076,7 +3365,13 @@ class drop_type_result: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() self.o2.read(iprot) else: iprot.skip(ftype) @@ -3094,8 +3389,12 @@ class drop_type_result: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() if self.o2 != None: - oprot.writeFieldBegin('o2', TType.STRUCT, 1) + oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) 
oprot.writeFieldEnd() oprot.writeFieldStop() @@ -3196,12 +3495,12 @@ class get_type_all_result: if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype111, _vtype112, _size110 ) = iprot.readMapBegin() - for _i114 in xrange(_size110): - _key115 = iprot.readString(); - _val116 = Type() - _val116.read(iprot) - self.success[_key115] = _val116 + (_ktype118, _vtype119, _size117 ) = iprot.readMapBegin() + for _i121 in xrange(_size117): + _key122 = iprot.readString(); + _val123 = Type() + _val123.read(iprot) + self.success[_key122] = _val123 iprot.readMapEnd() else: iprot.skip(ftype) @@ -3224,9 +3523,9 @@ class get_type_all_result: if self.success != None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter117,viter118 in self.success.items(): - oprot.writeString(kiter117) - viter118.write(oprot) + for kiter124,viter125 in self.success.items(): + oprot.writeString(kiter124) + viter125.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 != None: @@ -3349,11 +3648,11 @@ class get_fields_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype122, _size119) = iprot.readListBegin() - for _i123 in xrange(_size119): - _elem124 = FieldSchema() - _elem124.read(iprot) - self.success.append(_elem124) + (_etype129, _size126) = iprot.readListBegin() + for _i130 in xrange(_size126): + _elem131 = FieldSchema() + _elem131.read(iprot) + self.success.append(_elem131) iprot.readListEnd() else: iprot.skip(ftype) @@ -3388,8 +3687,8 @@ class get_fields_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter125 in self.success: - iter125.write(oprot) + for iter132 in self.success: + iter132.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -3520,11 +3819,11 @@ class get_schema_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype129, _size126) = iprot.readListBegin() - for _i130 in xrange(_size126): - _elem131 = FieldSchema() - _elem131.read(iprot) - self.success.append(_elem131) + (_etype136, _size133) = iprot.readListBegin() + for _i137 in xrange(_size133): + _elem138 = FieldSchema() + _elem138.read(iprot) + self.success.append(_elem138) iprot.readListEnd() else: iprot.skip(ftype) @@ -3559,8 +3858,8 @@ class get_schema_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter132 in self.success: - iter132.write(oprot) + for iter139 in self.success: + iter139.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -3988,10 +4287,10 @@ class get_tables_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype136, _size133) = iprot.readListBegin() - for _i137 in xrange(_size133): - _elem138 = iprot.readString(); - self.success.append(_elem138) + (_etype143, _size140) = iprot.readListBegin() + for _i144 in xrange(_size140): + _elem145 = iprot.readString(); + self.success.append(_elem145) iprot.readListEnd() else: iprot.skip(ftype) @@ -4014,8 +4313,140 @@ class get_tables_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter139 in self.success: - oprot.writeString(iter139) + for iter146 in self.success: + oprot.writeString(iter146) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + 
self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_all_tables_args: + """ + Attributes: + - db_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'db_name', None, None, ), # 1 + ) + + def __init__(self, db_name=None,): + self.db_name = db_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = iprot.readString(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_all_tables_args') + if self.db_name != None: + oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_all_tables_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype150, _size147) = iprot.readListBegin() + for _i151 in xrange(_size147): + _elem152 = iprot.readString(); + self.success.append(_elem152) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('get_all_tables_result') + if self.success != None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter153 in self.success: + oprot.writeString(iter153) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -4531,10 +4962,10 @@ class append_partition_args: elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype143, _size140) = iprot.readListBegin() - for _i144 in xrange(_size140): - _elem145 = iprot.readString(); - self.part_vals.append(_elem145) + (_etype157, _size154) = iprot.readListBegin() + for _i158 in xrange(_size154): + _elem159 = iprot.readString(); + self.part_vals.append(_elem159) iprot.readListEnd() else: iprot.skip(ftype) @@ -4559,8 +4990,8 @@ class append_partition_args: if self.part_vals != None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter146 in self.part_vals: - oprot.writeString(iter146) + for iter160 in self.part_vals: + oprot.writeString(iter160) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -4892,10 +5323,10 @@ class drop_partition_args: elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype150, _size147) = iprot.readListBegin() - for _i151 in xrange(_size147): - _elem152 = iprot.readString(); - self.part_vals.append(_elem152) + (_etype164, _size161) = iprot.readListBegin() + for _i165 in xrange(_size161): + _elem166 = iprot.readString(); + self.part_vals.append(_elem166) iprot.readListEnd() else: iprot.skip(ftype) @@ -4925,8 +5356,8 @@ class drop_partition_args: if self.part_vals != None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter153 in self.part_vals: - oprot.writeString(iter153) + for iter167 in self.part_vals: + oprot.writeString(iter167) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData != None: @@ -5243,10 +5674,10 @@ class get_partition_args: elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype157, _size154) = iprot.readListBegin() - for _i158 in xrange(_size154): - _elem159 = iprot.readString(); - self.part_vals.append(_elem159) + (_etype171, _size168) = iprot.readListBegin() + for _i172 in xrange(_size168): + _elem173 = iprot.readString(); + self.part_vals.append(_elem173) iprot.readListEnd() else: iprot.skip(ftype) @@ -5271,8 +5702,8 @@ class get_partition_args: if self.part_vals != None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter160 in self.part_vals: - oprot.writeString(iter160) + for iter174 in self.part_vals: + oprot.writeString(iter174) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5644,11 +6075,11 @@ class get_partitions_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype164, _size161) = iprot.readListBegin() - for _i165 in xrange(_size161): - _elem166 = Partition() - _elem166.read(iprot) - self.success.append(_elem166) + (_etype178, _size175) = iprot.readListBegin() + for _i179 in xrange(_size175): + _elem180 = Partition() + _elem180.read(iprot) + self.success.append(_elem180) iprot.readListEnd() else: iprot.skip(ftype) @@ -5677,8 +6108,8 @@ class get_partitions_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter167 in self.success: - iter167.write(oprot) + for iter181 in self.success: + iter181.write(oprot) 
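For context on the regenerated stubs above: get_all_tables is the new unfiltered companion to get_tables, and both surface on the Java client later in this patch. A minimal sketch of the difference, assuming a running metastore reachable through the HiveConf defaults (the database name and pattern here are hypothetical):

    import java.util.List;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

    public class ListTablesExample {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client =
            new HiveMetaStoreClient(new HiveConf(ListTablesExample.class));
        // get_tables filters on a pattern; get_all_tables lists everything.
        List<String> matching = client.getTables("default", "src*"); // hypothetical pattern
        List<String> all = client.getAllTables("default");
        System.out.println(matching + " / " + all);
        client.close();
      }
    }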
oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -5811,10 +6242,10 @@ class get_partition_names_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype171, _size168) = iprot.readListBegin() - for _i172 in xrange(_size168): - _elem173 = iprot.readString(); - self.success.append(_elem173) + (_etype185, _size182) = iprot.readListBegin() + for _i186 in xrange(_size182): + _elem187 = iprot.readString(); + self.success.append(_elem187) iprot.readListEnd() else: iprot.skip(ftype) @@ -5837,8 +6268,8 @@ class get_partition_names_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter174 in self.success: - oprot.writeString(iter174) + for iter188 in self.success: + oprot.writeString(iter188) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 != None: @@ -5904,10 +6335,10 @@ class get_partitions_ps_args: elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype178, _size175) = iprot.readListBegin() - for _i179 in xrange(_size175): - _elem180 = iprot.readString(); - self.part_vals.append(_elem180) + (_etype192, _size189) = iprot.readListBegin() + for _i193 in xrange(_size189): + _elem194 = iprot.readString(); + self.part_vals.append(_elem194) iprot.readListEnd() else: iprot.skip(ftype) @@ -5937,8 +6368,8 @@ class get_partitions_ps_args: if self.part_vals != None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter181 in self.part_vals: - oprot.writeString(iter181) + for iter195 in self.part_vals: + oprot.writeString(iter195) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts != None: @@ -5987,11 +6418,11 @@ class get_partitions_ps_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype185, _size182) = iprot.readListBegin() - for _i186 in xrange(_size182): - _elem187 = Partition() - _elem187.read(iprot) - self.success.append(_elem187) + (_etype199, _size196) = iprot.readListBegin() + for _i200 in xrange(_size196): + _elem201 = Partition() + _elem201.read(iprot) + self.success.append(_elem201) iprot.readListEnd() else: iprot.skip(ftype) @@ -6014,8 +6445,8 @@ class get_partitions_ps_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter188 in self.success: - iter188.write(oprot) + for iter202 in self.success: + iter202.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -6081,10 +6512,10 @@ class get_partition_names_ps_args: elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype192, _size189) = iprot.readListBegin() - for _i193 in xrange(_size189): - _elem194 = iprot.readString(); - self.part_vals.append(_elem194) + (_etype206, _size203) = iprot.readListBegin() + for _i207 in xrange(_size203): + _elem208 = iprot.readString(); + self.part_vals.append(_elem208) iprot.readListEnd() else: iprot.skip(ftype) @@ -6114,8 +6545,8 @@ class get_partition_names_ps_args: if self.part_vals != None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter195 in self.part_vals: - oprot.writeString(iter195) + for iter209 in self.part_vals: + oprot.writeString(iter209) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts != None: @@ -6164,10 +6595,10 @@ class get_partition_names_ps_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype199, _size196) = iprot.readListBegin() - for 
_i200 in xrange(_size196): - _elem201 = iprot.readString(); - self.success.append(_elem201) + (_etype213, _size210) = iprot.readListBegin() + for _i214 in xrange(_size210): + _elem215 = iprot.readString(); + self.success.append(_elem215) iprot.readListEnd() else: iprot.skip(ftype) @@ -6190,8 +6621,8 @@ class get_partition_names_ps_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter202 in self.success: - oprot.writeString(iter202) + for iter216 in self.success: + oprot.writeString(iter216) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -6583,10 +7014,10 @@ class partition_name_to_vals_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype206, _size203) = iprot.readListBegin() - for _i207 in xrange(_size203): - _elem208 = iprot.readString(); - self.success.append(_elem208) + (_etype220, _size217) = iprot.readListBegin() + for _i221 in xrange(_size217): + _elem222 = iprot.readString(); + self.success.append(_elem222) iprot.readListEnd() else: iprot.skip(ftype) @@ -6609,8 +7040,8 @@ class partition_name_to_vals_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter209 in self.success: - oprot.writeString(iter209) + for iter223 in self.success: + oprot.writeString(iter223) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -6715,11 +7146,11 @@ class partition_name_to_spec_result: if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype211, _vtype212, _size210 ) = iprot.readMapBegin() - for _i214 in xrange(_size210): - _key215 = iprot.readString(); - _val216 = iprot.readString(); - self.success[_key215] = _val216 + (_ktype225, _vtype226, _size224 ) = iprot.readMapBegin() + for _i228 in xrange(_size224): + _key229 = iprot.readString(); + _val230 = iprot.readString(); + self.success[_key229] = _val230 iprot.readMapEnd() else: iprot.skip(ftype) @@ -6742,9 +7173,9 @@ class partition_name_to_spec_result: if self.success != None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter217,viter218 in self.success.items(): - oprot.writeString(kiter217) - oprot.writeString(viter218) + for kiter231,viter232 in self.success.items(): + oprot.writeString(kiter231) + oprot.writeString(viter232) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -7376,11 +7807,11 @@ class get_indexes_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype222, _size219) = iprot.readListBegin() - for _i223 in xrange(_size219): - _elem224 = Index() - _elem224.read(iprot) - self.success.append(_elem224) + (_etype236, _size233) = iprot.readListBegin() + for _i237 in xrange(_size233): + _elem238 = Index() + _elem238.read(iprot) + self.success.append(_elem238) iprot.readListEnd() else: iprot.skip(ftype) @@ -7409,8 +7840,8 @@ class get_indexes_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter225 in self.success: - iter225.write(oprot) + for iter239 in self.success: + iter239.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -7543,10 +7974,10 @@ class get_index_names_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype229, _size226) = iprot.readListBegin() - for _i230 in xrange(_size226): - _elem231 = iprot.readString(); - self.success.append(_elem231) + 
(_etype243, _size240) = iprot.readListBegin() + for _i244 in xrange(_size240): + _elem245 = iprot.readString(); + self.success.append(_elem245) iprot.readListEnd() else: iprot.skip(ftype) @@ -7569,8 +8000,8 @@ class get_index_names_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter232 in self.success: - oprot.writeString(iter232) + for iter246 in self.success: + oprot.writeString(iter246) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 != None: diff --git metastore/src/gen-py/hive_metastore/ttypes.py metastore/src/gen-py/hive_metastore/ttypes.py index ea7269e..9e2479b 100644 --- metastore/src/gen-py/hive_metastore/ttypes.py +++ metastore/src/gen-py/hive_metastore/ttypes.py @@ -270,17 +270,20 @@ class Database: Attributes: - name - description + - locationUri """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 (2, TType.STRING, 'description', None, None, ), # 2 + (3, TType.STRING, 'locationUri', None, None, ), # 3 ) - def __init__(self, name=None, description=None,): + def __init__(self, name=None, description=None, locationUri=None,): self.name = name self.description = description + self.locationUri = locationUri def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -301,6 +304,11 @@ class Database: self.description = iprot.readString(); else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.locationUri = iprot.readString(); + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -319,6 +327,10 @@ class Database: oprot.writeFieldBegin('description', TType.STRING, 2) oprot.writeString(self.description) oprot.writeFieldEnd() + if self.locationUri != None: + oprot.writeFieldBegin('locationUri', TType.STRING, 3) + oprot.writeString(self.locationUri) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 39dbd52..fa6b5ae 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -114,8 +114,7 @@ public class HiveAlterHandler implements AlterHandler { // that means user is asking metastore to move data to new location // corresponding to the new name // get new location - newTblLoc = wh.getDefaultTablePath(newt.getDbName(), - newt.getTableName()).toString(); + newTblLoc = wh.getDefaultTablePath(newt.getDbName(), newt.getTableName()).toString(); newt.getSd().setLocation(newTblLoc); oldTblLoc = oldt.getSd().getLocation(); moveData = true; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 4fb296a..a8df500 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -18,6 +18,11 @@ package org.apache.hadoop.hive.metastore; +import static org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static 
org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName; + import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; @@ -349,14 +354,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { return; } - private void createDefaultDB_core(RawStore ms) throws MetaException { + private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException { try { - ms.getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME); + ms.getDatabase(DEFAULT_DATABASE_NAME); } catch (NoSuchObjectException e) { ms.createDatabase( - new Database(MetaStoreUtils.DEFAULT_DATABASE_NAME, wh - .getDefaultDatabasePath(MetaStoreUtils.DEFAULT_DATABASE_NAME) - .toString())); + new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT, + wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString())); } HMSHandler.createDefaultDB = true; } @@ -378,6 +382,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { return Boolean.TRUE; } }); + } catch (InvalidObjectException e) { + throw new MetaException(e.getMessage()); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -400,9 +406,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { LOG.info(threadLocalId.get().toString() + ": " + m); } - private void logStartFunction(String f, String db, String tbl) { - LOG.info(threadLocalId.get().toString() + ": " + f + " : db=" + db - + " tbl=" + tbl); + private void logStartTableFunction(String f, String db, String tbl) { + LOG.info(threadLocalId.get().toString() + ": " + f + " : db=" + db + " tbl=" + tbl); + } + + private void logStartPartitionFunction(String f, String db, String tbl, List partVals) { + LOG.info(threadLocalId.get().toString() + ": " + f + " : db=" + db + " tbl=" + tbl + + "[" + join(partVals, ",") + "]"); } @Override @@ -420,48 +430,60 @@ public class HiveMetaStore extends ThriftHiveMetastore { System.exit(0); } - private boolean create_database_core(RawStore ms, final String name, - final String location_uri) throws AlreadyExistsException, MetaException { + private void create_database_core(RawStore ms, final Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException { + if (!validateName(db.getName())) { + throw new InvalidObjectException(db.getName() + " is not a valid database name"); + } boolean success = false; try { ms.openTransaction(); - Database db = new Database(name, location_uri); - if (ms.createDatabase(db) - && wh.mkdirs(wh.getDefaultDatabasePath(name))) { - success = ms.commitTransaction(); + if (null == db.getLocationUri()) { + db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString()); } + ms.createDatabase(db); + success = ms.commitTransaction(); } finally { if (!success) { ms.rollbackTransaction(); + } else { + wh.mkdirs(new Path(db.getLocationUri())); } } - return success; } - public boolean create_database(final String name, final String location_uri) - throws AlreadyExistsException, MetaException { + public void create_database(final Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException { incrementCounter("create_database"); - logStartFunction("create_database: " + name); - - Boolean ret = null; + logStartFunction("create_database: " + + db.getName() + " " + + db.getLocationUri() + " " + + db.getDescription()); try { - ret = executeWithRetry(new Command() { + try { + if(null != get_database(db.getName())) { + throw new AlreadyExistsException("Database " + db.getName() + " already exists"); + } + } catch (NoSuchObjectException e) { + // expected + } + 
executeWithRetry(new Command() { @Override Boolean run(RawStore ms) throws Exception { - boolean success = create_database_core(ms, name, location_uri); - return Boolean.valueOf(success); + create_database_core(ms, db); + return Boolean.TRUE; } }); } catch (AlreadyExistsException e) { throw e; + } catch (InvalidObjectException e) { + throw e; } catch (MetaException e) { throw e; } catch (Exception e) { assert(e instanceof RuntimeException); throw (RuntimeException)e; } - - return ret.booleanValue(); } public Database get_database(final String name) throws NoSuchObjectException, @@ -488,59 +510,68 @@ public class HiveMetaStore extends ThriftHiveMetastore { return db; } - private boolean drop_database_core(RawStore ms, final String name) throws MetaException { + private void drop_database_core(RawStore ms, + final String name, final boolean deleteData) + throws NoSuchObjectException, InvalidOperationException, MetaException { boolean success = false; + Database db = null; try { ms.openTransaction(); + db = ms.getDatabase(name); + if (!get_all_tables(db.getName()).isEmpty()) { + throw new InvalidOperationException("Database " + db.getName() + " is not empty"); + } if (ms.dropDatabase(name)) { success = ms.commitTransaction(); } } finally { if (!success) { ms.rollbackTransaction(); - } else { - wh.deleteDir(wh.getDefaultDatabasePath(name), true); + } else if (deleteData) { + wh.deleteDir(new Path(db.getLocationUri()), true); // it is not a terrible thing even if the data is not deleted } } - return success; } - public boolean drop_database(final String name) throws MetaException { + public void drop_database(final String dbName, final boolean deleteData) + throws NoSuchObjectException, InvalidOperationException, MetaException { incrementCounter("drop_database"); - logStartFunction("drop_database: " + name); - if (name.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { - throw new MetaException("Can't drop default database"); + logStartFunction("drop_database: " + dbName); + if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) { + throw new MetaException("Can not drop default database"); } - Boolean ret = null; try { - ret = executeWithRetry(new Command() { + executeWithRetry(new Command() { @Override Boolean run(RawStore ms) throws Exception { - boolean success = drop_database_core(ms, name); - return Boolean.valueOf(success); + drop_database_core(ms, dbName, deleteData); + return Boolean.TRUE; } }); + } catch (NoSuchObjectException e) { + throw e; + } catch (InvalidOperationException e) { + throw e; } catch (MetaException e) { throw e; } catch (Exception e) { assert(e instanceof RuntimeException); throw (RuntimeException)e; } - return ret.booleanValue(); } - public List get_databases() throws MetaException { + public List get_databases(final String pattern) throws MetaException { incrementCounter("get_databases"); - logStartFunction("get_databases"); + logStartFunction("get_databases: " + pattern); List ret = null; try { ret = executeWithRetry(new Command>() { @Override List run(RawStore ms) throws Exception { - return ms.getDatabases(); + return ms.getDatabases(pattern); } }); } catch (MetaException e) { @@ -552,23 +583,59 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret; } + public List get_all_databases() throws MetaException { + incrementCounter("get_all_databases"); + logStartFunction("get_all_databases"); + + List ret = null; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + return ms.getAllDatabases(); 
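The reworked create_database/drop_database path above changes the client contract: creation now takes a full Database struct (with the new locationUri field) and both calls return void, signalling failure through exceptions instead of booleans. A minimal sketch, assuming a hypothetical database named "sales" and letting the server pick the default warehouse path:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Database;

    public class DatabaseLifecycleExample {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client =
            new HiveMetaStoreClient(new HiveConf(DatabaseLifecycleExample.class));
        // A null locationUri lets create_database_core fill in the default path.
        Database db = new Database("sales", "example database", null);
        client.createDatabase(db);                 // AlreadyExistsException if present
        client.dropDatabase("sales", true, false); // deleteData=true, ignoreUnknownDb=false
        client.close();
      }
    }

Note that drop_database_core now refuses to drop a non-empty database (InvalidOperationException), so its tables must be dropped first.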
+ } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + assert(e instanceof RuntimeException); + throw (RuntimeException)e; + } + return ret; + } + + private void create_type_core(final RawStore ms, final Type type) + throws AlreadyExistsException, MetaException, InvalidObjectException { + if (!MetaStoreUtils.validateName(type.getName())) { + throw new InvalidObjectException("Invalid type name"); + } + + boolean success = false; + try { + ms.openTransaction(); + if (is_type_exists(type.getName())) { + throw new AlreadyExistsException("Type " + type.getName() + " already exists"); + } + ms.createType(type); + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + public boolean create_type(final Type type) throws AlreadyExistsException, MetaException, InvalidObjectException { incrementCounter("create_type"); logStartFunction("create_type: " + type.getName()); - // check whether type already exists - if (get_type(type.getName()) != null) { - throw new AlreadyExistsException("Type " + type.getName() - + " already exists"); - } - Boolean ret = null; try { ret = executeWithRetry(new Command() { @Override Boolean run(RawStore ms) throws Exception { - // TODO:pc Validation of types should be done by clients or here???? - return Boolean.valueOf(ms.createType(type)); + create_type_core(ms, type); + return Boolean.TRUE; } }); } catch (AlreadyExistsException e) { @@ -585,7 +652,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret.booleanValue(); } - public Type get_type(final String name) throws MetaException { + public Type get_type(final String name) throws MetaException, NoSuchObjectException { incrementCounter("get_type"); logStartFunction("get_type: " + name); @@ -594,9 +661,15 @@ public class HiveMetaStore extends ThriftHiveMetastore { ret = executeWithRetry(new Command() { @Override Type run(RawStore ms) throws Exception { - return ms.getType(name); + Type type = ms.getType(name); + if (null == type) { + throw new NoSuchObjectException("Type \"" + name + "\" not found."); + } + return type; } }); + } catch (NoSuchObjectException e) { + throw e; } catch (MetaException e) { throw e; } catch (Exception e) { @@ -606,6 +679,37 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret; } + public boolean is_type_exists(String typeName) throws MetaException { + incrementCounter("is_type_exists"); + logStartFunction("is_type_exists: " + typeName); + try { + return (get_type(typeName) != null); + } catch (NoSuchObjectException e) { + return false; + } + } + + private void drop_type_core(final RawStore ms, String typeName) + throws NoSuchObjectException, MetaException { + boolean success = false; + try { + ms.openTransaction(); + // drop any partitions + if (!is_type_exists(typeName)) { + throw new NoSuchObjectException(typeName + " doesn't exist"); + } + if (!ms.dropType(typeName)) { + throw new MetaException("Unable to drop type " + typeName); + } + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + + public boolean drop_type(final String name) throws MetaException { incrementCounter("drop_type"); logStartFunction("drop_type: " + name); @@ -631,7 +735,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public Map get_type_all(String name) throws MetaException { incrementCounter("get_type_all"); // TODO Auto-generated method stub - logStartFunction("get_type_all"); + logStartFunction("get_type_all: " + name); throw new 
MetaException("Not yet implemented"); } @@ -650,13 +754,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { boolean success = false, madeDir = false; try { ms.openTransaction(); - + // get_table checks whether database exists, it should be moved here if (is_table_exists(tbl.getDbName(), tbl.getTableName())) { throw new AlreadyExistsException("Table " + tbl.getTableName() + " already exists"); } - + if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) { if (tbl.getSd().getLocation() == null || tbl.getSd().getLocation().isEmpty()) { @@ -727,6 +831,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { public boolean is_table_exists(String dbname, String name) throws MetaException { + incrementCounter("is_table_exists"); + logStartTableFunction("is_table_exists", dbname, name); try { return (get_table(dbname, name) != null); } catch (NoSuchObjectException e) { @@ -754,7 +860,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { if (tbl.getSd() == null) { throw new MetaException("Table metadata is corrupted"); } - + isIndexTable = isIndexTable(tbl); if (isIndexTable) { throw new RuntimeException( @@ -778,7 +884,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { if (tbl.getSd().getLocation() != null) { tblPath = new Path(tbl.getSd().getLocation()); } - + if (!ms.dropTable(dbname, name)) { throw new MetaException("Unable to drop table"); } @@ -797,7 +903,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public void drop_table(final String dbname, final String name, final boolean deleteData) throws NoSuchObjectException, MetaException { incrementCounter("drop_table"); - logStartFunction("drop_table", dbname, name); + logStartTableFunction("drop_table", dbname, name); try { executeWithRetry(new Command() { @@ -828,7 +934,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { private boolean isExternal(Table table) { return MetaStoreUtils.isExternalTable(table); } - + private boolean isIndexTable (Table table) { return MetaStoreUtils.isIndexTable(table); } @@ -837,7 +943,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { NoSuchObjectException { Table t = null; incrementCounter("get_table"); - logStartFunction("get_table", dbname, name); + logStartTableFunction("get_table", dbname, name); try { t = executeWithRetry(new Command() { @Override @@ -864,7 +970,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public boolean set_table_parameters(String dbname, String name, Map params) throws NoSuchObjectException, MetaException { incrementCounter("set_table_parameters"); - logStartFunction("set_table_parameters", dbname, name); + logStartTableFunction("set_table_parameters", dbname, name); // TODO Auto-generated method stub return false; } @@ -938,7 +1044,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { final List part_vals) throws InvalidObjectException, AlreadyExistsException, MetaException { incrementCounter("append_partition"); - logStartFunction("append_partition", dbName, tableName); + logStartPartitionFunction("append_partition", dbName, tableName, part_vals); if (LOG.isDebugEnabled()) { for (String part : part_vals) { LOG.debug(part); @@ -970,7 +1076,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { throws MetaException, InvalidObjectException, AlreadyExistsException { String db = parts.get(0).getDbName(); String tbl = parts.get(0).getTableName(); - logStartFunction("add_partitions", db, tbl); + logStartTableFunction("add_partitions", db, tbl); boolean success = false; try { 
ms.openTransaction(); @@ -1083,7 +1189,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public Partition add_partition(final Partition part) throws InvalidObjectException, AlreadyExistsException, MetaException { incrementCounter("add_partition"); - logStartFunction("add_partition", part.getDbName(), part.getTableName()); + logStartTableFunction("add_partition", part.getDbName(), part.getTableName()); Partition ret = null; try { @@ -1164,7 +1270,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { final List part_vals, final boolean deleteData) throws NoSuchObjectException, MetaException, TException { incrementCounter("drop_partition"); - logStartFunction("drop_partition", db_name, tbl_name); + logStartPartitionFunction("drop_partition", db_name, tbl_name, part_vals); LOG.info("Partition values:" + part_vals); Boolean ret = null; @@ -1193,7 +1299,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public Partition get_partition(final String db_name, final String tbl_name, final List part_vals) throws MetaException, NoSuchObjectException { incrementCounter("get_partition"); - logStartFunction("get_partition", db_name, tbl_name); + logStartPartitionFunction("get_partition", db_name, tbl_name, part_vals); Partition ret = null; try { @@ -1217,7 +1323,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_partitions(final String db_name, final String tbl_name, final short max_parts) throws NoSuchObjectException, MetaException { incrementCounter("get_partitions"); - logStartFunction("get_partitions", db_name, tbl_name); + logStartTableFunction("get_partitions", db_name, tbl_name); List ret = null; try { @@ -1242,7 +1348,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_partition_names(final String db_name, final String tbl_name, final short max_parts) throws MetaException { incrementCounter("get_partition_names"); - logStartFunction("get_partition_names", db_name, tbl_name); + logStartTableFunction("get_partition_names", db_name, tbl_name); List ret = null; try { @@ -1277,7 +1383,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { final Partition new_part) throws InvalidOperationException, MetaException, TException { incrementCounter("alter_partition"); - logStartFunction("alter_partition", db_name, tbl_name); + logStartTableFunction("alter_partition", db_name, tbl_name); LOG.info("Partition values:" + new_part.getValues()); try { @@ -1338,7 +1444,6 @@ public class HiveMetaStore extends ThriftHiveMetastore { assert(e instanceof RuntimeException); throw (RuntimeException)e; } - } public List get_tables(final String dbname, final String pattern) @@ -1361,7 +1466,27 @@ public class HiveMetaStore extends ThriftHiveMetastore { throw (RuntimeException)e; } return ret; + } + + public List get_all_tables(final String dbname) throws MetaException { + incrementCounter("get_all_tables"); + logStartFunction("get_all_tables: db=" + dbname); + List ret; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + return ms.getAllTables(dbname); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + assert(e instanceof RuntimeException); + throw (RuntimeException)e; + } + return ret; } public List get_fields(String db, String tableName) @@ -1622,7 +1747,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List part_vals, short max_parts) throws MetaException, TException { incrementCounter("get_partitions_ps"); - 
logStartFunction("get_partitions_ps", db_name, tbl_name); + logStartPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals); List parts = null; List matchingParts = new ArrayList(); @@ -1650,7 +1775,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, TException { incrementCounter("get_partition_names_ps"); - logStartFunction("get_partitions_names_ps", db_name, tbl_name); + logStartPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals); Table t; try { t = get_table(db_name, tbl_name); @@ -1724,12 +1849,12 @@ public class HiveMetaStore extends ThriftHiveMetastore { } return ret; } - + private Index add_index_core(final RawStore ms, final Index index, final Table indexTable) throws InvalidObjectException, AlreadyExistsException, MetaException { - + boolean success = false, indexTableCreated = false; - + try { ms.openTransaction(); Index old_index = null; @@ -1746,13 +1871,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { throw new InvalidObjectException( "Unable to add index because database or the orginal table do not exist"); } - + // set create time long time = System.currentTimeMillis() / 1000; Table indexTbl = indexTable; if (indexTbl != null) { try { - indexTbl = ms.getTable(index.getDbName(), index.getIndexTableName()); + indexTbl = ms.getTable(index.getDbName(), index.getIndexTableName()); } catch (Exception e) { } if (indexTbl != null) { @@ -1812,7 +1937,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret.booleanValue(); } - + private boolean drop_index_by_name_core(final RawStore ms, final String dbName, final String tblName, final String indexName, final boolean deleteData) throws NoSuchObjectException, @@ -1822,14 +1947,14 @@ public class HiveMetaStore extends ThriftHiveMetastore { Path tblPath = null; try { ms.openTransaction(); - + //drop the underlying index table Index index = get_index_by_name(dbName, tblName, indexName); if (index == null) { throw new NoSuchObjectException(indexName + " doesn't exist"); } ms.dropIndex(dbName, tblName, indexName); - + String idxTblName = index.getIndexTableName(); if (idxTblName != null) { Table tbl = null; @@ -1837,7 +1962,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { if (tbl.getSd() == null) { throw new MetaException("Table metadata is corrupted"); } - + if (tbl.getSd().getLocation() != null) { tblPath = new Path(tbl.getSd().getLocation()); } @@ -1889,7 +2014,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { } return ret; } - + private Index get_index_by_name_core(final RawStore ms, final String db_name, final String tbl_name, final String index_name) throws MetaException, NoSuchObjectException, TException { @@ -1906,7 +2031,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_index_names(final String dbName, final String tblName, final short maxIndexes) throws MetaException, TException { incrementCounter("get_index_names"); - logStartFunction("get_index_names", dbName, tblName); + logStartTableFunction("get_index_names", dbName, tblName); List ret = null; try { @@ -1929,8 +2054,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_indexes(final String dbName, final String tblName, final short maxIndexes) throws NoSuchObjectException, MetaException, TException { - incrementCounter("get_indexs"); - logStartFunction("get_indexs", dbName, tblName); + 
incrementCounter("get_indexes"); + logStartTableFunction("get_indexes", dbName, tblName); List ret = null; try { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index c6541af..7fd99d8 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -79,6 +79,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { conf = new HiveConf(HiveMetaStoreClient.class); } + boolean localMetaStore = conf.getBoolean("hive.metastore.local", false); if (localMetaStore) { // instantiate the metastore server handler directly instead of connecting @@ -204,19 +205,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient { } } - public void dropTable(String tableName, boolean deleteData) - throws MetaException, NoSuchObjectException { - // assume that it is default database - try { - this.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, - deleteData, false); - } catch (NoSuchObjectException e) { - throw e; - } catch (Exception e) { - MetaStoreUtils.logAndThrowMetaException(e); - } - } - /** * @param new_part * @return the added partition @@ -256,19 +244,21 @@ public class HiveMetaStoreClient implements IMetaStoreClient { return deepCopy( client.append_partition_by_name(dbName, tableName, partName)); } + /** - * @param name - * @param location_uri + * Create a new Database + * @param db * @return true or false * @throws AlreadyExistsException + * @throws InvalidObjectException * @throws MetaException * @throws TException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(java.lang.String, * java.lang.String) */ - public boolean createDatabase(String name, String location_uri) - throws AlreadyExistsException, MetaException, TException { - return client.create_database(name, location_uri); + public void createDatabase(Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + client.create_database(db); } /** @@ -315,14 +305,32 @@ public class HiveMetaStoreClient implements IMetaStoreClient { /** * @param name * @return true or false + * @throws NoSuchObjectException + * @throws InvalidOperationException * @throws MetaException * @throws TException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String) */ - public boolean dropDatabase(String name) throws MetaException, TException { - return client.drop_database(name); + public void dropDatabase(String name) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(name, true, false); } + + public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + try { + getDatabase(name); + } catch (NoSuchObjectException e) { + if (!ignoreUnknownDb) { + throw e; + } + return; + } + client.drop_database(name, deleteData); + } + + /** * @param tbl_name * @param db_name @@ -431,7 +439,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { * @throws TException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String) */ - public boolean dropType(String type) throws MetaException, TException { + public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException { return 
client.drop_type(type); } @@ -461,8 +469,29 @@ public class HiveMetaStoreClient implements IMetaStoreClient { * @throws TException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_databases() */ - public List getDatabases() throws MetaException, TException { - return client.get_databases(); + public List getDatabases(String databasePattern) + throws MetaException { + try { + return client.get_databases(databasePattern); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** + * @return the list of databases + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_all_databases() + */ + public List getAllDatabases() throws MetaException { + try { + return client.get_all_databases(); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; } /** @@ -537,14 +566,14 @@ public class HiveMetaStoreClient implements IMetaStoreClient { * @return the type * @throws MetaException * @throws TException + * @throws NoSuchObjectException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String) */ - public Type getType(String name) throws MetaException, TException { + public Type getType(String name) throws NoSuchObjectException, MetaException, TException { return deepCopy(client.get_type(name)); } - public List getTables(String dbname, String tablePattern) - throws MetaException { + public List getTables(String dbname, String tablePattern) throws MetaException { try { return client.get_tables(dbname, tablePattern); } catch (Exception e) { @@ -553,26 +582,25 @@ public class HiveMetaStoreClient implements IMetaStoreClient { return null; } - public List getTables(String tablePattern) throws MetaException { - String dbname = MetaStoreUtils.DEFAULT_DATABASE_NAME; - return this.getTables(dbname, tablePattern); + public List getAllTables(String dbname) throws MetaException { + try { + return client.get_all_tables(dbname); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; } - public boolean tableExists(String tableName) throws MetaException, + public boolean tableExists(String databaseName, String tableName) throws MetaException, TException, UnknownDBException { try { - client.get_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + client.get_table(databaseName, tableName); } catch (NoSuchObjectException e) { return false; } return true; } - public Table getTable(String tableName) throws MetaException, TException, - NoSuchObjectException { - return getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); - } - public List listPartitionNames(String dbName, String tblName, short max) throws MetaException, TException { return client.get_partition_names(dbName, tblName, max); @@ -604,7 +632,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { UnknownDBException { return deepCopyFieldSchemas(client.get_fields(db, tableName)); } - + /** * create an index * @param index the index object @@ -613,12 +641,12 @@ public class HiveMetaStoreClient implements IMetaStoreClient { * @throws MetaException * @throws NoSuchObjectException * @throws TException - * @throws AlreadyExistsException + * @throws AlreadyExistsException */ public void createIndex(Index index, Table indexTable) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { client.add_index(index, indexTable); } - + /** * @param dbName * @param 
tblName @@ -652,7 +680,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { /** * list all the index names of the give base table. - * + * * @param db_name * @param tbl_name * @param max @@ -664,7 +692,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { throws NoSuchObjectException, MetaException, TException { return client.get_indexes(dbName, tblName, max); } - + /** * @param db * @param tableName diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 6013644..20deb21 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -23,9 +23,9 @@ import java.util.Map; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.IndexAlreadyExistsException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -44,29 +44,18 @@ public interface IMetaStoreClient { public void close(); + public List getDatabases(String databasePattern) + throws MetaException, TException; + + public List getAllDatabases() + throws MetaException, TException; + public List getTables(String dbName, String tablePattern) - throws MetaException, UnknownTableException, TException, - UnknownDBException; + throws MetaException, TException, UnknownDBException; + + public List getAllTables(String dbName) + throws MetaException, TException, UnknownDBException; - /** - * Drop the table. - * - * @param tableName - * The table to drop - * @param deleteData - * Should we delete the underlying data - * @throws MetaException - * Could not drop table properly. - * @throws UnknownTableException - * The table wasn't found. - * @throws TException - * A thrift communication error occurred - * @throws NoSuchObjectException - * The table wasn't found. - */ - public void dropTable(String tableName, boolean deleteData) - throws MetaException, UnknownTableException, TException, - NoSuchObjectException; /** * Drop the table. @@ -87,28 +76,27 @@ public interface IMetaStoreClient { boolean ignoreUknownTab) throws MetaException, TException, NoSuchObjectException; + public void dropTable(String dbname, String tableName) + throws MetaException, TException, NoSuchObjectException; + // public void createTable(String tableName, Properties schema) throws // MetaException, UnknownTableException, // TException; - public boolean tableExists(String tableName) throws MetaException, + public boolean tableExists(String databaseName, String tableName) throws MetaException, TException, UnknownDBException; /** - * Get a table object. - * - * @param tableName - * Name of the table to fetch. - * @return An object representing the table. - * @throws MetaException - * Could not fetch the table - * @throws TException - * A thrift communication error occurred - * @throws NoSuchObjectException - * In case the table wasn't found. 
+ * Get a Database Object + * @param databaseName name of the database to fetch + * @return + * @throws NoSuchObjectException The database does not exist + * @throws MetaException Could not fetch the database + * @throws TException A thrift communication error occurred */ - public Table getTable(String tableName) throws MetaException, TException, - NoSuchObjectException; + public Database getDatabase(String databaseName) + throws NoSuchObjectException, MetaException, TException; + /** * Get a table object. @@ -227,10 +215,14 @@ public interface IMetaStoreClient { public void alter_table(String defaultDatabaseName, String tblName, Table table) throws InvalidOperationException, MetaException, TException; - public boolean createDatabase(String name, String location_uri) - throws AlreadyExistsException, MetaException, TException; + public void createDatabase(Database db) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + public void dropDatabase(String name) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException; - public boolean dropDatabase(String name) throws MetaException, TException; + public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException; /** * @param db_name @@ -339,13 +331,13 @@ public interface IMetaStoreClient { * @throws MetaException * @throws NoSuchObjectException * @throws TException - * @throws AlreadyExistsException + * @throws AlreadyExistsException */ public void createIndex(Index index, Table indexTable) throws InvalidObjectException, MetaException, NoSuchObjectException, TException, AlreadyExistsException; /** - * + * * @param dbName * @param tblName * @param indexName @@ -375,7 +367,7 @@ public interface IMetaStoreClient { /** * list all the index names of the give base table. 
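With the single-argument conveniences gone from the interface above, every table lookup is now database-qualified, and getDatabase replaces the implicit default-database getTable. A minimal sketch of the adjusted call sites (names hypothetical, error handling elided):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class QualifiedLookupExample {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client =
            new HiveMetaStoreClient(new HiveConf(QualifiedLookupExample.class));
        Database db = client.getDatabase("default"); // new db-level accessor
        if (client.tableExists("default", "src")) {  // database name is now required
          Table t = client.getTable("default", "src");
          System.out.println(t.getTableName() + " @ " + db.getLocationUri());
        }
        client.close();
      }
    }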
- * + * * @param db_name * @param tbl_name * @param max @@ -385,7 +377,7 @@ public interface IMetaStoreClient { */ public List listIndexNames(String db_name, String tbl_name, short max) throws MetaException, TException; - + /** * @param db_name * @param tbl_name diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 0818689..2f6a427 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -60,7 +60,8 @@ public class MetaStoreUtils { protected static final Log LOG = LogFactory.getLog("hive.log"); public static final String DEFAULT_DATABASE_NAME = "default"; - + public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database"; + /** * printStackTrace * @@ -324,135 +325,6 @@ public class MetaStoreUtils { return "map<" + k + "," + v + ">"; } - public static Table getTable(Configuration conf, Properties schema) - throws MetaException { - Table t = new Table(); - t.setSd(new StorageDescriptor()); - t - .setTableName(schema - .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME)); - t - .getSd() - .setLocation( - schema - .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_LOCATION)); - t.getSd().setInputFormat( - schema.getProperty( - org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT, - org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName())); - t.getSd().setOutputFormat( - schema.getProperty( - org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT, - org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName())); - t.setPartitionKeys(new ArrayList()); - t.setDbName(MetaStoreUtils.DEFAULT_DATABASE_NAME); - String part_cols_str = schema - .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS); - t.setPartitionKeys(new ArrayList()); - if (part_cols_str != null && (part_cols_str.trim().length() != 0)) { - String[] part_keys = part_cols_str.trim().split("/"); - for (String key : part_keys) { - FieldSchema part = new FieldSchema(); - part.setName(key); - part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default - // partition - // key - t.getPartitionKeys().add(part); - } - } - t.getSd() - .setNumBuckets( - Integer.parseInt(schema.getProperty( - org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT, - "-1"))); - String bucketFieldName = schema - .getProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME); - t.getSd().setBucketCols(new ArrayList(1)); - if ((bucketFieldName != null) && (bucketFieldName.trim().length() != 0)) { - t.getSd().setBucketCols(new ArrayList(1)); - t.getSd().getBucketCols().add(bucketFieldName); - } - - t.getSd().setSerdeInfo(new SerDeInfo()); - t.getSd().getSerdeInfo().setParameters(new HashMap()); - t.getSd().getSerdeInfo().setName(t.getTableName()); - t - .getSd() - .getSerdeInfo() - .setSerializationLib( - schema - .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB)); - setSerdeParam(t.getSd().getSerdeInfo(), schema, - org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS); - setSerdeParam(t.getSd().getSerdeInfo(), schema, - org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT); - if (org.apache.commons.lang.StringUtils - .isNotBlank(schema - .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS))) { - setSerdeParam(t.getSd().getSerdeInfo(), 
schema, - org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_SERDE); - } - // needed for MetadataTypedColumnSetSerDe and LazySimpleSerDe - setSerdeParam(t.getSd().getSerdeInfo(), schema, - org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS); - // needed for LazySimpleSerDe - setSerdeParam(t.getSd().getSerdeInfo(), schema, - org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMN_TYPES); - // needed for DynamicSerDe - setSerdeParam(t.getSd().getSerdeInfo(), schema, - org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL); - - String colstr = schema - .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS); - List fields = new ArrayList(); - if (colstr != null) { - String[] cols = colstr.split(","); - for (String colName : cols) { - FieldSchema col = new FieldSchema(colName, - org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, - "'default'"); - fields.add(col); - } - } - - if (fields.size() == 0) { - // get the fields from serde - try { - fields = getFieldsFromDeserializer(t.getTableName(), getDeserializer( - conf, schema)); - } catch (SerDeException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new MetaException("Invalid serde or schema. " + e.getMessage()); - } - } - t.getSd().setCols(fields); - - t.setOwner(schema.getProperty("creator")); - - // remove all the used up parameters to find out the remaining parameters - schema.remove(Constants.META_TABLE_NAME); - schema.remove(Constants.META_TABLE_LOCATION); - schema.remove(Constants.FILE_INPUT_FORMAT); - schema.remove(Constants.FILE_OUTPUT_FORMAT); - schema.remove(Constants.META_TABLE_PARTITION_COLUMNS); - schema.remove(Constants.BUCKET_COUNT); - schema.remove(Constants.BUCKET_FIELD_NAME); - schema.remove(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS); - schema.remove(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT); - schema.remove(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB); - schema.remove(Constants.META_TABLE_SERDE); - schema.remove(Constants.META_TABLE_COLUMNS); - schema.remove(Constants.META_TABLE_COLUMN_TYPES); - - // add the remaining unknown parameters to the table's parameters - t.setParameters(new HashMap()); - for (Entry e : schema.entrySet()) { - t.getParameters().put(e.getKey().toString(), e.getValue().toString()); - } - - return t; - } - public static void setSerdeParam(SerDeInfo sdi, Properties schema, String param) { String val = schema.getProperty(param); @@ -883,7 +755,7 @@ public class MetaStoreUtils { } return true; } - + public static String getIndexTableName(String dbName, String baseTblName, String indexName) { return dbName + "__" + baseTblName + "_" + indexName + "__"; } @@ -894,5 +766,5 @@ public class MetaStoreUtils { } return TableType.INDEX_TABLE.toString().equals(table.getTableType()); } - + } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index a06384c..9b1ea25 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -286,126 +286,133 @@ public class ObjectStore implements RawStore, Configurable { } } - public boolean createDatabase(Database db) { - boolean success = false; + public void createDatabase(Database db) throws InvalidObjectException, MetaException { boolean commited = false; - MDatabase mdb = new MDatabase(db.getName().toLowerCase(), db - .getDescription()); + MDatabase mdb = new 
MDatabase(); + mdb.setName(db.getName().toLowerCase()); + mdb.setLocationUri(db.getLocationUri()); + mdb.setDescription(db.getDescription()); try { openTransaction(); pm.makePersistent(mdb); - success = true; commited = commitTransaction(); } finally { if (!commited) { rollbackTransaction(); } } - return success; - } - - public boolean createDatabase(String name) { - // TODO: get default path - Database db = new Database(name, "default_path"); - return this.createDatabase(db); } @SuppressWarnings("nls") private MDatabase getMDatabase(String name) throws NoSuchObjectException { - MDatabase db = null; + MDatabase mdb = null; boolean commited = false; try { openTransaction(); - name = name.toLowerCase(); + name = name.toLowerCase().trim(); Query query = pm.newQuery(MDatabase.class, "name == dbname"); query.declareParameters("java.lang.String dbname"); query.setUnique(true); - db = (MDatabase) query.execute(name.trim()); - pm.retrieve(db); + mdb = (MDatabase) query.execute(name); + pm.retrieve(mdb); commited = commitTransaction(); } finally { if (!commited) { rollbackTransaction(); } } - if (db == null) { + if (mdb == null) { throw new NoSuchObjectException("There is no database named " + name); } - return db; + return mdb; } public Database getDatabase(String name) throws NoSuchObjectException { - MDatabase db = null; + MDatabase mdb = null; boolean commited = false; try { openTransaction(); - db = getMDatabase(name); + mdb = getMDatabase(name); commited = commitTransaction(); } finally { if (!commited) { rollbackTransaction(); } } - return new Database(db.getName(), db.getDescription()); + Database db = new Database(); + db.setName(mdb.getName()); + db.setDescription(mdb.getDescription()); + db.setLocationUri(mdb.getLocationUri()); + return db; } - public boolean dropDatabase(String dbname) { - + public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { boolean success = false; - boolean commited = false; + LOG.info("Dropping database " + dbname + " along with all tables"); + dbname = dbname.toLowerCase(); try { openTransaction(); // first drop tables - dbname = dbname.toLowerCase(); - LOG.info("Dropping database along with all tables " + dbname); - Query q1 = pm.newQuery(MTable.class, "database.name == dbName"); - q1.declareParameters("java.lang.String dbName"); - List mtbls = (List) q1.execute(dbname.trim()); - pm.deletePersistentAll(mtbls); + for (String tableName : getAllTables(dbname)) { + dropTable(dbname, tableName); + } // then drop the database - Query query = pm.newQuery(MDatabase.class, "name == dbName"); - query.declareParameters("java.lang.String dbName"); - query.setUnique(true); - MDatabase db = (MDatabase) query.execute(dbname.trim()); + MDatabase db = getMDatabase(dbname); pm.retrieve(db); - - // StringIdentity id = new StringIdentity(MDatabase.class, dbname); - // MDatabase db = (MDatabase) pm.getObjectById(id); if (db != null) { pm.deletePersistent(db); } - commited = commitTransaction(); - success = true; - } catch (JDOObjectNotFoundException e) { - LOG.debug("database not found " + dbname, e); - commited = commitTransaction(); + success = commitTransaction(); } finally { - if (!commited) { + if (!success) { rollbackTransaction(); } } return success; } - public List getDatabases() { - List dbs = null; + + public List getDatabases(String pattern) throws MetaException { boolean commited = false; + List databases = null; try { openTransaction(); - Query query = pm.newQuery(MDatabase.class); - query.setResult("name"); - 
query.setResultClass(String.class); - query.setOrdering("name asc"); - dbs = (List) query.execute(); + // Take the pattern and split it on the | to get all the composing + // patterns + String[] subpatterns = pattern.trim().split("\\|"); + String query = "select name from org.apache.hadoop.hive.metastore.model.MDatabase where ("; + boolean first = true; + for (String subpattern : subpatterns) { + subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); + if (!first) { + query = query + " || "; + } + query = query + " name.matches(\"" + subpattern + "\")"; + first = false; + } + query = query + ")"; + + Query q = pm.newQuery(query); + q.setResult("name"); + q.setOrdering("name ascending"); + Collection names = (Collection) q.execute(); + databases = new ArrayList(); + for (Iterator i = names.iterator(); i.hasNext();) { + databases.add((String) i.next()); + } commited = commitTransaction(); } finally { if (!commited) { rollbackTransaction(); } } - return dbs; + return databases; + } + + public List getAllDatabases() throws MetaException { + return getDatabases(".*"); } private MType getMType(Type type) { @@ -470,7 +477,6 @@ public class ObjectStore implements RawStore, Configurable { } public boolean dropType(String typeName) { - boolean success = false; boolean commited = false; try { @@ -494,8 +500,7 @@ public class ObjectStore implements RawStore, Configurable { return success; } - public void createTable(Table tbl) throws InvalidObjectException, - MetaException { + public void createTable(Table tbl) throws InvalidObjectException, MetaException { boolean commited = false; try { openTransaction(); @@ -509,8 +514,7 @@ public class ObjectStore implements RawStore, Configurable { } } - public boolean dropTable(String dbName, String tableName) { - + public boolean dropTable(String dbName, String tableName) throws MetaException { boolean success = false; try { openTransaction(); @@ -552,11 +556,13 @@ public class ObjectStore implements RawStore, Configurable { List tbls = null; try { openTransaction(); - dbName = dbName.toLowerCase(); + dbName = dbName.toLowerCase().trim(); // Take the pattern and split it on the | to get all the composing // patterns String[] subpatterns = pattern.trim().split("\\|"); - String query = "select tableName from org.apache.hadoop.hive.metastore.model.MTable where database.name == dbName && ("; + String query = + "select tableName from org.apache.hadoop.hive.metastore.model.MTable " + + "where database.name == dbName && ("; boolean first = true; for (String subpattern : subpatterns) { subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); @@ -571,7 +577,8 @@ public class ObjectStore implements RawStore, Configurable { Query q = pm.newQuery(query); q.declareParameters("java.lang.String dbName"); q.setResult("tableName"); - Collection names = (Collection) q.execute(dbName.trim()); + q.setOrdering("tableName ascending"); + Collection names = (Collection) q.execute(dbName); tbls = new ArrayList(); for (Iterator i = names.iterator(); i.hasNext();) { tbls.add((String) i.next()); @@ -585,18 +592,21 @@ public class ObjectStore implements RawStore, Configurable { return tbls; } + public List getAllTables(String dbName) throws MetaException { + return getTables(dbName, ".*"); + } + private MTable getMTable(String db, String table) { MTable mtbl = null; boolean commited = false; try { openTransaction(); - db = db.toLowerCase(); - table = table.toLowerCase(); - Query query = pm.newQuery(MTable.class, - "tableName == table && database.name == db"); + db = 
db.toLowerCase().trim(); + table = table.toLowerCase().trim(); + Query query = pm.newQuery(MTable.class, "tableName == table && database.name == db"); query.declareParameters("java.lang.String table, java.lang.String db"); query.setUnique(true); - mtbl = (MTable) query.execute(table.trim(), db.trim()); + mtbl = (MTable) query.execute(table, db); pm.retrieve(mtbl); commited = commitTransaction(); } finally { @@ -641,7 +651,7 @@ public class ObjectStore implements RawStore, Configurable { } catch (NoSuchObjectException e) { LOG.error(StringUtils.stringifyException(e)); throw new InvalidObjectException("Database " + tbl.getDbName() - + " doesn't exsit."); + + " doesn't exist."); } // If the table has property EXTERNAL set, update table type @@ -790,8 +800,8 @@ public class ObjectStore implements RawStore, Configurable { boolean commited = false; try { openTransaction(); - dbName = dbName.toLowerCase(); - tableName = tableName.toLowerCase(); + dbName = dbName.toLowerCase().trim(); + tableName = tableName.toLowerCase().trim(); MTable mtbl = getMTable(dbName, tableName); if (mtbl == null) { commited = commitTransaction(); @@ -801,13 +811,11 @@ public class ObjectStore implements RawStore, Configurable { // redundant String name = Warehouse.makePartName(convertToFieldSchemas(mtbl .getPartitionKeys()), part_vals); - Query query = pm - .newQuery(MPartition.class, - "table.tableName == t1 && table.database.name == t2 && partitionName == t3"); - query - .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + Query query = pm.newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2 && partitionName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); query.setUnique(true); - mpart = (MPartition) query.execute(tableName.trim(), dbName.trim(), name); + mpart = (MPartition) query.execute(tableName, dbName, name); pm.retrieve(mpart); commited = commitTransaction(); } finally { @@ -888,14 +896,15 @@ public class ObjectStore implements RawStore, Configurable { try { openTransaction(); LOG.debug("Executing getPartitionNames"); - dbName = dbName.toLowerCase(); - tableName = tableName.toLowerCase(); - Query q = pm - .newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition where table.database.name == t1 && table.tableName == t2 order by partitionName asc"); + dbName = dbName.toLowerCase().trim(); + tableName = tableName.toLowerCase().trim(); + Query q = pm.newQuery( + "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + + "where table.database.name == t1 && table.tableName == t2 " + + "order by partitionName asc"); q.declareParameters("java.lang.String t1, java.lang.String t2"); q.setResult("partitionName"); - Collection names = (Collection) q - .execute(dbName.trim(), tableName.trim()); + Collection names = (Collection) q.execute(dbName, tableName); pns = new ArrayList(); for (Iterator i = names.iterator(); i.hasNext();) { pns.add((String) i.next()); @@ -917,13 +926,12 @@ public class ObjectStore implements RawStore, Configurable { try { openTransaction(); LOG.debug("Executing listMPartitions"); - dbName = dbName.toLowerCase(); - tableName = tableName.toLowerCase(); + dbName = dbName.toLowerCase().trim(); + tableName = tableName.toLowerCase().trim(); Query query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2"); query.declareParameters("java.lang.String t1, java.lang.String t2"); - mparts = (List) query - 
.execute(tableName.trim(), dbName.trim()); + mparts = (List) query.execute(tableName, dbName); LOG.debug("Done executing query for listMPartitions"); pm.retrieveAll(mparts); success = commitTransaction(); @@ -1077,27 +1085,25 @@ public class ObjectStore implements RawStore, Configurable { } return success; } - + private MIndex getMIndex(String dbName, String originalTblName, String indexName) throws MetaException { MIndex midx = null; boolean commited = false; try { openTransaction(); - dbName = dbName.toLowerCase(); - originalTblName = originalTblName.toLowerCase(); + dbName = dbName.toLowerCase().trim(); + originalTblName = originalTblName.toLowerCase().trim(); MTable mtbl = getMTable(dbName, originalTblName); if (mtbl == null) { commited = commitTransaction(); return null; } - Query query = pm - .newQuery(MIndex.class, - "origTable.tableName == t1 && origTable.database.name == t2 && indexName == t3"); - query - .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + Query query = pm.newQuery(MIndex.class, + "origTable.tableName == t1 && origTable.database.name == t2 && indexName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); query.setUnique(true); - midx = (MIndex) query.execute(originalTblName.trim(), dbName.trim(), indexName); + midx = (MIndex) query.execute(originalTblName, dbName, indexName); pm.retrieve(midx); commited = commitTransaction(); } finally { @@ -1126,7 +1132,7 @@ public class ObjectStore implements RawStore, Configurable { return new Index( mIndex.getIndexName(), mIndex.getIndexHandlerClass(), - MetaStoreUtils.DEFAULT_DATABASE_NAME, + mIndex.getOrigTable().getDatabase().getName(), mIndex.getOrigTable().getTableName(), mIndex.getCreateTime(), mIndex.getLastAccessTime(), @@ -1156,7 +1162,7 @@ public class ObjectStore implements RawStore, Configurable { } } } - + private List listMIndexes(String dbName, String origTableName, int max) { boolean success = false; @@ -1164,13 +1170,12 @@ public class ObjectStore implements RawStore, Configurable { try { openTransaction(); LOG.debug("Executing listMIndexes"); - dbName = dbName.toLowerCase(); - origTableName = origTableName.toLowerCase(); + dbName = dbName.toLowerCase().trim(); + origTableName = origTableName.toLowerCase().trim(); Query query = pm.newQuery(MIndex.class, "origTable.tableName == t1 && origTable.database.name == t2"); query.declareParameters("java.lang.String t1, java.lang.String t2"); - mindexes = (List) query - .execute(origTableName.trim(), dbName.trim()); + mindexes = (List) query.execute(origTableName, dbName); LOG.debug("Done executing query for listMIndexes"); pm.retrieveAll(mindexes); success = commitTransaction(); @@ -1191,14 +1196,15 @@ public class ObjectStore implements RawStore, Configurable { try { openTransaction(); LOG.debug("Executing listIndexNames"); - dbName = dbName.toLowerCase(); - origTableName = origTableName.toLowerCase(); - Query q = pm - .newQuery("select indexName from org.apache.hadoop.hive.metastore.model.MIndex where origTable.database.name == t1 && origTable.tableName == t2 order by indexName asc"); + dbName = dbName.toLowerCase().trim(); + origTableName = origTableName.toLowerCase().trim(); + Query q = pm.newQuery( + "select indexName from org.apache.hadoop.hive.metastore.model.MIndex " + + "where origTable.database.name == t1 && origTable.tableName == t2 " + + "order by indexName asc"); q.declareParameters("java.lang.String t1, java.lang.String t2"); q.setResult("indexName"); - Collection names = 
(Collection) q - .execute(dbName.trim(), origTableName.trim()); + Collection names = (Collection) q.execute(dbName, origTableName); pns = new ArrayList(); for (Iterator i = names.iterator(); i.hasNext();) { pns.add((String) i.next()); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index 4951bd6..1995b95 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -37,7 +37,7 @@ public interface RawStore extends Configurable { /** * Opens a new one or the one already created Every call of this function must * have corresponding commit or rollback function call - * + * * @return an active transaction */ @@ -46,7 +46,7 @@ public interface RawStore extends Configurable { /** * if this is the commit of the first open call then an actual commit is * called. - * + * * @return true or false */ public abstract boolean commitTransaction(); @@ -56,16 +56,17 @@ public interface RawStore extends Configurable { */ public abstract void rollbackTransaction(); - public abstract boolean createDatabase(Database db) throws MetaException; - - public abstract boolean createDatabase(String name) throws MetaException; + public abstract void createDatabase(Database db) + throws InvalidObjectException, MetaException; public abstract Database getDatabase(String name) throws NoSuchObjectException; - public abstract boolean dropDatabase(String dbname); + public abstract boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; - public abstract List getDatabases() throws MetaException; + public abstract List getDatabases(String pattern) throws MetaException; + + public abstract List getAllDatabases() throws MetaException; public abstract boolean createType(Type type); @@ -100,23 +101,25 @@ public interface RawStore extends Configurable { public List getTables(String dbName, String pattern) throws MetaException; + public List getAllTables(String dbName) throws MetaException; + public abstract List listPartitionNames(String db_name, String tbl_name, short max_parts) throws MetaException; public abstract void alterPartition(String db_name, String tbl_name, Partition new_part) throws InvalidObjectException, MetaException; - + public abstract boolean addIndex(Index index) throws InvalidObjectException, MetaException; - + public abstract Index getIndex(String dbName, String origTableName, String indexName) throws MetaException; public abstract boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException; public abstract List getIndexes(String dbName, String origTableName, int max) throws MetaException; - + public abstract List listIndexNames(String dbName, String origTableName, short max) throws MetaException; - + } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java index 4488f94..af20de1 100755 --- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.metastore; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; @@ -47,7 +49,9 @@ import org.apache.hadoop.hive.metastore.api.MetaException; public class Warehouse { private Path whRoot; 
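For reference, the pattern accepted by the new getDatabases(String pattern) and getTables(String dbName, String pattern) methods is a metastore-style glob: "|" separates alternatives and "*" is a wildcard, matched case-insensitively. A minimal standalone sketch of the translation ObjectStore applies before handing the expression to JDOQL's matches() (the class and method names here are illustrative, not part of the patch):

    import java.util.ArrayList;
    import java.util.List;

    class MetastorePatternSketch {
      // Mirror of the ObjectStore.getDatabases() pattern handling: split on
      // "|", turn "*" into ".*", prefix "(?i)" for case-insensitive matching.
      static List<String> filter(List<String> names, String pattern) {
        List<String> matched = new ArrayList<String>();
        for (String name : names) {
          for (String subpattern : pattern.trim().split("\\|")) {
            if (name.matches("(?i)" + subpattern.replaceAll("\\*", ".*"))) {
              matched.add(name);
              break;
            }
          }
        }
        return matched;
      }
    }

Under this scheme getAllDatabases() reduces to getDatabases(".*"), which is exactly how the ObjectStore change above implements it.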
private final Configuration conf; - String whRootString; + private final String whRootString; + + private static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; public static final Log LOG = LogFactory.getLog("hive.metastore.warehouse"); @@ -117,10 +121,10 @@ public class Warehouse { } public Path getDefaultDatabasePath(String dbName) throws MetaException { - if (dbName.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { + if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { return getWhRoot(); } - return new Path(getWhRoot(), dbName.toLowerCase() + ".db"); + return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX); } public Path getDefaultTablePath(String dbName, String tableName) @@ -328,7 +332,7 @@ public class Warehouse { } return FileUtils.makePartName(colNames, vals); } - + public static List getPartValuesFromPartName(String partName) throws MetaException { LinkedHashMap partSpec = Warehouse.makeSpecFromName(partName); diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java index b3e098d..e7cf0e6 100644 --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java @@ -27,6 +27,7 @@ package org.apache.hadoop.hive.metastore.model; */ public class MDatabase { private String name; + private String locationUri; private String description; /** @@ -37,11 +38,13 @@ public class MDatabase { /** * To create a database object * @param name of the database - * @param location future use + * @param locationUri Location of the database in the warehouse + * @param description Comment describing the database */ - public MDatabase(String name, String location) { + public MDatabase(String name, String locationUri, String description) { this.name = name; - this.description = location; + this.locationUri = locationUri; + this.description = description; } /** @@ -59,6 +62,20 @@ public class MDatabase { } /** + * @return the location_uri + */ + public String getLocationUri() { + return locationUri; + } + + /** + * @param locationUri the locationUri to set + */ + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; + } + + /** * @return the description */ public String getDescription() { @@ -71,5 +88,4 @@ public class MDatabase { public void setDescription(String description) { this.description = description; } - } diff --git metastore/src/model/package.jdo metastore/src/model/package.jdo index 206ba75..527f4b2 100644 --- metastore/src/model/package.jdo +++ metastore/src/model/package.jdo @@ -8,12 +8,15 @@ - + - - - + + + + + + diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java new file mode 100644 index 0000000..8558ace --- /dev/null +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
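The Warehouse change just above fixes the on-disk layout that the new Database.locationUri field records: the default database keeps the warehouse root itself, while every other database gets a lower-cased, ".db"-suffixed directory under that root. The rule, shown standalone (Path is org.apache.hadoop.fs.Path; whRoot stands in for Warehouse.getWhRoot()):

    // getDefaultDatabasePath() in essence; DEFAULT_DATABASE_NAME is "default"
    // and DATABASE_WAREHOUSE_SUFFIX is ".db".
    Path defaultDatabasePath(Path whRoot, String dbName) {
      if (dbName.equalsIgnoreCase("default")) {
        return whRoot;
      }
      return new Path(whRoot, dbName.toLowerCase() + ".db");
    }

So with a warehouse root of /user/hive/warehouse, a database named TestDB1 resolves to /user/hive/warehouse/testdb1.db, the value the testDatabase() assertions below check against getLocationUri().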
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.util.StringUtils; + +public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore { + + @Override + protected void setUp() throws Exception { + super.setUp(); + + try { + client = new HiveMetaStoreClient(hiveConf, null); + } catch (Throwable e) { + System.err.println("Unable to open the metastore"); + System.err.println(StringUtils.stringifyException(e)); + throw new Exception(e); + } + } + + @Override + protected void tearDown() throws Exception { + try { + super.tearDown(); + client.close(); + } catch (Throwable e) { + System.err.println("Unable to close metastore"); + System.err.println(StringUtils.stringifyException(e)); + throw new Exception(e); + } + } +} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index fff6aad..df09811 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -48,40 +48,25 @@ import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TException; -public class TestHiveMetaStore extends TestCase { - private HiveMetaStoreClient client; - private HiveConf hiveConf; +public abstract class TestHiveMetaStore extends TestCase { + protected static HiveMetaStoreClient client; + protected static HiveConf hiveConf; + protected static Warehouse warehouse; + protected static boolean isThriftClient = false; + + private static final String TEST_DB1_NAME = "testdb1"; + private static final String TEST_DB2_NAME = "testdb2"; @Override protected void setUp() throws Exception { - super.setUp(); hiveConf = new HiveConf(this.getClass()); + warehouse = new Warehouse(hiveConf); // set some values to use for getting conf. 
vars hiveConf.set("hive.key1", "value1"); hiveConf.set("hive.key2", "http://www.example.com"); hiveConf.set("hive.key3", ""); hiveConf.set("hive.key4", "0"); - - try { - client = new HiveMetaStoreClient(hiveConf, null); - } catch (Throwable e) { - System.err.println("Unable to open the metastore"); - System.err.println(StringUtils.stringifyException(e)); - throw new Exception(e); - } - } - - @Override - protected void tearDown() throws Exception { - try { - super.tearDown(); - client.close(); - } catch (Throwable e) { - System.err.println("Unable to close metastore"); - System.err.println(StringUtils.stringifyException(e)); - throw new Exception(e); - } } public void testNameMethods() { @@ -118,11 +103,11 @@ public class TestHiveMetaStore extends TestCase { * @throws Exception */ public void testPartition() throws Exception { - partitionTester(client, hiveConf, false); + partitionTester(client, hiveConf); } - public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf, - boolean isThriftClient) throws Exception { + public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf) + throws Exception { try { String dbName = "compdb"; String tblName = "comptbl"; @@ -139,9 +124,10 @@ public class TestHiveMetaStore extends TestCase { vals3.add("15"); client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + Database db = new Database(); + db.setName(dbName); + client.createDatabase(db); client.dropType(typeName); Type typ1 = new Type(); @@ -151,8 +137,7 @@ public class TestHiveMetaStore extends TestCase { new FieldSchema("name", Constants.STRING_TYPE_NAME, "")); typ1.getFields().add( new FieldSchema("income", Constants.INT_TYPE_NAME, "")); - ret = client.createType(typ1); - assertTrue("Unable to create type " + typeName, ret); + client.createType(typ1); Table tbl = new Table(); tbl.setDbName(dbName); @@ -181,7 +166,7 @@ public class TestHiveMetaStore extends TestCase { client.createTable(tbl); - if(isThriftClient) { + if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' // object when the client is a thrift client and the code below relies // on the location being present in the 'tbl' object - so get the table @@ -306,9 +291,9 @@ public class TestHiveMetaStore extends TestCase { Path partPath = new Path(part2.getSd().getLocation()); FileSystem fs = FileSystem.get(partPath.toUri(), hiveConf); + assertTrue(fs.exists(partPath)); - ret = client.dropPartition(dbName, tblName, part.getValues(), true); - assertTrue(ret); + client.dropPartition(dbName, tblName, part.getValues(), true); assertFalse(fs.exists(partPath)); // Test append_partition_by_name @@ -326,12 +311,11 @@ public class TestHiveMetaStore extends TestCase { // add the partition again so that drop table with a partition can be // tested retp = client.add_partition(part); - assertNotNull("Unable to create partition " + part, ret); + assertNotNull("Unable to create partition " + part, retp); client.dropTable(dbName, tblName); - ret = client.dropType(typeName); - assertTrue("Unable to drop type " + typeName, ret); + client.dropType(typeName); // recreate table as external, drop partition and it should // still exist @@ -343,8 +327,11 @@ public class TestHiveMetaStore extends TestCase { client.dropPartition(dbName, tblName, part.getValues(), true); assertTrue(fs.exists(partPath)); - ret = 
client.dropDatabase(dbName); - assertTrue("Unable to create the databse " + dbName, ret); + for (String tableName : client.getTables(dbName, "*")) { + client.dropTable(dbName, tableName); + } + + client.dropDatabase(dbName); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); @@ -363,9 +350,11 @@ public class TestHiveMetaStore extends TestCase { vals.add("14"); client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + Database db = new Database(); + db.setName(dbName); + db.setDescription("Alter Partition Test database"); + client.createDatabase(db); ArrayList cols = new ArrayList(2); cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, "")); @@ -398,6 +387,14 @@ public class TestHiveMetaStore extends TestCase { client.createTable(tbl); + if (isThriftClient) { + // the createTable() above does not update the location in the 'tbl' + // object when the client is a thrift client and the code below relies + // on the location being present in the 'tbl' object - so get the table + // from the metastore + tbl = client.getTable(dbName, tblName); + } + Partition part = new Partition(); part.setDbName(dbName); part.setTableName(tblName); @@ -426,8 +423,7 @@ public class TestHiveMetaStore extends TestCase { client.dropTable(dbName, tblName); - ret = client.dropDatabase(dbName); - assertTrue("Unable to create the databse " + dbName, ret); + client.dropDatabase(dbName); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testPartition() failed."); @@ -438,40 +434,40 @@ public class TestHiveMetaStore extends TestCase { public void testDatabase() throws Throwable { try { // clear up any existing databases - client.dropDatabase("test1"); - client.dropDatabase("test2"); + silentDropDatabase(TEST_DB1_NAME); + silentDropDatabase(TEST_DB2_NAME); - boolean ret = client.createDatabase("test1", "strange_loc"); - assertTrue("Unable to create the databse", ret); + Database db = new Database(); + db.setName(TEST_DB1_NAME); + client.createDatabase(db); - Database db = client.getDatabase("test1"); + db = client.getDatabase(TEST_DB1_NAME); assertEquals("name of returned db is different from that of inserted db", - "test1", db.getName()); - assertEquals( - "location of the returned db is different from that of inserted db", - "strange_loc", db.getDescription()); + TEST_DB1_NAME, db.getName()); + assertEquals("location of the returned db is different from that of inserted db", + warehouse.getDefaultDatabasePath(TEST_DB1_NAME).toString(), db.getLocationUri()); - boolean ret2 = client.createDatabase("test2", "another_strange_loc"); - assertTrue("Unable to create the databse", ret2); + Database db2 = new Database(); + db2.setName(TEST_DB2_NAME); + client.createDatabase(db2); - Database db2 = client.getDatabase("test2"); + db2 = client.getDatabase(TEST_DB2_NAME); assertEquals("name of returned db is different from that of inserted db", - "test2", db2.getName()); - assertEquals( - "location of the returned db is different from that of inserted db", - "another_strange_loc", db2.getDescription()); + TEST_DB2_NAME, db2.getName()); + assertEquals("location of the returned db is different from that of inserted db", + warehouse.getDefaultDatabasePath(TEST_DB2_NAME).toString(), db2.getLocationUri()); - List dbs = client.getDatabases(); + List dbs = client.getDatabases(".*"); - 
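The rewritten testDatabase() above boils down to the following struct-based client flow (the database name is the test's own constant; description and locationUri are deliberately left unset):

    // Create through the Thrift Database struct, then read it back.
    Database db = new Database();
    db.setName("testdb1");
    client.createDatabase(db);

    Database fetched = client.getDatabase("testdb1");
    // With no explicit locationUri, the metastore fills in the warehouse
    // default, i.e. <warehouse root>/testdb1.db, which is what the
    // assertions against warehouse.getDefaultDatabasePath() verify.
    String location = fetched.getLocationUri();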
assertTrue("first database is not test1", dbs.contains("test1")); - assertTrue("second database is not test2", dbs.contains("test2")); + assertTrue("first database is not " + TEST_DB1_NAME, dbs.contains(TEST_DB1_NAME)); + assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME)); - ret = client.dropDatabase("test1"); - assertTrue("couldn't delete first database", ret); - ret = client.dropDatabase("test2"); - assertTrue("couldn't delete second database", ret); + client.dropDatabase(TEST_DB1_NAME); + client.dropDatabase(TEST_DB2_NAME); + silentDropDatabase(TEST_DB1_NAME); + silentDropDatabase(TEST_DB2_NAME); } catch (Throwable e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testDatabase() failed."); @@ -495,9 +491,13 @@ public class TestHiveMetaStore extends TestCase { ret = client.dropType(Constants.INT_TYPE_NAME); assertTrue("unable to drop type integer", ret); - Type typ1_3 = null; - typ1_3 = client.getType(Constants.INT_TYPE_NAME); - assertNull("unable to drop type integer", typ1_3); + boolean exceptionThrown = false; + try { + client.getType(Constants.INT_TYPE_NAME); + } catch (NoSuchObjectException e) { + exceptionThrown = true; + } + assertTrue("Expected NoSuchObjectException", exceptionThrown); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testSimpleTypeApi() failed."); @@ -554,9 +554,13 @@ public class TestHiveMetaStore extends TestCase { ret = client.dropType("Person"); assertTrue("unable to drop type Person", ret); - Type typ1_3 = null; - typ1_3 = client.getType("Person"); - assertNull("unable to drop type Person", typ1_3); + boolean exceptionThrown = false; + try { + client.getType("Person"); + } catch (NoSuchObjectException e) { + exceptionThrown = true; + } + assertTrue("Expected NoSuchObjectException", exceptionThrown); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testComplexTypeApi() failed."); @@ -572,9 +576,11 @@ public class TestHiveMetaStore extends TestCase { String typeName = "Person"; client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + + Database db = new Database(); + db.setName(dbName); + client.createDatabase(db); client.dropType(typeName); Type typ1 = new Type(); @@ -584,8 +590,7 @@ public class TestHiveMetaStore extends TestCase { new FieldSchema("name", Constants.STRING_TYPE_NAME, "")); typ1.getFields().add( new FieldSchema("income", Constants.INT_TYPE_NAME, "")); - ret = client.createType(typ1); - assertTrue("Unable to create type " + typeName, ret); + client.createType(typ1); Table tbl = new Table(); tbl.setDbName(dbName); @@ -610,6 +615,14 @@ public class TestHiveMetaStore extends TestCase { client.createTable(tbl); + if (isThriftClient) { + // the createTable() above does not update the location in the 'tbl' + // object when the client is a thrift client and the code below relies + // on the location being present in the 'tbl' object - so get the table + // from the metastore + tbl = client.getTable(dbName, tblName); + } + Table tbl2 = client.getTable(dbName, tblName); assertNotNull(tbl2); assertEquals(tbl2.getDbName(), dbName); @@ -647,6 +660,9 @@ public class TestHiveMetaStore extends TestCase { } client.createTable(tbl2); + if (isThriftClient) { + tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName()); + } Table tbl3 = 
client.getTable(dbName, tblName2); assertNotNull(tbl3); @@ -683,18 +699,15 @@ public class TestHiveMetaStore extends TestCase { (tbl2.getPartitionKeys() == null) || (tbl2.getPartitionKeys().size() == 0)); - FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), - hiveConf); + FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf); client.dropTable(dbName, tblName); assertFalse(fs.exists(new Path(tbl.getSd().getLocation()))); client.dropTable(dbName, tblName2); assertTrue(fs.exists(new Path(tbl2.getSd().getLocation()))); - ret = client.dropType(typeName); - assertTrue("Unable to drop type " + typeName, ret); - ret = client.dropDatabase(dbName); - assertTrue("Unable to drop databse " + dbName, ret); + client.dropType(typeName); + client.dropDatabase(dbName); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testSimpleTable() failed."); @@ -703,15 +716,17 @@ public class TestHiveMetaStore extends TestCase { } public void testAlterTable() throws Exception { - try { - String dbName = "alterdb"; - String invTblName = "alter-tbl"; - String tblName = "altertbl"; + String dbName = "alterdb"; + String invTblName = "alter-tbl"; + String tblName = "altertbl"; + try { client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + + Database db = new Database(); + db.setName(dbName); + client.createDatabase(db); ArrayList invCols = new ArrayList(2); invCols.add(new FieldSchema("n-ame", Constants.STRING_TYPE_NAME, "")); @@ -753,6 +768,10 @@ public class TestHiveMetaStore extends TestCase { tbl.getSd().setCols(cols); client.createTable(tbl); + if (isThriftClient) { + tbl = client.getTable(tbl.getDbName(), tbl.getTableName()); + } + // now try to invalid alter table Table tbl2 = client.getTable(dbName, tblName); failed = false; @@ -776,18 +795,22 @@ public class TestHiveMetaStore extends TestCase { assertEquals("Alter table didn't succeed. 
Num buckets is different ", tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets()); // check that data has moved - FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), - hiveConf); + FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf); assertFalse("old table location still exists", fs.exists(new Path(tbl .getSd().getLocation()))); assertTrue("data did not move to new location", fs.exists(new Path(tbl3 .getSd().getLocation()))); - assertEquals("alter table didn't move data correct location", tbl3 - .getSd().getLocation(), tbl2.getSd().getLocation()); + + if (!isThriftClient) { + assertEquals("alter table didn't move data correct location", tbl3 + .getSd().getLocation(), tbl2.getSd().getLocation()); + } } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testSimpleTable() failed."); throw e; + } finally { + silentDropDatabase(dbName); } } @@ -799,9 +822,10 @@ public class TestHiveMetaStore extends TestCase { try { client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + Database db = new Database(); + db.setName(dbName); + client.createDatabase(db); client.dropType(typeName); Type typ1 = new Type(); @@ -811,8 +835,7 @@ public class TestHiveMetaStore extends TestCase { new FieldSchema("name", Constants.STRING_TYPE_NAME, "")); typ1.getFields().add( new FieldSchema("income", Constants.INT_TYPE_NAME, "")); - ret = client.createType(typ1); - assertTrue("Unable to create type " + typeName, ret); + client.createType(typ1); Table tbl = new Table(); tbl.setDbName(dbName); @@ -889,8 +912,7 @@ public class TestHiveMetaStore extends TestCase { client.dropTable(dbName, tblName); boolean ret = client.dropType(typeName); assertTrue("Unable to drop type " + typeName, ret); - ret = client.dropDatabase(dbName); - assertTrue("Unable to create the databse " + dbName, ret); + client.dropDatabase(dbName); } } @@ -898,20 +920,21 @@ public class TestHiveMetaStore extends TestCase { String val = "value"; - try { - assertEquals(client.getConfigValue("hive.key1", val), "value1"); - assertEquals(client.getConfigValue("hive.key2", val), - "http://www.example.com"); - assertEquals(client.getConfigValue("hive.key3", val), ""); - assertEquals(client.getConfigValue("hive.key4", val), "0"); - assertEquals(client.getConfigValue("hive.key5", val), val); - assertEquals(client.getConfigValue(null, val), val); - } catch (TException e) { - e.printStackTrace(); - assert (false); - } catch (ConfigValSecurityException e) { - e.printStackTrace(); - assert (false); + if (!isThriftClient) { + try { + assertEquals(client.getConfigValue("hive.key1", val), "value1"); + assertEquals(client.getConfigValue("hive.key2", val), "http://www.example.com"); + assertEquals(client.getConfigValue("hive.key3", val), ""); + assertEquals(client.getConfigValue("hive.key4", val), "0"); + assertEquals(client.getConfigValue("hive.key5", val), val); + assertEquals(client.getConfigValue(null, val), val); + } catch (TException e) { + e.printStackTrace(); + assert (false); + } catch (ConfigValSecurityException e) { + e.printStackTrace(); + assert (false); + } } boolean threwException = false; @@ -934,4 +957,15 @@ public class TestHiveMetaStore extends TestCase { part.setCreateTime(part_get.getCreateTime()); part.putToParameters(org.apache.hadoop.hive.metastore.api.Constants.DDL_TIME, 
Long.toString(part_get.getCreateTime())); } + + private static void silentDropDatabase(String dbName) throws MetaException, TException { + try { + for (String tableName : client.getTables(dbName, "*")) { + client.dropTable(dbName, tableName); + } + client.dropDatabase(dbName); + } catch (NoSuchObjectException e) { + } catch (InvalidOperationException e) { + } + } } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreRemote.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreRemote.java deleted file mode 100644 index bc950b9..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreRemote.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.metastore; - -import junit.framework.TestCase; - -import org.apache.hadoop.hive.conf.HiveConf; - - -public class TestHiveMetaStoreRemote extends TestCase { - private static final String METASTORE_PORT = "29083"; -private HiveMetaStoreClient client; - private HiveConf hiveConf; - boolean isServerRunning = false; - - private static class RunMS implements Runnable { - - @Override - public void run() { - System.out.println("Running metastore!"); - String [] args = new String [1]; - args[0] = METASTORE_PORT; - HiveMetaStore.main(args); - } - - } - - @Override - protected void setUp() throws Exception { - super.setUp(); - if(isServerRunning) { - return; - } - Thread t = new Thread(new RunMS()); - t.start(); - - // Wait a little bit for the metastore to start. Should probably have - // a better way of detecting if the metastore has started? - Thread.sleep(5000); - - // Set conf to connect to the local metastore. - hiveConf = new HiveConf(this.getClass()); - // hive.metastore.local should be defined in HiveConf - hiveConf.set("hive.metastore.local", "false"); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + METASTORE_PORT); - hiveConf.setIntVar(HiveConf.ConfVars.METATORETHRIFTRETRIES, 3); - - client = new HiveMetaStoreClient(hiveConf); - // Now you have the client - run necessary tests. 
- isServerRunning = true; - } - - /** - * tests create table and partition and tries to drop the table without - * droppping the partition - * - * @throws Exception - */ - public void testPartition() throws Exception { - TestHiveMetaStore.partitionTester(client, hiveConf, true); - } - -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java new file mode 100644 index 0000000..57648b6 --- /dev/null +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.conf.HiveConf; + + +public class TestRemoteHiveMetaStore extends TestHiveMetaStore { + private static final String METASTORE_PORT = "29083"; + private static boolean isServerRunning = false; + + private static class RunMS implements Runnable { + + @Override + public void run() { + System.out.println("Running metastore!"); + String [] args = new String [1]; + args[0] = METASTORE_PORT; + HiveMetaStore.main(args); + } + + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + if(isServerRunning) { + return; + } + Thread t = new Thread(new RunMS()); + t.start(); + + // Wait a little bit for the metastore to start. Should probably have + // a better way of detecting if the metastore has started? + Thread.sleep(5000); + + // hive.metastore.local should be defined in HiveConf + hiveConf.set("hive.metastore.local", "false"); + hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + METASTORE_PORT); + hiveConf.setIntVar(HiveConf.ConfVars.METATORETHRIFTRETRIES, 3); + + client = new HiveMetaStoreClient(hiveConf); + isThriftClient = true; + + // Now you have the client - run necessary tests. 
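The conf wiring in the new remote test's setUp() above is the general recipe for pointing a HiveMetaStoreClient at a standalone Thrift metastore; extracted on its own (host and port are the test's values, and METATORETHRIFTRETRIES is the patch's own constant name, spelling included):

    // Disable the in-process metastore and aim the client at the
    // Thrift endpoint started by RunMS.
    HiveConf conf = new HiveConf();
    conf.set("hive.metastore.local", "false");
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:29083");
    conf.setIntVar(HiveConf.ConfVars.METATORETHRIFTRETRIES, 3);
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);

Setting isThriftClient = true afterwards is what makes the shared TestHiveMetaStore cases re-fetch tables after createTable(), since the Thrift path does not write the chosen location back into the caller's Table object.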
+ isServerRunning = true; + } + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 28b4d2a..71b8ca2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -55,9 +55,12 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.ProtectMode; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; @@ -76,6 +79,7 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; @@ -83,9 +87,11 @@ import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.MsckDesc; +import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.LockTableDesc; @@ -93,6 +99,7 @@ import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; +import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.lockmgr.HiveLock; @@ -153,6 +160,21 @@ public class DDLTask extends Task implements Serializable { try { db = Hive.get(conf); + CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc(); + if (null != createDatabaseDesc) { + return createDatabase(db, createDatabaseDesc); + } + + DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc(); + if (dropDatabaseDesc != null) { + return dropDatabase(db, dropDatabaseDesc); + } + + SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc(); + if (switchDatabaseDesc != null) { + return switchDatabase(db, switchDatabaseDesc); + } + CreateTableDesc crtTbl = work.getCreateTblDesc(); if (crtTbl != null) { return createTable(db, crtTbl); @@ -164,12 +186,11 @@ public class DDLTask extends Task implements Serializable { } DropIndexDesc dropIdx = work.getDropIdxDesc(); - if(dropIdx != null) { + if (dropIdx != null) { return dropIndex(db, 
dropIdx); } CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc(); - if (crtTblLike != null) { return createTableLike(db, crtTblLike); } @@ -195,8 +216,7 @@ public class DDLTask extends Task implements Serializable { } AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc(); - - if(simpleDesc != null) { + if (simpleDesc != null) { if (simpleDesc.getType() == AlterTableTypes.TOUCH) { return touch(db, simpleDesc); } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) { @@ -205,6 +225,7 @@ public class DDLTask extends Task implements Serializable { return unarchive(db, simpleDesc); } } + MsckDesc msckDesc = work.getMsckDesc(); if (msckDesc != null) { return msck(db, msckDesc); @@ -220,6 +241,11 @@ public class DDLTask extends Task implements Serializable { return describeFunction(descFunc); } + ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc(); + if (showDatabases != null) { + return showDatabases(db, showDatabases); + } + ShowTablesDesc showTbls = work.getShowTblsDesc(); if (showTbls != null) { return showTables(db, showTbls); @@ -274,7 +300,7 @@ public class DDLTask extends Task implements Serializable { } private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException { - db.dropIndex(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropIdx.getTableName(), + db.dropIndex(db.getCurrentDatabase(), dropIdx.getTableName(), dropIdx.getIndexName(), true); return 0; } @@ -309,8 +335,7 @@ public class DDLTask extends Task implements Serializable { */ private int addPartition(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException { - Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc - .getTableName()); + Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName()); validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ADDPARTITION); @@ -904,11 +929,10 @@ public class DDLTask extends Task implements Serializable { List repairOutput = new ArrayList(); try { HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db); - checker.checkMetastore(MetaStoreUtils.DEFAULT_DATABASE_NAME, msckDesc + checker.checkMetastore(db.getCurrentDatabase(), msckDesc .getTableName(), msckDesc.getPartSpecs(), result); if (msckDesc.isRepairPartitions()) { - Table table = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, - msckDesc.getTableName()); + Table table = db.getTable(msckDesc.getTableName()); for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) { try { db.createPartition(table, Warehouse.makeSpecFromName(part @@ -1020,18 +1044,17 @@ public class DDLTask extends Task implements Serializable { Table tbl = null; List parts = null; - tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName); + tbl = db.getTable(tabName); if (!tbl.isPartitioned()) { console.printError("Table " + tabName + " is not a partitioned table"); return 1; } if (showParts.getPartSpec() != null) { - parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, + parts = db.getPartitionNames(db.getCurrentDatabase(), tbl.getTableName(), showParts.getPartSpec(), (short) -1); } else { - parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl - .getTableName(), (short) -1); + parts = db.getPartitionNames(db.getCurrentDatabase(), tbl.getTableName(), (short) -1); } // write the results in the file @@ -1061,6 +1084,50 @@ public class DDLTask extends Task implements Serializable { } /** + * Write a list of the available databases to a file. + * + * @param showDatabases + * These are the databases we're interested in. 
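The database descriptors dispatched at the top of execute() above map one-to-one onto the DDL statements this patch introduces; roughly (descriptor names are the patch's, statement syntax hedged):

    // CreateDatabaseDesc  ->  CREATE DATABASE [IF NOT EXISTS] dbname
    // DropDatabaseDesc    ->  DROP DATABASE [IF EXISTS] dbname
    // SwitchDatabaseDesc  ->  USE dbname
    // ShowDatabasesDesc   ->  SHOW DATABASES [LIKE 'pattern']

Each handler below follows the same contract: perform the metastore call through the Hive facade and return 0 on success.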
+ * @return Returns 0 when execution succeeds and above 0 if it fails. + * @throws HiveException + * Throws this exception if an unexpected error occurs. + */ + private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) throws HiveException { + // get the databases for the desired pattern - populate the output stream + List databases = null; + if (showDatabasesDesc.getPattern() != null) { + LOG.info("pattern: " + showDatabasesDesc.getPattern()); + databases = db.getDatabasesByPattern(showDatabasesDesc.getPattern()); + } else { + databases = db.getAllDatabases(); + } + LOG.info("results : " + databases.size()); + + // write the results in the file + try { + Path resFile = new Path(showDatabasesDesc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + DataOutput outStream = fs.create(resFile); + + for (String database : databases) { + // create a row per database name + outStream.writeBytes(database); + outStream.write(terminator); + } + ((FSDataOutputStream) outStream).close(); + } catch (FileNotFoundException e) { + LOG.warn("show databases: " + stringifyException(e)); + return 1; + } catch (IOException e) { + LOG.warn("show databases: " + stringifyException(e)); + return 1; + } catch (Exception e) { + throw new HiveException(e.toString()); + } + return 0; + } + + /** * Write a list of the tables in the database to a file. * * @param db @@ -1388,8 +1455,7 @@ public class DDLTask extends Task implements Serializable { Map part = showTblStatus.getPartSpec(); Partition par = null; if (part != null) { - Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus - .getPattern()); + Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus.getPattern()); par = db.getPartition(tbl, part, false); if (par == null) { throw new HiveException("Partition " + part + " for table " @@ -1512,8 +1578,7 @@ public class DDLTask extends Task implements Serializable { colPath.indexOf('.') == -1 ? 
colPath.length() : colPath.indexOf('.')); // describe the table - populate the output stream - Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, - false); + Table tbl = db.getTable(db.getCurrentDatabase(), tableName, false); Partition part = null; try { Path resFile = new Path(descTbl.getResFile()); @@ -1764,8 +1829,7 @@ public class DDLTask extends Task implements Serializable { */ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { // alter the table - Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, alterTbl - .getOldName()); + Table tbl = db.getTable(alterTbl.getOldName()); Partition part = null; if(alterTbl.getPartSpec() != null) { @@ -2086,8 +2150,7 @@ public class DDLTask extends Task implements Serializable { // post-execution hook Table tbl = null; try { - tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl - .getTableName()); + tbl = db.getTable(dropTbl.getTableName()); if (!tbl.canDrop()) { throw new HiveException("Table " + tbl.getTableName() + " is protected from being dropped"); @@ -2122,25 +2185,20 @@ public class DDLTask extends Task implements Serializable { } // drop the table - db - .dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl - .getTableName()); + db.dropTable(db.getCurrentDatabase(), dropTbl.getTableName()); if (tbl != null) { work.getOutputs().add(new WriteEntity(tbl)); } } else { // get all partitions of the table - List partitionNames = db.getPartitionNames( - MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl.getTableName(), - (short) -1); - + List partitionNames = + db.getPartitionNames(db.getCurrentDatabase(), dropTbl.getTableName(), (short) -1); Set> partitions = new HashSet>(); - for (int i = 0; i < partitionNames.size(); i++) { + for (String partitionName : partitionNames) { try { - partitions.add(Warehouse.makeSpecFromName(partitionNames.get(i))); + partitions.add(Warehouse.makeSpecFromName(partitionName)); } catch (MetaException e) { - LOG.warn("Unrecognized partition name from metastore: " - + partitionNames.get(i)); + LOG.warn("Unrecognized partition name from metastore: " + partitionName); } } // drop partitions in the list @@ -2174,8 +2232,8 @@ public class DDLTask extends Task implements Serializable { // drop all existing partitions from the list for (Partition partition : partsToDelete) { console.printInfo("Dropping the partition " + partition.getName()); - db.dropPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl - .getTableName(), partition.getValues(), true); // drop data for the + db.dropPartition(db.getCurrentDatabase(), dropTbl.getTableName(), + partition.getValues(), true); // drop data for the // partition work.getOutputs().add(new WriteEntity(partition)); } @@ -2199,6 +2257,57 @@ public class DDLTask extends Task implements Serializable { } /** + * Create a Database + * @param db + * @param crtDb + * @return Always returns 0 + * @throws HiveException + * @throws AlreadyExistsException + */ + private int createDatabase(Hive db, CreateDatabaseDesc crtDb) + throws HiveException, AlreadyExistsException { + Database database = new Database(); + database.setName(crtDb.getName()); + database.setDescription(crtDb.getComment()); + database.setLocationUri(crtDb.getLocationUri()); + + db.createDatabase(database, crtDb.getIfNotExists()); + return 0; + } + + /** + * Drop a Database + * @param db + * @param dropDb + * @return Always returns 0 + * @throws HiveException + * @throws NoSuchObjectException + */ + private int dropDatabase(Hive db, DropDatabaseDesc dropDb) + 
throws HiveException, NoSuchObjectException { + db.dropDatabase(dropDb.getDatabaseName(), true, dropDb.getIfExists()); + return 0; + } + + /** + * Switch to a different Database + * @param db + * @param switchDb + * @return Always returns 0 + * @throws HiveException + */ + private int switchDatabase(Hive db, SwitchDatabaseDesc switchDb) + throws HiveException { + String dbName = switchDb.getDatabaseName(); + if (!db.databaseExists(dbName)) { + throw new HiveException("ERROR: The database " + dbName + " does not exist."); + } + db.setCurrentDatabase(dbName); + return 0; + } + + + /** * Create a new table. * * @param db @@ -2211,7 +2320,7 @@ public class DDLTask extends Task implements Serializable { */ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { // create the table - Table tbl = new Table(crtTbl.getTableName()); + Table tbl = new Table(db.getCurrentDatabase(), crtTbl.getTableName()); if (crtTbl.getPartCols() != null) { tbl.setPartCols(crtTbl.getPartCols()); } @@ -2370,8 +2479,7 @@ public class DDLTask extends Task implements Serializable { */ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveException { // Get the existing table - Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, crtTbl - .getLikeTableName()); + Table tbl = db.getTable(crtTbl.getLikeTableName()); tbl.setTableName(crtTbl.getTableName()); @@ -2405,7 +2513,7 @@ public class DDLTask extends Task implements Serializable { * Throws this exception if an unexpected error occurs. */ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { - Table tbl = new Table(crtView.getViewName()); + Table tbl = new Table(db.getCurrentDatabase(), crtView.getViewName()); tbl.setTableType(TableType.VIRTUAL_VIEW); tbl.setSerializationLib(null); tbl.clearSerDeInfo(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index d59f48c..1c15db2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -32,10 +32,10 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; -import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.util.StringUtils; /** @@ -132,8 +131,7 @@ public class MoveTask extends Task implements Serializable { } String mesg_detail = " from " + tbd.getSourceDir(); console.printInfo(mesg.toString(), mesg_detail); - Table table = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbd - .getTable().getTableName()); + Table table = db.getTable(tbd.getTable().getTableName()); if (work.getCheckFileFormat()) { // Get all files from the src directory diff --git 
ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index ce5d903..afec18f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -18,6 +18,16 @@ package org.apache.hadoop.hive.ql.metadata; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE; +import static org.apache.hadoop.hive.serde.Constants.COLLECTION_DELIM; +import static org.apache.hadoop.hive.serde.Constants.ESCAPE_CHAR; +import static org.apache.hadoop.hive.serde.Constants.FIELD_DELIM; +import static org.apache.hadoop.hive.serde.Constants.LINE_DELIM; +import static org.apache.hadoop.hive.serde.Constants.MAPKEY_DELIM; +import static org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT; +import static org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME; + import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; @@ -42,6 +52,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -51,7 +62,6 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.index.HiveIndexHandler; -import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; @@ -72,6 +82,7 @@ public class Hive { private HiveConf conf = null; private IMetaStoreClient metaStoreClient; + private String currentDatabase; private static ThreadLocal<Hive> hiveDB = new ThreadLocal<Hive>() { @Override @@ -172,6 +183,69 @@ } /** + * Create a database. + * @param db the database object + * @param ifNotExist if true, will ignore AlreadyExistsException + * @throws AlreadyExistsException + * @throws HiveException + */ + public void createDatabase(Database db, boolean ifNotExist) + throws AlreadyExistsException, HiveException { + try { + getMSC().createDatabase(db); + } catch (AlreadyExistsException e) { + if (!ifNotExist) { + throw e; + } + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * Create a Database. Raise an error if a database with the same name already exists. + * @param db the database object + * @throws AlreadyExistsException + * @throws HiveException + */ + public void createDatabase(Database db) throws AlreadyExistsException, HiveException { + createDatabase(db, false); + } + + /** + * Drop a database.
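+ * Equivalent to {@code dropDatabase(name, true, false)}: the data is deleted along with the metadata, and a missing database raises NoSuchObjectException rather than being ignored.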
+ * @param name database to drop + * @throws NoSuchObjectException + * @throws HiveException + * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#dropDatabase(java.lang.String) + */ + public void dropDatabase(String name) throws HiveException, NoSuchObjectException { + dropDatabase(name, true, false); + } + + + /** + * Drop a database. + * @param name database to drop + * @param deleteData whether to delete the underlying data as well as the metadata + * @param ignoreUnknownDb if true, will ignore NoSuchObjectException + * @throws HiveException + * @throws NoSuchObjectException + */ + public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) + throws HiveException, NoSuchObjectException { + try { + getMSC().dropDatabase(name, deleteData, ignoreUnknownDb); + } catch (NoSuchObjectException e) { + throw e; + } catch (Exception e) { + throw new HiveException(e); + } + } + + + /** * Creates the table metadata and the directory for the table data * * @param tableName @@ -223,13 +297,12 @@ throw new HiveException("columns not specified for table " + tableName); } - Table tbl = new Table(tableName); + Table tbl = new Table(getCurrentDatabase(), tableName); tbl.setInputFormatClass(fileInputFormat.getName()); tbl.setOutputFormatClass(fileOutputFormat.getName()); for (String col : columns) { - FieldSchema field = new FieldSchema(col, - org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "default"); + FieldSchema field = new FieldSchema(col, STRING_TYPE_NAME, "default"); tbl.getCols().add(field); } @@ -237,9 +310,7 @@ for (String partCol : partCols) { FieldSchema part = new FieldSchema(); part.setName(partCol); - part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default - // partition - // key + part.setType(STRING_TYPE_NAME); // default partition key tbl.getPartCols().add(part); } } @@ -263,8 +334,7 @@ public void alterTable(String tblName, Table newTbl) throws InvalidOperationException, HiveException { try { - getMSC().alter_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, - newTbl.getTTable()); + getMSC().alter_table(getCurrentDatabase(), tblName, newTbl.getTTable()); } catch (MetaException e) { throw new HiveException("Unable to alter table.", e); } catch (TException e) { @@ -286,7 +356,7 @@ public void alterPartition(String tblName, Partition newPart) throws InvalidOperationException, HiveException { try { - getMSC().alter_partition(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, + getMSC().alter_partition(getCurrentDatabase(), tblName, newPart.getTPartition()); } catch (MetaException e) { @@ -318,6 +388,9 @@ */ public void createTable(Table tbl, boolean ifNotExists) throws HiveException { try { + if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) { + tbl.setDbName(getCurrentDatabase()); + } if (tbl.getCols().size() == 0) { tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), tbl.getDeserializer())); @@ -377,7 +450,7 @@ throws HiveException { try { - String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + String dbName = getCurrentDatabase(); Index old_index = null; try { old_index = getIndex(dbName, tableName, indexName); @@ -420,22 +493,20 @@ } if (fieldDelim != null) { - serdeInfo.getParameters().put(Constants.FIELD_DELIM, fieldDelim); - serdeInfo.getParameters().put(Constants.SERIALIZATION_FORMAT, - fieldDelim); + serdeInfo.getParameters().put(FIELD_DELIM, fieldDelim); + serdeInfo.getParameters().put(SERIALIZATION_FORMAT, fieldDelim); } if
(fieldEscape != null) { - serdeInfo.getParameters().put(Constants.ESCAPE_CHAR, fieldEscape); + serdeInfo.getParameters().put(ESCAPE_CHAR, fieldEscape); } if (collItemDelim != null) { - serdeInfo.getParameters() - .put(Constants.COLLECTION_DELIM, collItemDelim); + serdeInfo.getParameters().put(COLLECTION_DELIM, collItemDelim); } if (mapKeyDelim != null) { - serdeInfo.getParameters().put(Constants.MAPKEY_DELIM, mapKeyDelim); + serdeInfo.getParameters().put(MAPKEY_DELIM, mapKeyDelim); } if (lineDelim != null) { - serdeInfo.getParameters().put(Constants.LINE_DELIM, lineDelim); + serdeInfo.getParameters().put(LINE_DELIM, lineDelim); } if (serdeProps != null) { @@ -481,7 +552,7 @@ HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass); if (indexHandler.usesIndexTable()) { - tt = new org.apache.hadoop.hive.ql.metadata.Table(indexTblName).getTTable(); + tt = new org.apache.hadoop.hive.ql.metadata.Table(dbName, indexTblName).getTTable(); List<FieldSchema> partKeys = baseTbl.getPartitionKeys(); tt.setPartitionKeys(partKeys); tt.setTableType(TableType.INDEX_TABLE.toString()); @@ -537,6 +608,26 @@ * @throws HiveException * thrown if the drop fails */ + public void dropTable(String tableName) throws HiveException { + dropTable(getCurrentDatabase(), tableName, true, true); + } + + /** + * Drops the table along with the data in it. If the table doesn't exist + * then it is a no-op. + * + * @param dbName + * database where the table lives + * @param tableName + * table to drop + * @throws HiveException + * thrown if the drop fails + */ public void dropTable(String dbName, String tableName) throws HiveException { dropTable(dbName, tableName, true, true); } @@ -570,7 +661,18 @@ } /** - * Returns metadata of the table. + * Returns metadata for the table named tableName in the current database.
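+ * Equivalent to {@code getTable(getCurrentDatabase(), tableName, true)}.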
+ * @param tableName the name of the table + * @return the table metadata + * @throws HiveException if there's an internal error or if the + * table doesn't exist + */ + public Table getTable(final String tableName) throws HiveException { + return this.getTable(getCurrentDatabase(), tableName, true); + } + + /** + * Returns metadata of the table. * * @param dbName * the name of the database * @@ -580,9 +682,7 @@ * @exception HiveException * if there's an internal error or if the table doesn't exist */ - public Table getTable(final String dbName, final String tableName) - throws HiveException { - + public Table getTable(final String dbName, final String tableName) throws HiveException { return this.getTable(dbName, tableName, true); } @@ -623,12 +723,11 @@ if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) { // Fix the non-printable chars Map<String, String> parameters = tTable.getSd().getParameters(); - String sf = parameters.get(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT); + String sf = parameters.get(SERIALIZATION_FORMAT); if (sf != null) { char[] b = sf.toCharArray(); if ((b.length == 1) && (b[0] < 10)) { // ^A, ^B, ^C, ^D, \t - parameters.put(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, - Integer.toString(b[0])); + parameters.put(SERIALIZATION_FORMAT, Integer.toString(b[0])); } } @@ -654,12 +753,27 @@ return table; } + /** + * Get all table names for the current database. + * @return List of table names + * @throws HiveException + */ public List<String> getAllTables() throws HiveException { - return getTablesByPattern(".*"); + return getAllTables(getCurrentDatabase()); + } + + /** + * Get all table names for the specified database. + * @param dbName + * @return List of table names + * @throws HiveException + */ + public List<String> getAllTables(String dbName) throws HiveException { + return getTablesByPattern(dbName, ".*"); } /** - * returns all existing tables from default database which match the given + * Returns all existing tables from default database which match the given * pattern. The matching occurs as per Java regular expressions * * @param tablePattern * @return list of table names * @throws HiveException */ - public List<String> getTablesByPattern(String tablePattern) - throws HiveException { - return getTablesForDb(MetaStoreUtils.DEFAULT_DATABASE_NAME, tablePattern); + public List<String> getTablesByPattern(String tablePattern) throws HiveException { + return getTablesByPattern(getCurrentDatabase(), tablePattern); + } + + /** + * Returns all existing tables from the specified database which match the given + * pattern. The matching occurs as per Java regular expressions. + * @param dbName + * @param tablePattern + * @return list of table names + * @throws HiveException + */ + public List<String> getTablesByPattern(String dbName, String tablePattern) throws HiveException { + try { + return getMSC().getTables(dbName, tablePattern); + } catch (Exception e) { + throw new HiveException(e); + } } /** - * returns all existing tables from the given database which match the given + * Returns all existing tables from the given database which match the given * pattern.
The matching occurs as per Java regular expressions * * @param database @@ -693,31 +822,58 @@ } /** - * @param name - * @param locationUri - * @return true or false - * @throws AlreadyExistsException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#createDatabase(java.lang.String, - * java.lang.String) + * Get all existing database names. + * + * @return List of database names. + * @throws HiveException */ - protected boolean createDatabase(String name, String locationUri) - throws AlreadyExistsException, MetaException, TException { - return getMSC().createDatabase(name, locationUri); + public List<String> getAllDatabases() throws HiveException { + try { + return getMSC().getAllDatabases(); + } catch (Exception e) { + throw new HiveException(e); + } } /** - * @param name - * @return true or false - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#dropDatabase(java.lang.String) + * Get all existing databases that match the given + * pattern. The matching occurs as per Java regular expressions. + * + * @param databasePattern + * Java regex pattern + * @return list of database names + * @throws HiveException + */ + public List<String> getDatabasesByPattern(String databasePattern) throws HiveException { + try { + return getMSC().getDatabases(databasePattern); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * Query metadata to see if a database with the given name already exists. + * + * @param dbName + * @return true if a database with the given name already exists, false otherwise. + * @throws HiveException */ - protected boolean dropDatabase(String name) throws MetaException, TException { - return getMSC().dropDatabase(name); + public boolean databaseExists(String dbName) throws HiveException { + try { + if (null != getMSC().getDatabase(dbName)) { + return true; + } + return false; + } catch (NoSuchObjectException e) { + return false; + } catch (Exception e) { + throw new HiveException(e); + } } + /** * Load a directory into a Hive Table Partition - Alters existing content of * the partition with the contents of loadPath.
- If the partition does not @@ -740,7 +896,7 @@ Map<String, String> partSpec, boolean replace, Path tmpDirPath, boolean holdDDLTime) throws HiveException { - Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + Table tbl = getTable(tableName); try { /** * Move files before creating the partition since down stream processes @@ -865,7 +1021,7 @@ */ public void loadTable(Path loadPath, String tableName, boolean replace, Path tmpDirPath, boolean holdDDLTime) throws HiveException { - Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + Table tbl = getTable(tableName); if (replace) { tbl.replaceFiles(loadPath, tmpDirPath); @@ -1122,6 +1278,25 @@ return qlPartitions; } + /** + * Get the name of the current database. + * @return the name of the current database + */ + public String getCurrentDatabase() { + if (null == currentDatabase) { + currentDatabase = DEFAULT_DATABASE_NAME; + } + return currentDatabase; + } + + /** + * Set the name of the current database. + * @param currentDatabase + */ + public void setCurrentDatabase(String currentDatabase) { + this.currentDatabase = currentDatabase; + } + static private void checkPaths(FileSystem fs, FileStatus[] srcs, Path destf, boolean replace) throws HiveException { try { @@ -1284,10 +1459,8 @@ return null; } HiveStorageHandler storageHandler = - HiveUtils.getStorageHandler( - conf, - tbl.getParameters().get( - org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE)); + HiveUtils.getStorageHandler(conf, + tbl.getParameters().get(META_TABLE_STORAGE)); if (storageHandler == null) { return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index 352af89..e276f72 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -97,8 +97,8 @@ public class Table implements Serializable { } } - public Table(String name) { - this(getEmptyTable(name)); + public Table(String databaseName, String tableName) { + this(getEmptyTable(databaseName, tableName)); } /** @@ -120,7 +120,8 @@ /** * Initialize an empty table.
*/ - static org.apache.hadoop.hive.metastore.api.Table getEmptyTable(String name) { + static org.apache.hadoop.hive.metastore.api.Table + getEmptyTable(String databaseName, String tableName) { StorageDescriptor sd = new StorageDescriptor(); { sd.setSerdeInfo(new SerDeInfo()); @@ -144,8 +145,8 @@ public class Table implements Serializable { t.setPartitionKeys(new ArrayList()); t.setParameters(new HashMap()); t.setTableType(TableType.MANAGED_TABLE.toString()); - t.setTableName(name); - t.setDbName(MetaStoreUtils.DEFAULT_DATABASE_NAME); + t.setDbName(databaseName); + t.setTableName(tableName); } return t; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index eedf9e3..91c8888 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -33,7 +33,6 @@ import org.antlr.runtime.tree.Tree; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.Context; @@ -571,8 +570,7 @@ public abstract class BaseSemanticAnalyzer { + tableName; } - tableHandle = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, - tableName); + tableHandle = db.getTable(tableName); } catch (InvalidTableException ite) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(ast .getChild(0)), ite); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 658eea5..af9a94d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -18,6 +18,14 @@ package org.apache.hadoop.hive.ql.parse; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_CREATEDATABASE; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASECOMMENT; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DROPDATABASE; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_IFEXISTS; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_IFNOTEXISTS; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_SHOWDATABASES; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_SWITCHDATABASE; + import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -36,7 +44,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; @@ -45,6 +52,8 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.index.HiveIndex; import org.apache.hadoop.hive.ql.index.HiveIndexHandler; import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType; 
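(Illustration, not part of the patch: the Hive.java changes above give each session a current database plus create/drop/switch operations. A minimal sketch of driving the new API, assuming the pre-existing Hive.get(HiveConf) accessor and the thrift-generated Database bean; the "sales" database is made up.)

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.ql.metadata.Hive;

    public class DatabaseApiSketch {
      public static void main(String[] args) throws Exception {
        // Hive.get(HiveConf) predates this patch; only the database calls below are new.
        Hive db = Hive.get(new HiveConf(DatabaseApiSketch.class));

        Database sales = new Database();        // thrift-generated bean
        sales.setName("sales");
        sales.setDescription("hypothetical example database");
        sales.setLocationUri(null);             // let the metastore choose the directory

        db.createDatabase(sales, true);         // true: swallow AlreadyExistsException
        db.setCurrentDatabase("sales");         // what the new USE statement does
        System.out.println(db.getAllTables());  // now lists tables of the current database
        db.dropDatabase("sales", true, true);   // deleteData, ignoreUnknownDb
      }
    }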
@@ -57,14 +66,17 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.FetchWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; +import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; @@ -72,13 +84,12 @@ import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; +import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.mapred.TextInputFormat; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; /** * DDLSemanticAnalyzer. @@ -165,6 +176,9 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { } else if (ast.getToken().getType() == HiveParser.TOK_DESCTABLE) { ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); analyzeDescribeTable(ast); + } else if (ast.getToken().getType() == TOK_SHOWDATABASES) { + ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); + analyzeShowDatabases(ast); } else if (ast.getToken().getType() == HiveParser.TOK_SHOWTABLES) { ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); analyzeShowTables(ast); @@ -222,16 +236,72 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { analyzeLockTable(ast); } else if (ast.getToken().getType() == HiveParser.TOK_UNLOCKTABLE) { analyzeUnlockTable(ast); + } else if (ast.getToken().getType() == TOK_CREATEDATABASE) { + analyzeCreateDatabase(ast); + } else if (ast.getToken().getType() == TOK_DROPDATABASE) { + analyzeDropDatabase(ast); + } else if (ast.getToken().getType() == TOK_SWITCHDATABASE) { + analyzeSwitchDatabase(ast); } else { throw new SemanticException("Unsupported command."); } } + private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + boolean ifNotExists = false; + String dbComment = null; + + for (int i = 1; i < ast.getChildCount(); i++) { + ASTNode childNode = (ASTNode) ast.getChild(i); + switch (childNode.getToken().getType()) { + case TOK_IFNOTEXISTS: + ifNotExists = true; + break; + case TOK_DATABASECOMMENT: + dbComment = unescapeSQLString(childNode.getChild(0).getText()); + break; + default: + throw new SemanticException("Unrecognized token in CREATE DATABASE statement"); + } + } + + CreateDatabaseDesc createDatabaseDesc = new CreateDatabaseDesc(); + createDatabaseDesc.setName(dbName); + createDatabaseDesc.setComment(dbComment); + 
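+    // Note: the grammar has no LOCATION clause for CREATE DATABASE yet, so the
+    // analyzer leaves locationUri null (set just below) and the metastore is left
+    // to choose the database's directory.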
createDatabaseDesc.setIfNotExists(ifNotExists); + createDatabaseDesc.setLocationUri(null); + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + createDatabaseDesc), conf)); + } + + private void analyzeDropDatabase(ASTNode ast) throws SemanticException { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + boolean ifExists = false; + + if (null != ast.getFirstChildWithType(TOK_IFEXISTS)) { + ifExists = true; + } + + DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropDatabaseDesc), conf)); + } + + private void analyzeSwitchDatabase(ASTNode ast) { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + SwitchDatabaseDesc switchDatabaseDesc = new SwitchDatabaseDesc(dbName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + switchDatabaseDesc), conf)); + } + + + private void analyzeDropTable(ASTNode ast, boolean expectView) throws SemanticException { String tableName = unescapeIdentifier(ast.getChild(0).getText()); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tableName, false); // Ignore if table does not exist if (tab != null) { inputs.add(new ReadEntity(tab)); @@ -340,11 +410,11 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { private List> getIndexBuilderMapRed(String baseTableName, String indexName, HashMap partSpec) throws SemanticException { try { - Index index = db.getIndex(MetaStoreUtils.DEFAULT_DATABASE_NAME, baseTableName, indexName); - Table indexTbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,index.getIndexTableName()); + String dbName = db.getCurrentDatabase(); + Index index = db.getIndex(dbName, baseTableName, indexName); + Table indexTbl = db.getTable(dbName, index.getIndexTableName()); String baseTblName = index.getOrigTableName(); - Table baseTbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, - baseTblName); + Table baseTbl = db.getTable(dbName, baseTblName); String handlerCls = index.getIndexHandlerClass(); HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls); @@ -415,7 +485,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { alterTblDesc.setOldName(tableName); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tableName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -439,7 +509,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { alterTblDesc.setOldName(tableName); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tableName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -465,7 +535,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { alterTblDesc.setSerdeName(serdeName); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tableName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -529,7 +599,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { outputFormat, serde, storageHandler, partSpec); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false); + Table tab = 
db.getTable(db.getCurrentDatabase(), tableName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -550,7 +620,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { AlterTableDesc alterTblDesc = new AlterTableDesc (tableName, newLocation, partSpec); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tableName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -604,7 +674,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { } try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tableName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -622,7 +692,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { String tableName = unescapeIdentifier(ast.getChild(0).getText()); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tableName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -753,6 +823,18 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { setFetchTask(createFetchTask(showPartsDesc.getSchema())); } + private void analyzeShowDatabases(ASTNode ast) throws SemanticException { + ShowDatabasesDesc showDatabasesDesc; + if (ast.getChildCount() == 1) { + String databasePattern = unescapeSQLString(ast.getChild(0).getText()); + showDatabasesDesc = new ShowDatabasesDesc(ctx.getResFile(), databasePattern); + } else { + showDatabasesDesc = new ShowDatabasesDesc(ctx.getResFile()); + } + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showDatabasesDesc), conf)); + setFetchTask(createFetchTask(showDatabasesDesc.getSchema())); + } + private void analyzeShowTables(ASTNode ast) throws SemanticException { ShowTablesDesc showTblsDesc; if (ast.getChildCount() == 1) { @@ -769,7 +851,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { ShowTableStatusDesc showTblStatusDesc; String tableNames = unescapeIdentifier(ast.getChild(0).getText()); - String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + String dbName = db.getCurrentDatabase(); int children = ast.getChildCount(); HashMap partSpec = null; if (children >= 2) { @@ -929,7 +1011,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, unescapeIdentifier(ast.getChild(1).getText())); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tblName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -974,7 +1056,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { .getChild(2).getText()), newType, newComment, first, flagCol); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tblName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -995,7 +1077,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { alterType); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, 
tblName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tblName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab)); @@ -1015,7 +1097,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { DropTableDesc dropTblDesc = new DropTableDesc(tblName, partSpecs); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tblName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); } @@ -1045,7 +1127,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { String tblName = unescapeIdentifier(ast.getChild(0).getText()); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tblName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); } @@ -1074,7 +1156,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { if (currentPart != null) { validatePartitionValues(currentPart); AddPartitionDesc addPartitionDesc = new AddPartitionDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, currentPart, + db.getCurrentDatabase(), tblName, currentPart, currentLocation, ifNotExists); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf)); @@ -1096,7 +1178,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { if (currentPart != null) { validatePartitionValues(currentPart); AddPartitionDesc addPartitionDesc = new AddPartitionDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, currentPart, + db.getCurrentDatabase(), tblName, currentPart, currentLocation, ifNotExists); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf)); @@ -1120,7 +1202,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { Table tab; try { - tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, false); + tab = db.getTable(db.getCurrentDatabase(), tblName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); } @@ -1133,7 +1215,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { if (partSpecs.size() == 0) { AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, null, + db.getCurrentDatabase(), tblName, null, AlterTableDesc.AlterTableTypes.TOUCH); outputs.add(new WriteEntity(tab)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), @@ -1142,7 +1224,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { addTablePartsOutputs(tblName, partSpecs); for (Map partSpec : partSpecs) { AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, partSpec, + db.getCurrentDatabase(), tblName, partSpec, AlterTableDesc.AlterTableTypes.TOUCH); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc), conf)); @@ -1162,7 +1244,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { List> partSpecs = getPartitionSpecs(ast); try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, false); + Table tab = db.getTable(db.getCurrentDatabase(), tblName, false); if (tab != null) { inputs.add(new ReadEntity(tab)); } @@ -1182,7 +1264,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { Map partSpec = partSpecs.get(0); AlterTableSimpleDesc archiveDesc = new AlterTableSimpleDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, partSpec, + db.getCurrentDatabase(), 
tblName, partSpec, (isUnArchive ? AlterTableTypes.UNARCHIVE : AlterTableTypes.ARCHIVE)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), archiveDesc), conf)); @@ -1273,7 +1355,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { throws SemanticException { Table tab; try { - tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName); + tab = db.getTable(tblName); } catch (HiveException e) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g index fca7ff6..427f41a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g @@ -85,6 +85,7 @@ TOK_STRING; TOK_LIST; TOK_STRUCT; TOK_MAP; +TOK_CREATEDATABASE; TOK_CREATETABLE; TOK_CREATEINDEX; TOK_CREATEINDEX_INDEXTBLNAME; @@ -113,6 +114,7 @@ TOK_ALTERTABLE_PROPERTIES; TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION; TOK_ALTERINDEX_REBUILD; TOK_MSCK; +TOK_SHOWDATABASES; TOK_SHOWTABLES; TOK_SHOWFUNCTIONS; TOK_SHOWPARTITIONS; @@ -120,7 +122,10 @@ TOK_SHOW_TABLESTATUS; TOK_SHOWLOCKS; TOK_LOCKTABLE; TOK_UNLOCKTABLE; +TOK_SWITCHDATABASE; +TOK_DROPDATABASE; TOK_DROPTABLE; +TOK_DATABASECOMMENT; TOK_TABCOLLIST; TOK_TABCOL; TOK_TABLECOMMENT; @@ -162,6 +167,7 @@ TOK_TABLEPROPLIST; TOK_TABTYPE; TOK_LIMIT; TOK_TABLEPROPERTY; +TOK_IFEXISTS; TOK_IFNOTEXISTS; TOK_HINTLIST; TOK_HINT; @@ -227,7 +233,10 @@ loadStatement ddlStatement @init { msgs.push("ddl statement"); } @after { msgs.pop(); } - : createTableStatement + : createDatabaseStatement + | switchDatabaseStatement + | dropDatabaseStatement + | createTableStatement | dropTableStatement | alterStatement | descStatement @@ -244,6 +253,13 @@ ddlStatement | unlockStatement ; +ifExists +@init { msgs.push("if exists clause"); } +@after { msgs.pop(); } + : KW_IF KW_EXISTS + -> ^(TOK_IFEXISTS) + ; + ifNotExists @init { msgs.push("if not exists clause"); } @after { msgs.pop(); } @@ -251,6 +267,38 @@ ifNotExists -> ^(TOK_IFNOTEXISTS) ; + +createDatabaseStatement +@init { msgs.push("create database statement"); } +@after { msgs.pop(); } + : KW_CREATE (KW_DATABASE|KW_SCHEMA) + ifNotExists? + name=Identifier + databaseComment? + -> ^(TOK_CREATEDATABASE $name ifNotExists? databaseComment?) + ; + +switchDatabaseStatement +@init { msgs.push("switch database statement"); } +@after { msgs.pop(); } + : KW_USE Identifier + -> ^(TOK_SWITCHDATABASE Identifier) + ; + +dropDatabaseStatement +@init { msgs.push("drop database statement"); } +@after { msgs.pop(); } + : KW_DROP (KW_DATABASE|KW_SCHEMA) ifExists? Identifier + -> ^(TOK_DROPDATABASE Identifier ifExists?) + ; + +databaseComment +@init { msgs.push("database's comment"); } +@after { msgs.pop(); } + : KW_COMMENT comment=StringLiteral + -> ^(TOK_DATABASECOMMENT $comment) + ; + createTableStatement @init { msgs.push("create table statement"); } @after { msgs.pop(); } @@ -577,7 +625,8 @@ descStatement showStatement @init { msgs.push("show statement"); } @after { msgs.pop(); } - : KW_SHOW KW_TABLES showStmtIdentifier? -> ^(TOK_SHOWTABLES showStmtIdentifier?) + : KW_SHOW (KW_DATABASES|KW_SCHEMAS) (KW_LIKE showStmtIdentifier)? -> ^(TOK_SHOWDATABASES showStmtIdentifier?) + | KW_SHOW KW_TABLES showStmtIdentifier? -> ^(TOK_SHOWTABLES showStmtIdentifier?) | KW_SHOW KW_FUNCTIONS showStmtIdentifier? -> ^(TOK_SHOWFUNCTIONS showStmtIdentifier?) | KW_SHOW KW_PARTITIONS Identifier partitionSpec? -> ^(TOK_SHOWPARTITIONS Identifier partitionSpec?) 
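// For reference (comment only, not a grammar change), the new database rules above accept:
//   CREATE (DATABASE|SCHEMA) [IF NOT EXISTS] name [COMMENT 'text']
//   USE name
//   DROP (DATABASE|SCHEMA) [IF EXISTS] name
//   SHOW (DATABASES|SCHEMAS) [LIKE pattern]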
| KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=Identifier)? KW_LIKE showStmtIdentifier partitionSpec? @@ -1786,6 +1835,7 @@ KW_INTERSECT: 'INTERSECT'; KW_VIEW: 'VIEW'; KW_IN: 'IN'; KW_DATABASE: 'DATABASE'; +KW_DATABASES: 'DATABASES'; KW_MATERIALIZED: 'MATERIALIZED'; KW_SCHEMA: 'SCHEMA'; KW_SCHEMAS: 'SCHEMAS'; @@ -1821,6 +1871,7 @@ KW_LATERAL: 'LATERAL'; KW_TOUCH: 'TOUCH'; KW_ARCHIVE: 'ARCHIVE'; KW_UNARCHIVE: 'UNARCHIVE'; +KW_USE: 'USE'; // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index cba4a66..05ffbff 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -32,9 +32,9 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Set; import java.util.TreeSet; +import java.util.Map.Entry; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; @@ -45,7 +45,6 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.Context; @@ -94,7 +93,6 @@ import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.optimizer.GenMRFileSink1; import org.apache.hadoop.hive.ql.optimizer.GenMROperator; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext; -import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink1; import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink2; import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink3; @@ -104,6 +102,7 @@ import org.apache.hadoop.hive.ql.optimizer.GenMRUnion1; import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; import org.apache.hadoop.hive.ql.optimizer.MapJoinFactory; import org.apache.hadoop.hive.ql.optimizer.Optimizer; +import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext; import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalOptimizer; import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; @@ -123,7 +122,6 @@ import org.apache.hadoop.hive.ql.plan.ExtractDesc; import org.apache.hadoop.hive.ql.plan.FetchWork; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc; -import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; import org.apache.hadoop.hive.ql.plan.ForwardDesc; import org.apache.hadoop.hive.ql.plan.GroupByDesc; import org.apache.hadoop.hive.ql.plan.JoinCondDesc; @@ -145,11 +143,12 @@ import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.UDTFDesc; import org.apache.hadoop.hive.ql.plan.UnionDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode; import 
org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash; import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; @@ -157,9 +156,9 @@ import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; @@ -744,7 +743,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { String tab_name = qb.getTabNameForAlias(alias); Table tab = null; try { - tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tab_name); + tab = db.getTable(tab_name); } catch (InvalidTableException ite) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(qb .getParseInfo().getSrcForAlias(alias))); @@ -6768,16 +6767,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { case CTAS: // create table as select - // check for existence of table. Throw an exception if it exists. + // Verify that the table does not already exist try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, - tableName, false); // do not throw exception if table does not exist - - if (tab != null) { - throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS - .getMsg(tableName)); + if (null != db.getTable(db.getCurrentDatabase(), tableName, false)) { + throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(tableName)); } - } catch (HiveException e) { // may be unable to get meta data + } catch (HiveException e) { throw new SemanticException(e); } @@ -6790,7 +6785,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { return selectStmt; default: - assert false; // should never be unknown command type + throw new SemanticException("Unrecognized command."); } return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 881290c..3b78d25 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -35,6 +35,9 @@ public final class SemanticAnalyzerFactory { static { commandType.put(HiveParser.TOK_EXPLAIN, "EXPLAIN"); commandType.put(HiveParser.TOK_LOAD, "LOAD"); + commandType.put(HiveParser.TOK_CREATEDATABASE, "CREATEDATABASE"); + commandType.put(HiveParser.TOK_DROPDATABASE, "DROPDATABASE"); + commandType.put(HiveParser.TOK_SWITCHDATABASE, "SWITCHDATABASE"); commandType.put(HiveParser.TOK_CREATETABLE, "CREATETABLE"); commandType.put(HiveParser.TOK_DROPTABLE, "DROPTABLE"); commandType.put(HiveParser.TOK_DESCTABLE, "DESCTABLE"); @@ -52,6 +55,7 @@ public final class SemanticAnalyzerFactory { commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, "ALTERTABLE_PROPERTIES"); 
commandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, "ALTERTABLE_SERIALIZER"); commandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, "ALTERTABLE_SERDEPROPERTIES"); + commandType.put(HiveParser.TOK_SHOWDATABASES, "SHOWDATABASES"); commandType.put(HiveParser.TOK_SHOWTABLES, "SHOWTABLES"); commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, "SHOW_TABLESTATUS"); commandType.put(HiveParser.TOK_SHOWFUNCTIONS, "SHOWFUNCTIONS"); @@ -92,6 +96,9 @@ public final class SemanticAnalyzerFactory { return new ExplainSemanticAnalyzer(conf); case HiveParser.TOK_LOAD: return new LoadSemanticAnalyzer(conf); + case HiveParser.TOK_CREATEDATABASE: + case HiveParser.TOK_DROPDATABASE: + case HiveParser.TOK_SWITCHDATABASE: case HiveParser.TOK_DROPTABLE: case HiveParser.TOK_DROPVIEW: case HiveParser.TOK_DESCTABLE: @@ -108,6 +115,7 @@ public final class SemanticAnalyzerFactory { case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: case HiveParser.TOK_ALTERINDEX_REBUILD: case HiveParser.TOK_ALTERVIEW_PROPERTIES: + case HiveParser.TOK_SHOWDATABASES: case HiveParser.TOK_SHOWTABLES: case HiveParser.TOK_SHOW_TABLESTATUS: case HiveParser.TOK_SHOWFUNCTIONS: diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateDatabaseDesc.java new file mode 100644 index 0000000..57ccf94 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateDatabaseDesc.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +/** + * CreateDatabaseDesc. + * + */ +@Explain(displayName = "Create Database") +public class CreateDatabaseDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + + String databaseName; + String locationUri; + String comment; + boolean ifNotExists; + + /** + * For serialization only. 
+ */ + public CreateDatabaseDesc() { + } + + public CreateDatabaseDesc(String databaseName, String comment, + String locationUri, boolean ifNotExists) { + super(); + this.databaseName = databaseName; + this.comment = comment; + this.locationUri = locationUri; + this.ifNotExists = ifNotExists; + } + + public CreateDatabaseDesc(String databaseName, boolean ifNotExists) { + this(databaseName, null, null, ifNotExists); + } + + + + @Explain(displayName="if not exists") + public boolean getIfNotExists() { + return ifNotExists; + } + + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } + + @Explain(displayName="name") + public String getName() { + return databaseName; + } + + public void setName(String databaseName) { + this.databaseName = databaseName; + } + + @Explain(displayName="comment") + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + @Explain(displayName="locationUri") + public String getLocationUri() { + return locationUri; + } + + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 0c848e6..d445be1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -30,13 +30,17 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity; */ public class DDLWork implements Serializable { private static final long serialVersionUID = 1L; - private CreateIndexDesc createIndexDesc; + private CreateIndexDesc createIndexDesc; private DropIndexDesc dropIdxDesc; + private CreateDatabaseDesc createDatabaseDesc; + private SwitchDatabaseDesc switchDatabaseDesc; + private DropDatabaseDesc dropDatabaseDesc; private CreateTableDesc createTblDesc; private CreateTableLikeDesc createTblLikeDesc; private CreateViewDesc createVwDesc; private DropTableDesc dropTblDesc; private AlterTableDesc alterTblDesc; + private ShowDatabasesDesc showDatabasesDesc; private ShowTablesDesc showTblsDesc; private LockTableDesc lockTblDesc; private UnlockTableDesc unlockTblDesc; @@ -72,6 +76,36 @@ public class DDLWork implements Serializable { } /** + * @param createDatabaseDesc + * Create Database descriptor + */ + public DDLWork(HashSet inputs, HashSet outputs, + CreateDatabaseDesc createDatabaseDesc) { + this(inputs, outputs); + this.createDatabaseDesc = createDatabaseDesc; + } + + /** + * @param dropDatabaseDesc + * Drop Database descriptor + */ + public DDLWork(HashSet inputs, HashSet outputs, + DropDatabaseDesc dropDatabaseDesc) { + this(inputs, outputs); + this.dropDatabaseDesc = dropDatabaseDesc; + } + + /** + * @param switchDatabaseDesc + * Switch Database descriptor + */ + public DDLWork(HashSet inputs, HashSet outputs, + SwitchDatabaseDesc switchDatabaseDesc) { + this(inputs, outputs); + this.switchDatabaseDesc = switchDatabaseDesc; + } + + /** * @param alterTblDesc * alter table descriptor */ @@ -136,6 +170,16 @@ public class DDLWork implements Serializable { } /** + * @param showDatabasesDesc + */ + public DDLWork(HashSet inputs, HashSet outputs, + ShowDatabasesDesc showDatabasesDesc) { + this(inputs, outputs); + + this.showDatabasesDesc = showDatabasesDesc; + } + + /** * @param showTblsDesc */ public DDLWork(HashSet inputs, HashSet outputs, @@ -252,6 +296,51 @@ public class DDLWork implements Serializable { } /** + * @return Create Database descriptor + */ + public 
CreateDatabaseDesc getCreateDatabaseDesc() { + return createDatabaseDesc; + } + + /** + * Set Create Database descriptor + * @param createDatabaseDesc + */ + public void setCreateDatabaseDesc(CreateDatabaseDesc createDatabaseDesc) { + this.createDatabaseDesc = createDatabaseDesc; + } + + /** + * @return Drop Database descriptor + */ + public DropDatabaseDesc getDropDatabaseDesc() { + return dropDatabaseDesc; + } + + /** + * Set Drop Database descriptor + * @param dropDatabaseDesc + */ + public void setDropDatabaseDesc(DropDatabaseDesc dropDatabaseDesc) { + this.dropDatabaseDesc = dropDatabaseDesc; + } + + /** + * @return Switch Database descriptor + */ + public SwitchDatabaseDesc getSwitchDatabaseDesc() { + return switchDatabaseDesc; + } + + /** + * Set Switch Database descriptor + * @param switchDatabaseDesc + */ + public void setSwitchDatabaseDesc(SwitchDatabaseDesc switchDatabaseDesc) { + this.switchDatabaseDesc = switchDatabaseDesc; + } + + /** * @return the createTblDesc */ @Explain(displayName = "Create Table Operator") @@ -340,6 +429,22 @@ public class DDLWork implements Serializable { } /** + * @return the showDatabasesDesc + */ + @Explain(displayName = "Show Databases Operator") + public ShowDatabasesDesc getShowDatabasesDesc() { + return showDatabasesDesc; + } + + /** + * @param showDatabasesDesc + * the showDatabasesDesc to set + */ + public void setShowDatabasesDesc(ShowDatabasesDesc showDatabasesDesc) { + this.showDatabasesDesc = showDatabasesDesc; + } + + /** * @return the showTblsDesc */ @Explain(displayName = "Show Table Operator") diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropDatabaseDesc.java new file mode 100644 index 0000000..ac47eb1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropDatabaseDesc.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +/** + * DropDatabaseDesc. 
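+ * Carries the database name and the IF EXISTS flag for a DROP DATABASE statement.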
+ * + */ +@Explain(displayName = "Drop Database") +public class DropDatabaseDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + + String databaseName; + boolean ifExists; + + public DropDatabaseDesc(String databaseName, boolean ifExists) { + super(); + this.databaseName = databaseName; + this.ifExists = ifExists; + } + + @Explain(displayName = "database") + public String getDatabaseName() { + return databaseName; + } + + public void setDatabaseName(String databaseName) { + this.databaseName = databaseName; + } + + @Explain(displayName = "if exists") + public boolean getIfExists() { + return ifExists; + } + + public void setIfExists(boolean ifExists) { + this.ifExists = ifExists; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowDatabasesDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowDatabasesDesc.java new file mode 100644 index 0000000..0ad0658 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowDatabasesDesc.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; + +/** + * ShowDatabasesDesc. + * + */ +@Explain(displayName = "Show Databases") +public class ShowDatabasesDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + String pattern; + String resFile; + + /** + * table name for the result of show databases. + */ + private static final String table = "show_databases"; + + /** + * thrift ddl for the result of show databases. 
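+ * The result set is a single string column named database_name.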
+ */ + private static final String schema = "database_name#string"; + + public String getTable() { + return table; + } + + public String getSchema() { + return schema; + } + + public ShowDatabasesDesc() { + } + + /** + * @param resFile + */ + public ShowDatabasesDesc(Path resFile) { + this.resFile = resFile.toString(); + pattern = null; + } + + /** + * @param pattern + * names of databases to show + */ + public ShowDatabasesDesc(Path resFile, String pattern) { + this.resFile = resFile.toString(); + this.pattern = pattern; + } + + /** + * @return the pattern + */ + @Explain(displayName = "pattern") + public String getPattern() { + return pattern; + } + + /** + * @param pattern + * the pattern to set + */ + public void setPattern(String pattern) { + this.pattern = pattern; + } + + /** + * @return the resFile + */ + @Explain(displayName = "result file", normalExplain = false) + public String getResFile() { + return resFile; + } + + /** + * @param resFile + * the resFile to set + */ + public void setResFile(String resFile) { + this.resFile = resFile; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SwitchDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/SwitchDatabaseDesc.java new file mode 100644 index 0000000..0cad7c1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/SwitchDatabaseDesc.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +/** + * SwitchDatabaseDesc. 
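+ * Carries the target database name for the new USE statement.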
+ * + */ +@Explain(displayName = "Switch Database") +public class SwitchDatabaseDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + + String databaseName; + + public SwitchDatabaseDesc() { + } + + public SwitchDatabaseDesc(String databaseName) { + super(); + this.databaseName = databaseName; + } + + @Explain(displayName = "name") + public String getDatabaseName() { + return databaseName; + } + + public void setDatabaseName(String databaseName) { + this.databaseName = databaseName; + } +} diff --git ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java index 70d64ef..4448ec7 100644 --- ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java +++ ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.ql; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + import java.io.BufferedInputStream; import java.io.DataInputStream; import java.io.File; @@ -316,12 +318,21 @@ public class QTestUtil { * Clear out any side effects of running tests */ public void clearTestSideEffects () throws Exception { - // delete any tables other than the source tables - for (String s: db.getAllTables()) { - if (!srcTables.contains(s)) { - db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, s); + // Delete any tables other than the source tables + // and any databases other than the default database. + for (String dbName : db.getAllDatabases()) { + db.setCurrentDatabase(dbName); + for (String tblName : db.getAllTables()) { + if (!DEFAULT_DATABASE_NAME.equals(dbName) || !srcTables.contains(tblName)) { + db.dropTable(dbName, tblName); + } + } + if (!DEFAULT_DATABASE_NAME.equals(dbName)) { + db.dropDatabase(dbName); } } + db.setCurrentDatabase(DEFAULT_DATABASE_NAME); + // allocate and initialize a new conf since a test can // modify conf by using 'set' commands conf = new HiveConf (Driver.class); @@ -425,7 +436,7 @@ public class QTestUtil { db.createTable("src_sequencefile", cols, null, SequenceFileInputFormat.class, SequenceFileOutputFormat.class); - Table srcThrift = new Table("src_thrift"); + Table srcThrift = new Table(db.getCurrentDatabase(), "src_thrift"); srcThrift.setInputFormatClass(SequenceFileInputFormat.class.getName()); srcThrift.setOutputFormatClass(SequenceFileOutputFormat.class.getName()); srcThrift.setSerializationLib(ThriftDeserializer.class.getName()); @@ -498,7 +509,7 @@ public class QTestUtil { db.createTable("dest3", cols, part_cols, TextInputFormat.class, IgnoreKeyTextOutputFormat.class); - Table dest3 = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "dest3"); + Table dest3 = db.getTable("dest3"); HashMap part_spec = new HashMap(); part_spec.put("ds", "2008-04-08"); diff --git ql/src/test/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java ql/src/test/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java index 8cff958..fe41e2c 100644 --- ql/src/test/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java +++ ql/src/test/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java @@ -18,12 +18,14 @@ package org.apache.hadoop.hive.ql.hooks; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + import java.util.Set; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.hive.ql.QTestUtil; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.session.SessionState; 
+import org.apache.hadoop.security.UserGroupInformation; /** * Implementation of a pre execute hook that prevents modifications @@ -40,8 +42,10 @@ public class EnforceReadOnlyTables implements PreExecute { if ((w.getTyp() == WriteEntity.Type.TABLE) || (w.getTyp() == WriteEntity.Type.PARTITION)) { Table t = w.getTable(); - if (QTestUtil.srcTables.contains(t.getTableName())) + if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(t.getDbName()) + && QTestUtil.srcTables.contains(t.getTableName())) { throw new RuntimeException ("Cannot overwrite read-only table: " + t.getTableName()); + } } } } diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index ab39ca4..14961d2 100755 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.ql.metadata; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; @@ -29,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.serde.Constants; @@ -88,22 +91,18 @@ public class TestHive extends TestCase { e1.printStackTrace(); assertTrue("Unable to drop table", false); } - Table tbl = new Table(tableName); + Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); List fields = tbl.getCols(); - fields.add(new FieldSchema("col1", Constants.INT_TYPE_NAME, - "int -- first column")); - fields.add(new FieldSchema("col2", Constants.STRING_TYPE_NAME, - "string -- second column")); - fields.add(new FieldSchema("col3", Constants.DOUBLE_TYPE_NAME, - "double -- thrift column")); + fields.add(new FieldSchema("col1", Constants.INT_TYPE_NAME, "int -- first column")); + fields.add(new FieldSchema("col2", Constants.STRING_TYPE_NAME, "string -- second column")); + fields.add(new FieldSchema("col3", Constants.DOUBLE_TYPE_NAME, "double -- thrift column")); tbl.setFields(fields); tbl.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class); tbl.setInputFormatClass(SequenceFileInputFormat.class); - tbl.setProperty("comment", - "this is a test table created as part junit tests"); + tbl.setProperty("comment", "this is a test table created as part of junit tests"); List bucketCols = tbl.getBucketCols(); bucketCols.add("col1"); @@ -156,9 +155,9 @@ public class TestHive extends TestCase { .getOwner(), ft.getOwner()); assertEquals("Table retention didn't match for table: " + tableName, tbl.getRetention(), ft.getRetention()); - assertEquals("Data location is not set correctly", wh - .getDefaultTablePath(MetaStoreUtils.DEFAULT_DATABASE_NAME, - tableName).toString(), ft.getDataLocation().toString()); + assertEquals("Data location is not set correctly", + wh.getDefaultTablePath(DEFAULT_DATABASE_NAME, tableName).toString(), + ft.getDataLocation().toString()); // now that URI is set correctly, set the original table's uri and then // compare the two tables tbl.setDataLocation(ft.getDataLocation()); @@ -191,7 +191,7 @@ public class TestHive extends
TestCase { /** * Tests create and fetch of a thrift based table. - * + * * @throws Throwable */ public void testThriftTable() throws Throwable { @@ -203,7 +203,7 @@ public class TestHive extends TestCase { System.err.println(StringUtils.stringifyException(e1)); assertTrue("Unable to drop table", false); } - Table tbl = new Table(tableName); + Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); tbl.setInputFormatClass(SequenceFileInputFormat.class.getName()); tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName()); tbl.setSerializationLib(ThriftDeserializer.class.getName()); @@ -229,9 +229,9 @@ public class TestHive extends TestCase { .getOwner(), ft.getOwner()); assertEquals("Table retention didn't match for table: " + tableName, tbl.getRetention(), ft.getRetention()); - assertEquals("Data location is not set correctly", wh - .getDefaultTablePath(MetaStoreUtils.DEFAULT_DATABASE_NAME, - tableName).toString(), ft.getDataLocation().toString()); + assertEquals("Data location is not set correctly", + wh.getDefaultTablePath(DEFAULT_DATABASE_NAME, tableName).toString(), + ft.getDataLocation().toString()); // now that URI is set correctly, set the original table's uri and then // compare the two tables tbl.setDataLocation(ft.getDataLocation()); @@ -245,7 +246,7 @@ public class TestHive extends TestCase { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to fetch table correctly: " + tableName, false); } - hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(DEFAULT_DATABASE_NAME, tableName); } catch (Throwable e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testThriftTable() failed"); @@ -254,8 +255,7 @@ } private static Table createTestTable(String dbName, String tableName) throws HiveException { - Table tbl = new Table(tableName); - tbl.setDbName(dbName); + Table tbl = new Table(dbName, tableName); tbl.setInputFormatClass(SequenceFileInputFormat.class.getName()); tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName()); tbl.setSerializationLib(ThriftDeserializer.class.getName()); @@ -281,8 +281,11 @@ public class TestHive extends TestCase { try { String dbName = "db_for_testgettables"; String table1Name = "table1"; - hm.dropDatabase(dbName); - hm.createDatabase(dbName, ""); + hm.dropDatabase(dbName, true, true); + + Database db = new Database(); + db.setName(dbName); + hm.createDatabase(db); List ts = new ArrayList(2); ts.add(table1Name); @@ -312,6 +315,10 @@ public class TestHive extends TestCase { hm.dropTable(dbName, table1Name); assertFalse(fs.exists(table1.getPath())); + // Drop all tables + for (String tableName : hm.getAllTables(dbName)) { + hm.dropTable(dbName, tableName); + } hm.dropDatabase(dbName); } catch (Throwable e) { System.err.println(StringUtils.stringifyException(e)); diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java index 26cc71a..3a97e31 100644 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java @@ -11,10 +11,11 @@ import junit.framework.TestCase; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import
org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; -import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.mapred.TextInputFormat; import org.apache.thrift.TException; @@ -60,7 +61,11 @@ public class TestHiveMetaStoreChecker extends TestCase { // cleanup hive.dropTable(dbName, tableName, true, true); - hive.dropDatabase(dbName); + try { + hive.dropDatabase(dbName); + } catch (NoSuchObjectException e) { + // ignore + } } @Override @@ -89,9 +94,11 @@ public class TestHiveMetaStoreChecker extends TestCase { assertTrue(result.getPartitionsNotOnFs().isEmpty()); assertTrue(result.getPartitionsNotInMs().isEmpty()); - hive.createDatabase(dbName, ""); + Database db = new Database(); + db.setName(dbName); + hive.createDatabase(db); - Table table = new Table(tableName); + Table table = new Table(dbName, tableName); table.setDbName(dbName); table.setInputFormatClass(TextInputFormat.class); table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class); @@ -159,9 +166,11 @@ public class TestHiveMetaStoreChecker extends TestCase { public void testPartitionsCheck() throws HiveException, MetaException, IOException, TException, AlreadyExistsException { - hive.createDatabase(dbName, ""); + Database db = new Database(); + db.setName(dbName); + hive.createDatabase(db); - Table table = new Table(tableName); + Table table = new Table(dbName, tableName); table.setDbName(dbName); table.setInputFormatClass(TextInputFormat.class); table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class); diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java index 790abf6..92d834d 100644 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java @@ -39,7 +39,7 @@ public class TestPartition extends TestCase { List partCols = new ArrayList(); partCols.add(new FieldSchema(PARTITION_COL, "string", "")); - Table tbl = new Table(TABLENAME); + Table tbl = new Table("default", TABLENAME); tbl.setDataLocation(new URI("tmplocation")); tbl.setPartCols(partCols); diff --git ql/src/test/queries/clientnegative/database_create_already_exists.q ql/src/test/queries/clientnegative/database_create_already_exists.q new file mode 100644 index 0000000..3af7607 --- /dev/null +++ ql/src/test/queries/clientnegative/database_create_already_exists.q @@ -0,0 +1,5 @@ +SHOW DATABASES; + +-- Try to create a database that already exists +CREATE DATABASE test_db; +CREATE DATABASE test_db; diff --git ql/src/test/queries/clientnegative/database_create_invalid_name.q ql/src/test/queries/clientnegative/database_create_invalid_name.q new file mode 100644 index 0000000..5d67495 --- /dev/null +++ ql/src/test/queries/clientnegative/database_create_invalid_name.q @@ -0,0 +1,4 @@ +SHOW DATABASES; + +-- Try to create a database with an invalid name +CREATE DATABASE `test.db`; diff --git ql/src/test/queries/clientnegative/database_drop_does_not_exist.q ql/src/test/queries/clientnegative/database_drop_does_not_exist.q new file mode 100644 index 0000000..66a940e --- /dev/null +++ ql/src/test/queries/clientnegative/database_drop_does_not_exist.q @@ -0,0 +1,4 @@ +SHOW DATABASES; + +-- 
Try to drop a database that does not exist +DROP DATABASE does_not_exist; diff --git ql/src/test/queries/clientnegative/database_drop_not_empty.q ql/src/test/queries/clientnegative/database_drop_not_empty.q new file mode 100644 index 0000000..ae5a443 --- /dev/null +++ ql/src/test/queries/clientnegative/database_drop_not_empty.q @@ -0,0 +1,8 @@ +SHOW DATABASES; + +-- Try to drop a non-empty database +CREATE DATABASE test_db; +USE test_db; +CREATE TABLE t(a INT); +USE default; +DROP DATABASE test_db; diff --git ql/src/test/queries/clientnegative/database_switch_does_not_exist.q ql/src/test/queries/clientnegative/database_switch_does_not_exist.q new file mode 100644 index 0000000..5cd4697 --- /dev/null +++ ql/src/test/queries/clientnegative/database_switch_does_not_exist.q @@ -0,0 +1,4 @@ +SHOW DATABASES; + +-- Try to switch to a database that does not exist +USE does_not_exist; diff --git ql/src/test/queries/clientpositive/add_part_exist.q ql/src/test/queries/clientpositive/add_part_exist.q index 54d0096..d176661 100644 --- ql/src/test/queries/clientpositive/add_part_exist.q +++ ql/src/test/queries/clientpositive/add_part_exist.q @@ -12,3 +12,26 @@ SHOW PARTITIONS add_part_test; ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03'); SHOW PARTITIONS add_part_test; + +DROP TABLE add_part_test; +SHOW TABLES; + +-- Test ALTER TABLE ADD PARTITION in non-default Database +CREATE DATABASE add_part_test_db; +USE add_part_test_db; +SHOW TABLES; + +CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING); +SHOW PARTITIONS add_part_test; + +ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01'); +SHOW PARTITIONS add_part_test; + +ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01'); +SHOW PARTITIONS add_part_test; + +ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02'); +SHOW PARTITIONS add_part_test; + +ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03'); +SHOW PARTITIONS add_part_test; diff --git ql/src/test/queries/clientpositive/alter1.q ql/src/test/queries/clientpositive/alter1.q index 6f95685..5fd1945 100644 --- ql/src/test/queries/clientpositive/alter1.q +++ ql/src/test/queries/clientpositive/alter1.q @@ -24,3 +24,48 @@ describe extended alter1; alter table alter1 replace columns (a int, b int, c string); describe alter1; + +-- Cleanup +DROP TABLE alter1; +SHOW TABLES; + +-- With non-default Database + +CREATE DATABASE alter1_db; +USE alter1_db; +SHOW TABLES; + +CREATE TABLE alter1(a INT, b INT); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='3'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='9'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20'); +DESCRIBE EXTENDED alter1; + +add jar ../data/files/TestSerDe.jar; +ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe'; +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 REPLACE COLUMNS (a int, b int, 
c string); +DESCRIBE alter1; + +DROP TABLE alter1; +USE default; +DROP DATABASE alter1_db; diff --git ql/src/test/queries/clientpositive/alter2.q ql/src/test/queries/clientpositive/alter2.q index 2388e21..ddf5787 100644 --- ql/src/test/queries/clientpositive/alter2.q +++ ql/src/test/queries/clientpositive/alter2.q @@ -18,3 +18,38 @@ show partitions alter2; alter table alter2 add partition (insertdate='2008-01-02') location '2008/01/02'; describe extended alter2; show partitions alter2; + +-- Cleanup +DROP TABLE alter2; +SHOW TABLES; + +-- Using non-default Database + +CREATE DATABASE alter2_db; +USE alter2_db; +SHOW TABLES; + +CREATE TABLE alter2(a int, b int) PARTITIONED BY (insertdate string); +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01'; +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02'; +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +DROP TABLE alter2; + +CREATE EXTERNAL TABLE alter2(a int, b int) PARTITIONED BY (insertdate string); +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01'; +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02'; +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; + +DROP TABLE alter2; +USE default; +DROP DATABASE alter2_db; diff --git ql/src/test/queries/clientpositive/alter3.q ql/src/test/queries/clientpositive/alter3.q index d9d3b07..86fe4f6 100644 --- ql/src/test/queries/clientpositive/alter3.q +++ ql/src/test/queries/clientpositive/alter3.q @@ -19,3 +19,37 @@ select col1 from alter3_src; alter table alter3_like rename to alter3_like_renamed; describe extended alter3_like_renamed; + +-- Cleanup +DROP TABLE alter3_src; +DROP TABLE alter3_renamed; +DROP TABLE alter3_like_renamed; +SHOW TABLES; + +-- With non-default Database + +CREATE DATABASE alter3_db; +USE alter3_db; +SHOW TABLES; + +CREATE TABLE alter3_src (col1 STRING) STORED AS TEXTFILE ; +LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter3_src ; + +CREATE TABLE alter3 (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE; + +CREATE TABLE alter3_like LIKE alter3; + +INSERT OVERWRITE TABLE alter3 PARTITION (pCol1='test_part', pcol2='test_part') SELECT col1 FROM alter3_src ; +SELECT * FROM alter3 WHERE pcol1='test_part' AND pcol2='test_part'; + +ALTER TABLE alter3 RENAME TO alter3_renamed; +DESCRIBE EXTENDED alter3_renamed; +DESCRIBE EXTENDED alter3_renamed PARTITION (pCol1='test_part', pcol2='test_part'); +SELECT * FROM alter3_renamed WHERE pcol1='test_part' AND pcol2='test_part'; + +INSERT OVERWRITE TABLE alter3_like +PARTITION (pCol1='test_part', pcol2='test_part') +SELECT col1 FROM alter3_src; +ALTER TABLE alter3_like RENAME TO alter3_like_renamed; + +DESCRIBE EXTENDED alter3_like_renamed; diff --git ql/src/test/queries/clientpositive/alter4.q ql/src/test/queries/clientpositive/alter4.q index 35fa441..542ff01 100644 --- ql/src/test/queries/clientpositive/alter4.q +++ ql/src/test/queries/clientpositive/alter4.q @@ -3,3 +3,24 @@ DESCRIBE EXTENDED set_bucketing_test; ALTER TABLE set_bucketing_test NOT CLUSTERED; DESCRIBE EXTENDED set_bucketing_test; + +-- Cleanup +DROP TABLE set_bucketing_test; +SHOW TABLES; + +-- with non-default Database + +CREATE DATABASE alter4_db; +USE alter4_db; +SHOW TABLES; + +CREATE TABLE 
set_bucketing_test (key INT, value STRING) CLUSTERED BY (key) INTO 10 BUCKETS; +DESCRIBE EXTENDED set_bucketing_test; + +ALTER TABLE set_bucketing_test NOT CLUSTERED; +DESCRIBE EXTENDED set_bucketing_test; + +DROP TABLE set_bucketing_test; +USE default; +DROP DATABASE alter4_db; +SHOW DATABASES; diff --git ql/src/test/queries/clientpositive/database.q ql/src/test/queries/clientpositive/database.q new file mode 100644 index 0000000..2b6c911 --- /dev/null +++ ql/src/test/queries/clientpositive/database.q @@ -0,0 +1,91 @@ +SHOW DATABASES; + +-- CREATE with comment +CREATE DATABASE test_db COMMENT 'Hive test database'; +SHOW DATABASES; + +-- CREATE INE already exists +CREATE DATABASE IF NOT EXISTS test_db; +SHOW DATABASES; + +-- SHOW DATABASES synonym +SHOW SCHEMAS; + +-- DROP +DROP DATABASE test_db; +SHOW DATABASES; + +-- CREATE INE doesn't exist +CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database'; +SHOW DATABASES; + +-- DROP IE exists +DROP DATABASE IF EXISTS test_db; +SHOW DATABASES; + +-- DROP IE doesn't exist +DROP DATABASE IF EXISTS test_db; + +-- SHOW +CREATE DATABASE test_db; +SHOW DATABASES; + +-- SHOW pattern +SHOW DATABASES LIKE 'test*'; + +-- SHOW pattern +SHOW DATABASES LIKE '*ef*'; + + +USE test_db; +SHOW DATABASES; + +-- CREATE table in non-default DB +CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE; +SHOW TABLES; + +-- DESCRIBE table in non-default DB +DESCRIBE test_table; + +-- DESCRIBE EXTENDED in non-default DB +DESCRIBE EXTENDED test_table; + +-- CREATE LIKE in non-default DB +CREATE TABLE test_table_like LIKE test_table; +SHOW TABLES; +DESCRIBE EXTENDED test_table_like; + +-- LOAD and SELECT +LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE test_table ; +SELECT * FROM test_table; + +-- DROP and CREATE w/o LOAD +DROP TABLE test_table; +SHOW TABLES; + +CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE; +SHOW TABLES; + +SELECT * FROM test_table; + +-- CREATE table that already exists in DEFAULT +USE test_db; +CREATE TABLE src (col1 STRING) STORED AS TEXTFILE; +SHOW TABLES; + +SELECT * FROM src LIMIT 10; + +USE default; +SELECT * FROM src LIMIT 10; + +-- DROP DATABASE +USE test_db; + +DROP TABLE src; +DROP TABLE test_table; +DROP TABLE test_table_like; +SHOW TABLES; + +USE default; +DROP DATABASE test_db; +SHOW DATABASES; diff --git ql/src/test/queries/clientpositive/rename_column.q ql/src/test/queries/clientpositive/rename_column.q index ce82ff7..a3f3f30 100644 --- ql/src/test/queries/clientpositive/rename_column.q +++ ql/src/test/queries/clientpositive/rename_column.q @@ -22,5 +22,36 @@ DESCRIBE kv_rename_test; ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b; DESCRIBE kv_rename_test; +DROP TABLE kv_rename_test; +SHOW TABLES; +-- Using non-default Database +CREATE DATABASE kv_rename_test_db; +USE kv_rename_test_db; +CREATE TABLE kv_rename_test(a int, b int, c int); +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a a STRING; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a a1 INT; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a1 a2 INT FIRST; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a2 a INT AFTER b; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a a1 INT COMMENT 'test comment1'; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a1 a2 INT COMMENT 'test comment2' FIRST; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b; +DESCRIBE kv_rename_test; + +DROP TABLE kv_rename_test; +SHOW 
TABLES; diff --git ql/src/test/results/clientnegative/database_create_already_exists.q.out ql/src/test/results/clientnegative/database_create_already_exists.q.out new file mode 100644 index 0000000..08c04f9 --- /dev/null +++ ql/src/test/results/clientnegative/database_create_already_exists.q.out @@ -0,0 +1,15 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to create a database that already exists +CREATE DATABASE test_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- Try to create a database that already exists +CREATE DATABASE test_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: CREATE DATABASE test_db +PREHOOK: type: CREATEDATABASE +Failed with exception Database test_db already exists +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientnegative/database_create_invalid_name.q.out ql/src/test/results/clientnegative/database_create_invalid_name.q.out new file mode 100644 index 0000000..1e58089 --- /dev/null +++ ql/src/test/results/clientnegative/database_create_invalid_name.q.out @@ -0,0 +1,10 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to create a database with an invalid name +CREATE DATABASE `test.db` +PREHOOK: type: CREATEDATABASE +FAILED: Error in metadata: InvalidObjectException(message:test.db is not a valid database name) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out new file mode 100644 index 0000000..80c00cd --- /dev/null +++ ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out @@ -0,0 +1,10 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to drop a database that does not exist +DROP DATABASE does_not_exist +PREHOOK: type: DROPDATABASE +Failed with exception There is no database named does_not_exist +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientnegative/database_drop_not_empty.q.out ql/src/test/results/clientnegative/database_drop_not_empty.q.out new file mode 100644 index 0000000..baa8f37 --- /dev/null +++ ql/src/test/results/clientnegative/database_drop_not_empty.q.out @@ -0,0 +1,28 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK:
type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to drop a non-empty database +CREATE DATABASE test_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- Try to drop a non-empty database +CREATE DATABASE test_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE test_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE test_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: CREATE TABLE t(a INT) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE t(a INT) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: test_db@t +PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE test_db +PREHOOK: type: DROPDATABASE +FAILED: Error in metadata: InvalidOperationException(message:Database test_db is not empty) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientnegative/database_switch_does_not_exist.q.out ql/src/test/results/clientnegative/database_switch_does_not_exist.q.out new file mode 100644 index 0000000..8b5674d --- /dev/null +++ ql/src/test/results/clientnegative/database_switch_does_not_exist.q.out @@ -0,0 +1,10 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to switch to a database that does not exist +USE does_not_exist +PREHOOK: type: SWITCHDATABASE +FAILED: Error in metadata: ERROR: The database does_not_exist does not exist. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientpositive/add_part_exist.q.out ql/src/test/results/clientpositive/add_part_exist.q.out index 5f76c9d..e64cc3c 100644 --- ql/src/test/results/clientpositive/add_part_exist.q.out +++ ql/src/test/results/clientpositive/add_part_exist.q.out @@ -63,3 +63,102 @@ POSTHOOK: type: SHOWPARTITIONS ds=2010-01-01 ds=2010-01-02 ds=2010-01-03 +PREHOOK: query: DROP TABLE add_part_test +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@add_part_test +PREHOOK: Output: default@add_part_test +POSTHOOK: query: DROP TABLE add_part_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@add_part_test +POSTHOOK: Output: default@add_part_test +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- Test ALTER TABLE ADD PARTITION in non-default Database +CREATE DATABASE add_part_test_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- Test ALTER TABLE ADD PARTITION in non-default Database +CREATE DATABASE add_part_test_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE add_part_test_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE add_part_test_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: add_part_test_db@add_part_test +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS 
+PREHOOK: query: ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: add_part_test_db@add_part_test +POSTHOOK: query: ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: add_part_test_db@add_part_test +POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-01 +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS +ds=2010-01-01 +PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: add_part_test_db@add_part_test +PREHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-01 +POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: add_part_test_db@add_part_test +POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-01 +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS +ds=2010-01-01 +PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: add_part_test_db@add_part_test +POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: add_part_test_db@add_part_test +POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-02 +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS +ds=2010-01-01 +ds=2010-01-02 +PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: add_part_test_db@add_part_test +PREHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-01 +PREHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-02 +POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: add_part_test_db@add_part_test +POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-01 +POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-02 +POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-03 +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS +ds=2010-01-01 +ds=2010-01-02 +ds=2010-01-03 diff --git ql/src/test/results/clientpositive/alter1.q.out ql/src/test/results/clientpositive/alter1.q.out index 73c5978..381cfbb 100644 --- ql/src/test/results/clientpositive/alter1.q.out +++ ql/src/test/results/clientpositive/alter1.q.out @@ -10,7 +10,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:njain, createTime:1282026227, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, 
outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1282026227}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1282709490, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1282709490}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set tblproperties ('a'='1', 'c'='3') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@alter1 @@ -26,7 +26,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:njain, createTime:1282026227, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=njain, c=3, last_modified_time=1282026227, a=1, transient_lastDdlTime=1282026227}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1282709490, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=carl, c=3, last_modified_time=1282709490, a=1, transient_lastDdlTime=1282709490}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set tblproperties ('a'='1', 'c'='4', 'd'='3') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@alter1 @@ -42,7 +42,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:njain, createTime:1282026227, lastAccessTime:0, retention:0, 
sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{d=3, last_modified_by=njain, c=4, last_modified_time=1282026227, a=1, transient_lastDdlTime=1282026227}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1282709490, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{d=3, last_modified_by=carl, c=4, last_modified_time=1282709491, a=1, transient_lastDdlTime=1282709491}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set tblproperties ('EXTERNAL'='TRUE') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@alter1 @@ -58,7 +58,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:njain, createTime:1282026227, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=TRUE, d=3, last_modified_by=njain, c=4, last_modified_time=1282026228, a=1, transient_lastDdlTime=1282026228}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1282709490, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=TRUE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709491, a=1, 
transient_lastDdlTime=1282709491}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) PREHOOK: query: alter table alter1 set tblproperties ('EXTERNAL'='FALSE') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@alter1 @@ -74,7 +74,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:njain, createTime:1282026227, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=njain, c=4, last_modified_time=1282026228, a=1, transient_lastDdlTime=1282026228}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1282709490, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709491, a=1, transient_lastDdlTime=1282709491}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set serdeproperties('s1'='9') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@alter1 @@ -90,7 +90,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:njain, createTime:1282026227, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=njain, c=4, last_modified_time=1282026228, a=1, transient_lastDdlTime=1282026228}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1282709490, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, 
inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709491, a=1, transient_lastDdlTime=1282709491}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set serdeproperties('s1'='10', 's2' ='20') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@alter1 @@ -106,7 +106,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:njain, createTime:1282026227, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=10, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=njain, c=4, last_modified_time=1282026228, a=1, transient_lastDdlTime=1282026228}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1282709490, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=10, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709491, a=1, transient_lastDdlTime=1282709491}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9') PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: default@alter1 @@ -122,7 +122,7 @@ POSTHOOK: type: DESCTABLE a string from deserializer b string from deserializer -Detailed Table Information Table(tableName:alter1, dbName:default, owner:njain, createTime:1282026227, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:from deserializer), FieldSchema(name:b, type:int, comment:from deserializer)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.TestSerDe, parameters:{s2=20, s1=9, 
serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=njain, c=4, last_modified_time=1282026228, a=1, transient_lastDdlTime=1282026228}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1282709490, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:from deserializer), FieldSchema(name:b, type:int, comment:from deserializer)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.TestSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709492, a=1, transient_lastDdlTime=1282709492}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set serde 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: default@alter1 @@ -138,7 +138,7 @@ POSTHOOK: type: DESCTABLE a string from deserializer b string from deserializer -Detailed Table Information Table(tableName:alter1, dbName:default, owner:njain, createTime:1282026227, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:from deserializer), FieldSchema(name:b, type:string, comment:from deserializer)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=njain, c=4, last_modified_time=1282026229, a=1, transient_lastDdlTime=1282026229}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1282709490, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:from deserializer), FieldSchema(name:b, type:string, comment:from deserializer)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709492, a=1, transient_lastDdlTime=1282709492}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 replace columns (a int, b int, c string) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@alter1 @@ -154,3 +154,213 @@ 
POSTHOOK: type: DESCTABLE a int b int c string +PREHOOK: query: -- Cleanup +DROP TABLE alter1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter1 +PREHOOK: Output: default@alter1 +POSTHOOK: query: -- Cleanup +DROP TABLE alter1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter1 +POSTHOOK: Output: default@alter1 +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- With non-default Database + +CREATE DATABASE alter1_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- With non-default Database + +CREATE DATABASE alter1_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE alter1_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE alter1_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TABLE alter1(a INT, b INT) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter1(a INT, b INT) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1282709493, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1282709493}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='3') +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='3') +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1282709493, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=carl, c=3, last_modified_time=1282709493, a=1, transient_lastDdlTime=1282709493}, viewOriginalText:null, viewExpandedText:null, 
tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3') +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3') +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1282709493, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{d=3, last_modified_by=carl, c=4, last_modified_time=1282709493, a=1, transient_lastDdlTime=1282709493}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE') +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE') +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1282709493, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=TRUE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709494, a=1, transient_lastDdlTime=1282709494}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE') +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE') +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1282709493, lastAccessTime:0, retention:0, 
sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709494, a=1, transient_lastDdlTime=1282709494}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='9') +PREHOOK: type: ALTERTABLE_SERDEPROPERTIES +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='9') +POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1282709493, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709494, a=1, transient_lastDdlTime=1282709494}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20') +PREHOOK: type: ALTERTABLE_SERDEPROPERTIES +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20') +POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1282709493, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=10, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709494, 
a=1, transient_lastDdlTime=1282709494}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9') +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9') +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a string from deserializer +b string from deserializer + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1282709493, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:from deserializer), FieldSchema(name:b, type:int, comment:from deserializer)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.TestSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709494, a=1, transient_lastDdlTime=1282709494}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a string from deserializer +b string from deserializer + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1282709493, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:from deserializer), FieldSchema(name:b, type:string, comment:from deserializer)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1282709494, a=1, transient_lastDdlTime=1282709494}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 REPLACE COLUMNS (a int, b int, c string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: ALTER TABLE alter1 REPLACE COLUMNS (a int, b int, c string) +POSTHOOK: 
type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE alter1 +POSTHOOK: type: DESCTABLE +a int +b int +c string +PREHOOK: query: DROP TABLE alter1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: alter1_db@alter1 +PREHOOK: Output: alter1_db@alter1 +POSTHOOK: query: DROP TABLE alter1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE alter1_db +PREHOOK: type: DROPDATABASE +POSTHOOK: query: DROP DATABASE alter1_db +POSTHOOK: type: DROPDATABASE diff --git ql/src/test/results/clientpositive/alter2.q.out ql/src/test/results/clientpositive/alter2.q.out index 06a2bab..4c40d8f 100644 --- ql/src/test/results/clientpositive/alter2.q.out +++ ql/src/test/results/clientpositive/alter2.q.out @@ -11,7 +11,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:njain, createTime:1282026229, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1282026229}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1282710009, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1282710009}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 @@ -31,7 +31,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:njain, createTime:1282026229, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, 
serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1282026229}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1282710009, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1282710009}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 @@ -52,7 +52,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:njain, createTime:1282026229, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1282026229}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1282710009, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1282710009}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 @@ -80,7 +80,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:njain, createTime:1282026231, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], 
location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1282026231}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1282710010, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1282710010}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 @@ -100,7 +100,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:njain, createTime:1282026231, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1282026231}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1282710010, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1282710010}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show 
partitions alter2 @@ -121,10 +121,194 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:njain, createTime:1282026231, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1282026231}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1282710010, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1282710010}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 POSTHOOK: type: SHOWPARTITIONS insertdate=2008-01-01 insertdate=2008-01-02 +PREHOOK: query: -- Cleanup +DROP TABLE alter2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter2 +PREHOOK: Output: default@alter2 +POSTHOOK: query: -- Cleanup +DROP TABLE alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter2 +POSTHOOK: Output: default@alter2 +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- Using non-default Database + +CREATE DATABASE alter2_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- Using non-default Database + +CREATE DATABASE alter2_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE alter2_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE alter2_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TABLE alter2(a int, b int) PARTITIONED BY (insertdate string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter2(a int, b int) PARTITIONED BY (insertdate string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter2_db@alter2 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1282710012, lastAccessTime:0, retention:0, 
sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1282710012}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +PREHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01' +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: alter2_db@alter2 +POSTHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01' +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: alter2_db@alter2 +POSTHOOK: Output: alter2_db@alter2@insertdate=2008-01-01 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1282710012, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1282710012}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +insertdate=2008-01-01 +PREHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02' +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: alter2_db@alter2 +POSTHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02' +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: alter2_db@alter2 +POSTHOOK: Output: alter2_db@alter2@insertdate=2008-01-02 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1282710012, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, 
compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1282710012}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +insertdate=2008-01-01 +insertdate=2008-01-02 +PREHOOK: query: DROP TABLE alter2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: alter2_db@alter2 +PREHOOK: Output: alter2_db@alter2 +POSTHOOK: query: DROP TABLE alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: alter2_db@alter2 +POSTHOOK: Output: alter2_db@alter2 +PREHOOK: query: CREATE EXTERNAL TABLE alter2(a int, b int) PARTITIONED BY (insertdate string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE EXTERNAL TABLE alter2(a int, b int) PARTITIONED BY (insertdate string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter2_db@alter2 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1282710013, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1282710013}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +PREHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01' +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: alter2_db@alter2 +POSTHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01' +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: alter2_db@alter2 +POSTHOOK: Output: alter2_db@alter2@insertdate=2008-01-01 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1282710013, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, 
parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1282710013}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +insertdate=2008-01-01 +PREHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02' +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: alter2_db@alter2 +POSTHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02' +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: alter2_db@alter2 +POSTHOOK: Output: alter2_db@alter2@insertdate=2008-01-02 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1282710013, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1282710013}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +insertdate=2008-01-01 +insertdate=2008-01-02 +PREHOOK: query: DROP TABLE alter2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: alter2_db@alter2 +PREHOOK: Output: alter2_db@alter2 +POSTHOOK: query: DROP TABLE alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: alter2_db@alter2 +POSTHOOK: Output: alter2_db@alter2 +PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE alter2_db +PREHOOK: type: DROPDATABASE +POSTHOOK: query: DROP DATABASE alter2_db +POSTHOOK: type: DROPDATABASE diff --git ql/src/test/results/clientpositive/alter3.q.out ql/src/test/results/clientpositive/alter3.q.out index 3f91306..ebb2dcb 100644 --- ql/src/test/results/clientpositive/alter3.q.out +++ ql/src/test/results/clientpositive/alter3.q.out @@ -30,11 +30,11 @@ POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE PREHOOK: query: select * from alter3 where pcol1='test_part' and pcol2='test_part' PREHOOK: type: QUERY PREHOOK: Input: default@alter3@pcol1=test_part/pcol2=test_part -PREHOOK: Output: file:/tmp/njain/hive_2010-08-16_23-23-57_980_3959366436597643277/-mr-10000 +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_21-20-21_932_3372743948578297998/-mr-10000 POSTHOOK: query: select * from alter3 where pcol1='test_part' and pcol2='test_part' POSTHOOK: type: QUERY POSTHOOK: Input: default@alter3@pcol1=test_part/pcol2=test_part 
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-16_23-23-57_980_3959366436597643277/-mr-10000 +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_21-20-21_932_3372743948578297998/-mr-10000 POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] 1 test_part test_part 2 test_part test_part @@ -61,7 +61,7 @@ col1 string pcol1 string pcol2 string -Detailed Table Information Table(tableName:alter3_renamed, dbName:default, owner:njain, createTime:1282026233, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter3_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{last_modified_by=njain, last_modified_time=1282026238, transient_lastDdlTime=1282026238}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter3_renamed, dbName:default, owner:carl, createTime:1282710015, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{last_modified_by=carl, last_modified_time=1282710022, transient_lastDdlTime=1282710022}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: describe extended alter3_renamed partition (pCol1='test_part', pcol2='test_part') PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended alter3_renamed partition (pCol1='test_part', pcol2='test_part') @@ -71,15 +71,15 @@ col1 string pcol1 string pcol2 string -Detailed Partition Information Partition(values:[test_part, test_part], dbName:default, tableName:alter3_renamed, createTime:1282026237, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter3_renamed/pcol1=test_part/pcol2=test_part, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1282026237}) +Detailed Partition Information Partition(values:[test_part, test_part], dbName:default, 
tableName:alter3_renamed, createTime:1282710021, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_renamed/pcol1=test_part/pcol2=test_part, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1282710021}) PREHOOK: query: select * from alter3_renamed where pcol1='test_part' and pcol2='test_part' PREHOOK: type: QUERY PREHOOK: Input: default@alter3_renamed@pcol1=test_part/pcol2=test_part -PREHOOK: Output: file:/tmp/njain/hive_2010-08-16_23-23-58_985_8799413873165106617/-mr-10000 +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_21-20-22_594_4220539088907093497/-mr-10000 POSTHOOK: query: select * from alter3_renamed where pcol1='test_part' and pcol2='test_part' POSTHOOK: type: QUERY POSTHOOK: Input: default@alter3_renamed@pcol1=test_part/pcol2=test_part -POSTHOOK: Output: file:/tmp/njain/hive_2010-08-16_23-23-58_985_8799413873165106617/-mr-10000 +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_21-20-22_594_4220539088907093497/-mr-10000 POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] 1 test_part test_part 2 test_part test_part @@ -122,4 +122,223 @@ col1 string pcol1 string pcol2 string -Detailed Table Information Table(tableName:alter3_like_renamed, dbName:default, owner:njain, createTime:1282026233, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/alter3_like_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{EXTERNAL=FALSE, last_modified_by=njain, last_modified_time=1282026243, transient_lastDdlTime=1282026243}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter3_like_renamed, dbName:default, owner:carl, createTime:1282710015, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_like_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], 
parameters:{EXTERNAL=FALSE, last_modified_by=carl, last_modified_time=1282710028, transient_lastDdlTime=1282710028}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: -- Cleanup +DROP TABLE alter3_src +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter3_src +PREHOOK: Output: default@alter3_src +POSTHOOK: query: -- Cleanup +DROP TABLE alter3_src +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter3_src +POSTHOOK: Output: default@alter3_src +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: DROP TABLE alter3_renamed +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter3_renamed +PREHOOK: Output: default@alter3_renamed +POSTHOOK: query: DROP TABLE alter3_renamed +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter3_renamed +POSTHOOK: Output: default@alter3_renamed +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: DROP TABLE alter3_like_renamed +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter3_like_renamed +PREHOOK: Output: default@alter3_like_renamed +POSTHOOK: query: DROP TABLE alter3_like_renamed +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter3_like_renamed +POSTHOOK: Output: default@alter3_like_renamed +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- With non-default Database + +CREATE DATABASE alter3_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- With non-default Database + +CREATE DATABASE alter3_db +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: USE alter3_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE alter3_db +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE 
[(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter3_src (col1 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter3_src (col1 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter3_db@alter3_src +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter3_src +PREHOOK: type: LOAD +POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter3_src +POSTHOOK: type: LOAD +POSTHOOK: Output: alter3_db@alter3_src +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter3 (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter3 (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter3_db@alter3 +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter3_like LIKE alter3 +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter3_like LIKE alter3 +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter3_db@alter3_like +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: INSERT OVERWRITE TABLE alter3 PARTITION (pCol1='test_part', pcol2='test_part') SELECT col1 FROM alter3_src +PREHOOK: type: QUERY +PREHOOK: Input: alter3_db@alter3_src +PREHOOK: Output: alter3_db@alter3@pcol1=test_part/pcol2=test_part +POSTHOOK: query: INSERT OVERWRITE TABLE alter3 PARTITION (pCol1='test_part', pcol2='test_part') SELECT col1 FROM alter3_src +POSTHOOK: type: QUERY +POSTHOOK: Input: alter3_db@alter3_src +POSTHOOK: Output: alter3_db@alter3@pcol1=test_part/pcol2=test_part +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: 
alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM alter3 WHERE pcol1='test_part' AND pcol2='test_part' +PREHOOK: type: QUERY +PREHOOK: Input: alter3_db@alter3@pcol1=test_part/pcol2=test_part +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_21-20-37_655_2638711537174977246/-mr-10000 +POSTHOOK: query: SELECT * FROM alter3 WHERE pcol1='test_part' AND pcol2='test_part' +POSTHOOK: type: QUERY +POSTHOOK: Input: alter3_db@alter3@pcol1=test_part/pcol2=test_part +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_21-20-37_655_2638711537174977246/-mr-10000 +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +1 test_part test_part +2 test_part test_part +3 test_part test_part +4 test_part test_part +5 test_part test_part +6 test_part test_part +PREHOOK: query: ALTER TABLE alter3 RENAME TO alter3_renamed +PREHOOK: type: ALTERTABLE_RENAME +PREHOOK: Input: alter3_db@alter3 +PREHOOK: Output: alter3_db@alter3 +POSTHOOK: query: ALTER TABLE alter3 RENAME TO alter3_renamed +POSTHOOK: type: ALTERTABLE_RENAME +POSTHOOK: Input: alter3_db@alter3 +POSTHOOK: Output: alter3_db@alter3 +POSTHOOK: Output: alter3_db@alter3_renamed +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: DESCRIBE EXTENDED alter3_renamed +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter3_renamed +POSTHOOK: type: DESCTABLE +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +col1 string +pcol1 string +pcol2 string + +Detailed Table Information Table(tableName:alter3_renamed, dbName:alter3_db, owner:carl, createTime:1282710029, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_db.db/alter3_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, 
compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{last_modified_by=carl, last_modified_time=1282710037, transient_lastDdlTime=1282710037}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: DESCRIBE EXTENDED alter3_renamed PARTITION (pCol1='test_part', pcol2='test_part') +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter3_renamed PARTITION (pCol1='test_part', pcol2='test_part') +POSTHOOK: type: DESCTABLE +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +col1 string +pcol1 string +pcol2 string + +Detailed Partition Information Partition(values:[test_part, test_part], dbName:alter3_db, tableName:alter3_renamed, createTime:1282710037, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_db.db/alter3_renamed/pcol1=test_part/pcol2=test_part, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1282710037}) +PREHOOK: query: SELECT * FROM alter3_renamed WHERE pcol1='test_part' AND pcol2='test_part' +PREHOOK: type: QUERY +PREHOOK: Input: alter3_db@alter3_renamed@pcol1=test_part/pcol2=test_part +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_21-20-38_226_752504330069771446/-mr-10000 +POSTHOOK: query: SELECT * FROM alter3_renamed WHERE pcol1='test_part' AND pcol2='test_part' +POSTHOOK: type: QUERY +POSTHOOK: Input: alter3_db@alter3_renamed@pcol1=test_part/pcol2=test_part +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_21-20-38_226_752504330069771446/-mr-10000 +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +1 test_part test_part +2 test_part test_part +3 test_part test_part +4 test_part test_part +5 test_part test_part +6 test_part test_part +PREHOOK: query: INSERT OVERWRITE TABLE alter3_like +PARTITION (pCol1='test_part', pcol2='test_part') +SELECT col1 FROM alter3_src +PREHOOK: type: QUERY +PREHOOK: Input: alter3_db@alter3_src +PREHOOK: Output: 
alter3_db@alter3_like@pcol1=test_part/pcol2=test_part
+POSTHOOK: query: INSERT OVERWRITE TABLE alter3_like
+PARTITION (pCol1='test_part', pcol2='test_part')
+SELECT col1 FROM alter3_src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: alter3_db@alter3_src
+POSTHOOK: Output: alter3_db@alter3_like@pcol1=test_part/pcol2=test_part
+POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: ALTER TABLE alter3_like RENAME TO alter3_like_renamed
+PREHOOK: type: ALTERTABLE_RENAME
+PREHOOK: Input: alter3_db@alter3_like
+PREHOOK: Output: alter3_db@alter3_like
+POSTHOOK: query: ALTER TABLE alter3_like RENAME TO alter3_like_renamed
+POSTHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: Input: alter3_db@alter3_like
+POSTHOOK: Output: alter3_db@alter3_like
+POSTHOOK: Output: alter3_db@alter3_like_renamed
+POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: DESCRIBE EXTENDED alter3_like_renamed
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE EXTENDED alter3_like_renamed
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
+col1 string
+pcol1 string
+pcol2 string
+
+Detailed Table Information Table(tableName:alter3_like_renamed, dbName:alter3_db, owner:carl, createTime:1282710029, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_db.db/alter3_like_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{EXTERNAL=FALSE, last_modified_by=carl, last_modified_time=1282710044, transient_lastDdlTime=1282710044}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
diff --git ql/src/test/results/clientpositive/alter4.q.out ql/src/test/results/clientpositive/alter4.q.out
index 058d25d..0617b82 100644
--- ql/src/test/results/clientpositive/alter4.q.out
+++ ql/src/test/results/clientpositive/alter4.q.out
@@ -10,7 +10,7 @@ POSTHOOK: type: DESCTABLE
 key int
 value string
 
-Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:njain, createTime:1282026244, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1282026244}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:carl, createTime:1282710046, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1282710046}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
 PREHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED
 PREHOOK: type: null
 PREHOOK: Input: default@set_bucketing_test
@@ -26,4 +26,92 @@ POSTHOOK: type: DESCTABLE
 key int
 value string
 
-Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:njain, createTime:1282026244, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=njain, last_modified_time=1282026244, transient_lastDdlTime=1282026244}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:carl, createTime:1282710046, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=carl, last_modified_time=1282710046, transient_lastDdlTime=1282710046}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: -- Cleanup
+DROP TABLE set_bucketing_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@set_bucketing_test
+PREHOOK: Output: default@set_bucketing_test
+POSTHOOK: query: -- Cleanup
+DROP TABLE set_bucketing_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@set_bucketing_test
+POSTHOOK: Output: default@set_bucketing_test
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+src
+src1
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+PREHOOK: query: -- with non-default Database
+
+CREATE DATABASE alter4_db
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: -- with non-default Database
+
+CREATE DATABASE alter4_db
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: USE alter4_db
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: USE alter4_db
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+PREHOOK: query: CREATE TABLE set_bucketing_test (key INT, value STRING) CLUSTERED BY (key) INTO 10 BUCKETS
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE set_bucketing_test (key INT, value STRING) CLUSTERED BY (key) INTO 10 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: alter4_db@set_bucketing_test
+PREHOOK: query: DESCRIBE EXTENDED set_bucketing_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE EXTENDED set_bucketing_test
+POSTHOOK: type: DESCTABLE
+key int
+value string
+
+Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:carl, createTime:1282710046, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1282710046}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED
+PREHOOK: type: null
+PREHOOK: Input: alter4_db@set_bucketing_test
+PREHOOK: Output: alter4_db@set_bucketing_test
+POSTHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED
+POSTHOOK: type: null
+POSTHOOK: Input: alter4_db@set_bucketing_test
+POSTHOOK: Output: alter4_db@set_bucketing_test
+PREHOOK: query: DESCRIBE EXTENDED set_bucketing_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE EXTENDED set_bucketing_test
+POSTHOOK: type: DESCTABLE
+key int
+value string
+
+Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:carl, createTime:1282710046, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=carl, last_modified_time=1282710046, transient_lastDdlTime=1282710046}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: DROP TABLE set_bucketing_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: alter4_db@set_bucketing_test
+PREHOOK: Output: alter4_db@set_bucketing_test
+POSTHOOK: query: DROP TABLE set_bucketing_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: alter4_db@set_bucketing_test
+POSTHOOK: Output: alter4_db@set_bucketing_test
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: DROP DATABASE alter4_db
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: DROP DATABASE alter4_db
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
diff --git ql/src/test/results/clientpositive/database.q.out ql/src/test/results/clientpositive/database.q.out
new file mode 100644
index 0000000..c8895df
--- /dev/null
+++ ql/src/test/results/clientpositive/database.q.out
@@ -0,0 +1,311 @@
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- CREATE with comment
+CREATE DATABASE test_db COMMENT 'Hive test database'
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: -- CREATE with comment
+CREATE DATABASE test_db COMMENT 'Hive test database'
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- CREATE INE already exists
+CREATE DATABASE IF NOT EXISTS test_db
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: -- CREATE INE already exists
+CREATE DATABASE IF NOT EXISTS test_db
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- SHOW DATABASES synonym
+SHOW SCHEMAS
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: -- SHOW DATABASES synonym
+SHOW SCHEMAS
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- DROP
+DROP DATABASE test_db
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: -- DROP
+DROP DATABASE test_db
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- CREATE INE doesn't exist
+CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database'
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: -- CREATE INE doesn't exist
+CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database'
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- DROP IE exists
+DROP DATABASE IF EXISTS test_db
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: -- DROP IE exists
+DROP DATABASE IF EXISTS test_db
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- DROP IE doesn't exist
+DROP DATABASE IF EXISTS test_db
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: -- DROP IE doesn't exist
+DROP DATABASE IF EXISTS test_db
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: -- SHOW
+CREATE DATABASE test_db
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: -- SHOW
+CREATE DATABASE test_db
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- SHOW pattern
+SHOW DATABASES LIKE 'test*'
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: -- SHOW pattern
+SHOW DATABASES LIKE 'test*'
+POSTHOOK: type: SHOWDATABASES
+test_db
+PREHOOK: query: -- SHOW pattern
+SHOW DATABASES LIKE '*ef*'
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: -- SHOW pattern
+SHOW DATABASES LIKE '*ef*'
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: USE test_db
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: USE test_db
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- CREATE table in non-default DB
+CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: -- CREATE table in non-default DB
+CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+test_table
+PREHOOK: query: -- DESCRIBE table in non-default DB
+DESCRIBE test_table
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: -- DESCRIBE table in non-default DB
+DESCRIBE test_table
+POSTHOOK: type: DESCTABLE
+col1 string
+PREHOOK: query: -- DESCRIBE EXTENDED in non-default DB
+DESCRIBE EXTENDED test_table
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: -- DESCRIBE EXTENDED in non-default DB
+DESCRIBE EXTENDED test_table
+POSTHOOK: type: DESCTABLE
+col1 string
+
+Detailed Table Information Table(tableName:test_table, dbName:test_db, owner:carl, createTime:1282718578, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/test_db.db/test_table, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1282718578}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: -- CREATE LIKE in non-default DB
+CREATE TABLE test_table_like LIKE test_table
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: -- CREATE LIKE in non-default DB
+CREATE TABLE test_table_like LIKE test_table
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: test_db@test_table_like
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+test_table
+test_table_like
+PREHOOK: query: DESCRIBE EXTENDED test_table_like
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE EXTENDED test_table_like
+POSTHOOK: type: DESCTABLE
+col1 string
+
+Detailed Table Information Table(tableName:test_table_like, dbName:test_db, owner:carl, createTime:1282718578, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/test_db.db/test_table_like, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, transient_lastDdlTime=1282718578}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: -- LOAD and SELECT
+LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE test_table
+PREHOOK: type: LOAD
+POSTHOOK: query: -- LOAD and SELECT
+LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE test_table
+POSTHOOK: type: LOAD
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: SELECT * FROM test_table
+PREHOOK: type: QUERY
+PREHOOK: Input: test_db@test_table
+PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_23-42-59_567_2461111790069590361/-mr-10000
+POSTHOOK: query: SELECT * FROM test_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: test_db@test_table
+POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_23-42-59_567_2461111790069590361/-mr-10000
+1
+2
+3
+4
+5
+6
+PREHOOK: query: -- DROP and CREATE w/o LOAD
+DROP TABLE test_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: test_db@test_table
+PREHOOK: Output: test_db@test_table
+POSTHOOK: query: -- DROP and CREATE w/o LOAD
+DROP TABLE test_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: test_db@test_table
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+test_table_like
+PREHOOK: query: CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+test_table
+test_table_like
+PREHOOK: query: SELECT * FROM test_table
+PREHOOK: type: QUERY
+PREHOOK: Input: test_db@test_table
+PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_23-43-00_940_5138756558646274720/-mr-10000
+POSTHOOK: query: SELECT * FROM test_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: test_db@test_table
+POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_23-43-00_940_5138756558646274720/-mr-10000
+PREHOOK: query: -- CREATE table that already exists in DEFAULT
+USE test_db
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: -- CREATE table that already exists in DEFAULT
+USE test_db
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: CREATE TABLE src (col1 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE src (col1 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: test_db@src
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+src
+test_table
+test_table_like
+PREHOOK: query: SELECT * FROM src LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: test_db@src
+PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_23-43-01_221_2875743952247647479/-mr-10000
+POSTHOOK: query: SELECT * FROM src LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: test_db@src
+POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_23-43-01_221_2875743952247647479/-mr-10000
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: SELECT * FROM src LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_23-43-01_337_7970214163805278546/-mr-10000
+POSTHOOK: query: SELECT * FROM src LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-24_23-43-01_337_7970214163805278546/-mr-10000
+238 val_238
+86 val_86
+311 val_311
+27 val_27
+165 val_165
+409 val_409
+255 val_255
+278 val_278
+98 val_98
+484 val_484
+PREHOOK: query: -- DROP DATABASE
+USE test_db
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: -- DROP DATABASE
+USE test_db
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: DROP TABLE src
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: test_db@src
+PREHOOK: Output: test_db@src
+POSTHOOK: query: DROP TABLE src
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: test_db@src
+POSTHOOK: Output: test_db@src
+PREHOOK: query: DROP TABLE test_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: test_db@test_table
+PREHOOK: Output: test_db@test_table
+POSTHOOK: query: DROP TABLE test_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: test_db@test_table
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: DROP TABLE test_table_like
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: test_db@test_table_like
+PREHOOK: Output: test_db@test_table_like
+POSTHOOK: query: DROP TABLE test_table_like
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: test_db@test_table_like
+POSTHOOK: Output: test_db@test_table_like
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: DROP DATABASE test_db
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: DROP DATABASE test_db
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
diff --git ql/src/test/results/clientpositive/rename_column.q.out ql/src/test/results/clientpositive/rename_column.q.out
index 252d681..0289738 100644
--- ql/src/test/results/clientpositive/rename_column.q.out
+++ ql/src/test/results/clientpositive/rename_column.q.out
@@ -115,3 +115,162 @@ POSTHOOK: type: DESCTABLE
 b int
 a int test comment2
 c int
+PREHOOK: query: DROP TABLE kv_rename_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@kv_rename_test
+PREHOOK: Output: default@kv_rename_test
+POSTHOOK: query: DROP TABLE kv_rename_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@kv_rename_test
+POSTHOOK: Output: default@kv_rename_test
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+src
+src1
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+PREHOOK: query: -- Using non-default Database
+CREATE DATABASE kv_rename_test_db
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: -- Using non-default Database
+CREATE DATABASE kv_rename_test_db
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: USE kv_rename_test_db
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: USE kv_rename_test_db
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: CREATE TABLE kv_rename_test(a int, b int, c int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE kv_rename_test(a int, b int, c int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: kv_rename_test_db@kv_rename_test
+PREHOOK: query: DESCRIBE kv_rename_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE kv_rename_test
+POSTHOOK: type: DESCTABLE
+a int
+b int
+c int
+PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a a STRING
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: kv_rename_test_db@kv_rename_test
+PREHOOK: Output: kv_rename_test_db@kv_rename_test
+POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a a STRING
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: kv_rename_test_db@kv_rename_test
+POSTHOOK: Output: kv_rename_test_db@kv_rename_test
+PREHOOK: query: DESCRIBE kv_rename_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE kv_rename_test
+POSTHOOK: type: DESCTABLE
+a string
+b int
+c int
+PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a a1 INT
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: kv_rename_test_db@kv_rename_test
+PREHOOK: Output: kv_rename_test_db@kv_rename_test
+POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a a1 INT
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: kv_rename_test_db@kv_rename_test
+POSTHOOK: Output: kv_rename_test_db@kv_rename_test
+PREHOOK: query: DESCRIBE kv_rename_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE kv_rename_test
+POSTHOOK: type: DESCTABLE
+a1 int
+b int
+c int
+PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT FIRST
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: kv_rename_test_db@kv_rename_test
+PREHOOK: Output: kv_rename_test_db@kv_rename_test
+POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT FIRST
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: kv_rename_test_db@kv_rename_test
+POSTHOOK: Output: kv_rename_test_db@kv_rename_test
+PREHOOK: query: DESCRIBE kv_rename_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE kv_rename_test
+POSTHOOK: type: DESCTABLE
+a2 int
+b int
+c int
+PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a2 a INT AFTER b
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: kv_rename_test_db@kv_rename_test
+PREHOOK: Output: kv_rename_test_db@kv_rename_test
+POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a2 a INT AFTER b
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: kv_rename_test_db@kv_rename_test
+POSTHOOK: Output: kv_rename_test_db@kv_rename_test
+PREHOOK: query: DESCRIBE kv_rename_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE kv_rename_test
+POSTHOOK: type: DESCTABLE
+b int
+a int
+c int
+PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a a1 INT COMMENT 'test comment1'
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: kv_rename_test_db@kv_rename_test
+PREHOOK: Output: kv_rename_test_db@kv_rename_test
+POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a a1 INT COMMENT 'test comment1'
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: kv_rename_test_db@kv_rename_test
+POSTHOOK: Output: kv_rename_test_db@kv_rename_test
+PREHOOK: query: DESCRIBE kv_rename_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE kv_rename_test
+POSTHOOK: type: DESCTABLE
+b int
+a1 int test comment1
+c int
+PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT COMMENT 'test comment2' FIRST
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: kv_rename_test_db@kv_rename_test
+PREHOOK: Output: kv_rename_test_db@kv_rename_test
+POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT COMMENT 'test comment2' FIRST
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: kv_rename_test_db@kv_rename_test
+POSTHOOK: Output: kv_rename_test_db@kv_rename_test
+PREHOOK: query: DESCRIBE kv_rename_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE kv_rename_test
+POSTHOOK: type: DESCTABLE
+a2 int test comment2
+b int
+c int
+PREHOOK: query: ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: kv_rename_test_db@kv_rename_test
+PREHOOK: Output: kv_rename_test_db@kv_rename_test
+POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: kv_rename_test_db@kv_rename_test
+POSTHOOK: Output: kv_rename_test_db@kv_rename_test
+PREHOOK: query: DESCRIBE kv_rename_test
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE kv_rename_test
+POSTHOOK: type: DESCTABLE
+b int
+a int test comment2
+c int
+PREHOOK: query: DROP TABLE kv_rename_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: kv_rename_test_db@kv_rename_test
+PREHOOK: Output: kv_rename_test_db@kv_rename_test
+POSTHOOK: query: DROP TABLE kv_rename_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: kv_rename_test_db@kv_rename_test
+POSTHOOK: Output: kv_rename_test_db@kv_rename_test
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
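Note: the new database.q.out golden file above records the expected client-positive output for the CREATE/USE/DROP DATABASE support this patch adds. The driving test script is not part of this hunk, but its statements can be read back from the PREHOOK: query: lines. The following is a reconstructed sketch of that flow; statement order and comments are taken from the golden output, while the trailing semicolons and the exact contents of the real database.q file are assumptions:

-- Sketch reconstructed from the PREHOOK: query: lines in database.q.out;
-- not the literal database.q file shipped with this patch.
SHOW DATABASES;
CREATE DATABASE test_db COMMENT 'Hive test database';                 -- CREATE with comment
CREATE DATABASE IF NOT EXISTS test_db;                                -- CREATE INE already exists
SHOW SCHEMAS;                                                         -- SHOW DATABASES synonym
DROP DATABASE test_db;
CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database';  -- CREATE INE doesn't exist
DROP DATABASE IF EXISTS test_db;                                      -- DROP IE exists
DROP DATABASE IF EXISTS test_db;                                      -- DROP IE doesn't exist
CREATE DATABASE test_db;
SHOW DATABASES LIKE 'test*';                                          -- SHOW pattern
SHOW DATABASES LIKE '*ef*';
USE test_db;
CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE;             -- table in non-default DB
CREATE TABLE test_table_like LIKE test_table;                         -- CREATE LIKE in non-default DB
LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE test_table;
SELECT * FROM test_table;
-- the golden output drops every table before dropping the database
DROP TABLE test_table;
DROP TABLE test_table_like;
USE default;
DROP DATABASE test_db;
SHOW DATABASES;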