diff --git .gitignore .gitignore
index 376cdc6..e54804f 100644
--- .gitignore
+++ .gitignore
@@ -6,3 +6,4 @@ build-eclipse
 .settings
 *.launch
 *~
+metastore_db
diff --git build-common.xml build-common.xml
index d4ff895..3c6cc50 100644
--- build-common.xml
+++ build-common.xml
@@ -434,7 +434,7 @@
+          excludes="**/TestSerDe.class,**/TestHiveMetaStore.class,**/*$*.class" />
diff --git eclipse-templates/TestEmbeddedHiveMetaStore.launchtemplate eclipse-templates/TestEmbeddedHiveMetaStore.launchtemplate
new file mode 100644
index 0000000..c4d8e9a
--- /dev/null
+++ eclipse-templates/TestEmbeddedHiveMetaStore.launchtemplate
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git eclipse-templates/TestHive.launchtemplate eclipse-templates/TestHive.launchtemplate
index 24efc12..e2f46db 100644
--- eclipse-templates/TestHive.launchtemplate
+++ eclipse-templates/TestHive.launchtemplate
@@ -21,6 +21,6 @@
-
+
diff --git eclipse-templates/TestRemoteHiveMetaStore.launchtemplate eclipse-templates/TestRemoteHiveMetaStore.launchtemplate
new file mode 100644
index 0000000..3600e5c
--- /dev/null
+++ eclipse-templates/TestRemoteHiveMetaStore.launchtemplate
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git metastore/if/hive_metastore.thrift metastore/if/hive_metastore.thrift
index 478d0af..4234c61 100755
--- metastore/if/hive_metastore.thrift
+++ metastore/if/hive_metastore.thrift
@@ -32,7 +32,8 @@ struct Type {
 // namespace for tables
 struct Database {
   1: string name,
-  2: string description,
+  2: string comment,
+  3: string locationUri,
 }
 
 // This object holds the information needed by SerDes
@@ -150,16 +151,15 @@ exception ConfigValSecurityException {
 */
 service ThriftHiveMetastore extends fb303.FacebookService
 {
-  bool create_database(1:string name, 2:string description)
-    throws(1:AlreadyExistsException o1, 2:MetaException o2)
+  bool create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
   Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
-  bool drop_database(1:string name) throws(2:MetaException o2)
-  list<string> get_databases() throws(1:MetaException o1)
+  bool drop_database(1:string name) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+  list<string> get_databases(1:string pattern) throws(1:MetaException o1)
 
   // returns the type with given name (make seperate calls for the dependent types if needed)
-  Type get_type(1:string name) throws(1:MetaException o2)
+  Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2)
   bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
-  bool drop_type(1:string type) throws(1:MetaException o2)
+  bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2)
 
   map<string, Type> get_type_all(1:string name) throws(1:MetaException o2)
 
diff --git metastore/src/gen-cpp/ThriftHiveMetastore.cpp metastore/src/gen-cpp/ThriftHiveMetastore.cpp
index f945a3a..2e1e8a4 100644
--- metastore/src/gen-cpp/ThriftHiveMetastore.cpp
+++ metastore/src/gen-cpp/ThriftHiveMetastore.cpp
@@ -28,17 +28,9 @@ uint32_t ThriftHiveMetastore_create_database_args::read(apache::thrift::protocol switch (fid) { case 1: - if (ftype == apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->name); - this->__isset.name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype ==
apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->description); - this->__isset.description = true; + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->database.read(iprot); + this->__isset.database = true; } else { xfer += iprot->skip(ftype); } @@ -58,11 +50,8 @@ uint32_t ThriftHiveMetastore_create_database_args::read(apache::thrift::protocol uint32_t ThriftHiveMetastore_create_database_args::write(apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_args"); - xfer += oprot->writeFieldBegin("name", apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->name); - xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("description", apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->description); + xfer += oprot->writeFieldBegin("database", apache::thrift::protocol::T_STRUCT, 1); + xfer += this->database.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -72,11 +61,8 @@ uint32_t ThriftHiveMetastore_create_database_args::write(apache::thrift::protoco uint32_t ThriftHiveMetastore_create_database_pargs::write(apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_pargs"); - xfer += oprot->writeFieldBegin("name", apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->name))); - xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("description", apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->description))); + xfer += oprot->writeFieldBegin("database", apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->database)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -127,6 +113,14 @@ uint32_t ThriftHiveMetastore_create_database_result::read(apache::thrift::protoc xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -157,6 +151,10 @@ uint32_t ThriftHiveMetastore_create_database_result::write(apache::thrift::proto xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -207,6 +205,14 @@ uint32_t ThriftHiveMetastore_create_database_presult::read(apache::thrift::proto xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -507,6 +513,14 @@ uint32_t ThriftHiveMetastore_drop_database_result::read(apache::thrift::protocol xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 2: if (ftype == apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); 
@@ -515,6 +529,14 @@ uint32_t ThriftHiveMetastore_drop_database_result::read(apache::thrift::protocol xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -537,10 +559,18 @@ uint32_t ThriftHiveMetastore_drop_database_result::write(apache::thrift::protoco xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_BOOL, 0); xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); } else if (this->__isset.o2) { xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -575,6 +605,14 @@ uint32_t ThriftHiveMetastore_drop_database_presult::read(apache::thrift::protoco xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 2: if (ftype == apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); @@ -583,6 +621,14 @@ uint32_t ThriftHiveMetastore_drop_database_presult::read(apache::thrift::protoco xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -615,6 +661,14 @@ uint32_t ThriftHiveMetastore_get_databases_args::read(apache::thrift::protocol:: } switch (fid) { + case 1: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->pattern); + this->__isset.pattern = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -630,6 +684,9 @@ uint32_t ThriftHiveMetastore_get_databases_args::read(apache::thrift::protocol:: uint32_t ThriftHiveMetastore_get_databases_args::write(apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_args"); + xfer += oprot->writeFieldBegin("pattern", apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->pattern); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -638,6 +695,9 @@ uint32_t ThriftHiveMetastore_get_databases_args::write(apache::thrift::protocol: uint32_t ThriftHiveMetastore_get_databases_pargs::write(apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_pargs"); + xfer += oprot->writeFieldBegin("pattern", apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->pattern))); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -883,6 +943,14 @@ uint32_t ThriftHiveMetastore_get_type_result::read(apache::thrift::protocol::TPr break; case 1: if (ftype 
== apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); this->__isset.o2 = true; } else { @@ -911,8 +979,12 @@ uint32_t ThriftHiveMetastore_get_type_result::write(apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_STRUCT, 0); xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 1); + xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); } @@ -951,6 +1023,14 @@ uint32_t ThriftHiveMetastore_get_type_presult::read(apache::thrift::protocol::TP break; case 1: if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); this->__isset.o2 = true; } else { @@ -1279,6 +1359,14 @@ uint32_t ThriftHiveMetastore_drop_type_result::read(apache::thrift::protocol::TP break; case 1: if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); this->__isset.o2 = true; } else { @@ -1307,8 +1395,12 @@ uint32_t ThriftHiveMetastore_drop_type_result::write(apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_BOOL, 0); xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 1); + xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); } @@ -1347,6 +1439,14 @@ uint32_t ThriftHiveMetastore_drop_type_presult::read(apache::thrift::protocol::T break; case 1: if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); this->__isset.o2 = true; } else { @@ -7977,20 +8077,19 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(apache::thrift::proto return xfer; } -bool ThriftHiveMetastoreClient::create_database(const std::string& name, const std::string& description) +bool ThriftHiveMetastoreClient::create_database(const Database& database) { - send_create_database(name, description); + send_create_database(database); return recv_create_database(); } -void ThriftHiveMetastoreClient::send_create_database(const std::string& name, const std::string& description) +void ThriftHiveMetastoreClient::send_create_database(const Database& database) { int32_t cseqid = 0; 
oprot_->writeMessageBegin("create_database", apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_create_database_pargs args; - args.name = &name; - args.description = &description; + args.database = &database; args.write(oprot_); oprot_->writeMessageEnd(); @@ -8041,6 +8140,9 @@ bool ThriftHiveMetastoreClient::recv_create_database() if (result.__isset.o2) { throw result.o2; } + if (result.__isset.o3) { + throw result.o3; + } throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "create_database failed: unknown result"); } @@ -8167,24 +8269,31 @@ bool ThriftHiveMetastoreClient::recv_drop_database() if (result.__isset.success) { return _return; } + if (result.__isset.o1) { + throw result.o1; + } if (result.__isset.o2) { throw result.o2; } + if (result.__isset.o3) { + throw result.o3; + } throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "drop_database failed: unknown result"); } -void ThriftHiveMetastoreClient::get_databases(std::vector & _return) +void ThriftHiveMetastoreClient::get_databases(std::vector & _return, const std::string& pattern) { - send_get_databases(); + send_get_databases(pattern); recv_get_databases(_return); } -void ThriftHiveMetastoreClient::send_get_databases() +void ThriftHiveMetastoreClient::send_get_databases(const std::string& pattern) { int32_t cseqid = 0; oprot_->writeMessageBegin("get_databases", apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_get_databases_pargs args; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -8292,6 +8401,9 @@ void ThriftHiveMetastoreClient::recv_get_type(Type& _return) // _return pointer has now been filled return; } + if (result.__isset.o1) { + throw result.o1; + } if (result.__isset.o2) { throw result.o2; } @@ -8424,6 +8536,9 @@ bool ThriftHiveMetastoreClient::recv_drop_type() if (result.__isset.success) { return _return; } + if (result.__isset.o1) { + throw result.o1; + } if (result.__isset.o2) { throw result.o2; } @@ -10345,14 +10460,17 @@ void ThriftHiveMetastoreProcessor::process_create_database(int32_t seqid, apache ThriftHiveMetastore_create_database_result result; try { - result.success = iface_->create_database(args.name, args.description); + result.success = iface_->create_database(args.database); result.__isset.success = true; } catch (AlreadyExistsException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (InvalidObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { apache::thrift::TApplicationException x(e.what()); oprot->writeMessageBegin("create_database", apache::thrift::protocol::T_EXCEPTION, seqid); @@ -10415,9 +10533,15 @@ void ThriftHiveMetastoreProcessor::process_drop_database(int32_t seqid, apache:: try { result.success = iface_->drop_database(args.name); result.__isset.success = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { apache::thrift::TApplicationException x(e.what()); oprot->writeMessageBegin("drop_database", apache::thrift::protocol::T_EXCEPTION, seqid); @@ -10444,7 +10568,7 @@ void 
ThriftHiveMetastoreProcessor::process_get_databases(int32_t seqid, apache:: ThriftHiveMetastore_get_databases_result result; try { - iface_->get_databases(result.success); + iface_->get_databases(result.success, args.pattern); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; @@ -10477,7 +10601,10 @@ void ThriftHiveMetastoreProcessor::process_get_type(int32_t seqid, apache::thrif try { iface_->get_type(result.success, args.name); result.__isset.success = true; - } catch (MetaException &o2) { + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { @@ -10545,7 +10672,10 @@ void ThriftHiveMetastoreProcessor::process_drop_type(int32_t seqid, apache::thri try { result.success = iface_->drop_type(args.type); result.__isset.success = true; - } catch (MetaException &o2) { + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { diff --git metastore/src/gen-cpp/ThriftHiveMetastore.h metastore/src/gen-cpp/ThriftHiveMetastore.h index e2538fb..97f12cd 100644 --- metastore/src/gen-cpp/ThriftHiveMetastore.h +++ metastore/src/gen-cpp/ThriftHiveMetastore.h @@ -15,10 +15,10 @@ namespace Apache { namespace Hadoop { namespace Hive { class ThriftHiveMetastoreIf : virtual public facebook::fb303::FacebookServiceIf { public: virtual ~ThriftHiveMetastoreIf() {} - virtual bool create_database(const std::string& name, const std::string& description) = 0; + virtual bool create_database(const Database& database) = 0; virtual void get_database(Database& _return, const std::string& name) = 0; virtual bool drop_database(const std::string& name) = 0; - virtual void get_databases(std::vector & _return) = 0; + virtual void get_databases(std::vector & _return, const std::string& pattern) = 0; virtual void get_type(Type& _return, const std::string& name) = 0; virtual bool create_type(const Type& type) = 0; virtual bool drop_type(const std::string& type) = 0; @@ -55,7 +55,7 @@ class ThriftHiveMetastoreIf : virtual public facebook::fb303::FacebookServiceIf class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual public facebook::fb303::FacebookServiceNull { public: virtual ~ThriftHiveMetastoreNull() {} - bool create_database(const std::string& /* name */, const std::string& /* description */) { + bool create_database(const Database& /* database */) { bool _return = false; return _return; } @@ -66,7 +66,7 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p bool _return = false; return _return; } - void get_databases(std::vector & /* _return */) { + void get_databases(std::vector & /* _return */, const std::string& /* pattern */) { return; } void get_type(Type& /* _return */, const std::string& /* name */) { @@ -172,25 +172,21 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p class ThriftHiveMetastore_create_database_args { public: - ThriftHiveMetastore_create_database_args() : name(""), description("") { + ThriftHiveMetastore_create_database_args() { } virtual ~ThriftHiveMetastore_create_database_args() throw() {} - std::string name; - std::string description; + Database database; struct __isset { - __isset() : name(false), description(false) {} - bool name; - bool description; + __isset() : database(false) {} + bool database; } __isset; 
bool operator == (const ThriftHiveMetastore_create_database_args & rhs) const { - if (!(name == rhs.name)) - return false; - if (!(description == rhs.description)) + if (!(database == rhs.database)) return false; return true; } @@ -211,8 +207,7 @@ class ThriftHiveMetastore_create_database_pargs { virtual ~ThriftHiveMetastore_create_database_pargs() throw() {} - const std::string* name; - const std::string* description; + const Database* database; uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; @@ -228,13 +223,15 @@ class ThriftHiveMetastore_create_database_result { bool success; AlreadyExistsException o1; - MetaException o2; + InvalidObjectException o2; + MetaException o3; struct __isset { - __isset() : success(false), o1(false), o2(false) {} + __isset() : success(false), o1(false), o2(false), o3(false) {} bool success; bool o1; bool o2; + bool o3; } __isset; bool operator == (const ThriftHiveMetastore_create_database_result & rhs) const @@ -245,6 +242,8 @@ class ThriftHiveMetastore_create_database_result { return false; if (!(o2 == rhs.o2)) return false; + if (!(o3 == rhs.o3)) + return false; return true; } bool operator != (const ThriftHiveMetastore_create_database_result &rhs) const { @@ -266,13 +265,15 @@ class ThriftHiveMetastore_create_database_presult { bool* success; AlreadyExistsException o1; - MetaException o2; + InvalidObjectException o2; + MetaException o3; struct __isset { - __isset() : success(false), o1(false), o2(false) {} + __isset() : success(false), o1(false), o2(false), o3(false) {} bool success; bool o1; bool o2; + bool o3; } __isset; uint32_t read(apache::thrift::protocol::TProtocol* iprot); @@ -437,20 +438,28 @@ class ThriftHiveMetastore_drop_database_result { virtual ~ThriftHiveMetastore_drop_database_result() throw() {} bool success; - MetaException o2; + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false), o3(false) {} bool success; + bool o1; bool o2; + bool o3; } __isset; bool operator == (const ThriftHiveMetastore_drop_database_result & rhs) const { if (!(success == rhs.success)) return false; + if (!(o1 == rhs.o1)) + return false; if (!(o2 == rhs.o2)) return false; + if (!(o3 == rhs.o3)) + return false; return true; } bool operator != (const ThriftHiveMetastore_drop_database_result &rhs) const { @@ -471,12 +480,16 @@ class ThriftHiveMetastore_drop_database_presult { virtual ~ThriftHiveMetastore_drop_database_presult() throw() {} bool* success; - MetaException o2; + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false), o3(false) {} bool success; + bool o1; bool o2; + bool o3; } __isset; uint32_t read(apache::thrift::protocol::TProtocol* iprot); @@ -486,14 +499,22 @@ class ThriftHiveMetastore_drop_database_presult { class ThriftHiveMetastore_get_databases_args { public: - ThriftHiveMetastore_get_databases_args() { + ThriftHiveMetastore_get_databases_args() : pattern("") { } virtual ~ThriftHiveMetastore_get_databases_args() throw() {} + std::string pattern; + + struct __isset { + __isset() : pattern(false) {} + bool pattern; + } __isset; - bool operator == (const ThriftHiveMetastore_get_databases_args & /* rhs */) const + bool operator == (const ThriftHiveMetastore_get_databases_args & rhs) const { + if (!(pattern == rhs.pattern)) + return false; return true; } bool operator != 
(const ThriftHiveMetastore_get_databases_args &rhs) const { @@ -513,6 +534,7 @@ class ThriftHiveMetastore_get_databases_pargs { virtual ~ThriftHiveMetastore_get_databases_pargs() throw() {} + const std::string* pattern; uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; @@ -626,11 +648,13 @@ class ThriftHiveMetastore_get_type_result { virtual ~ThriftHiveMetastore_get_type_result() throw() {} Type success; - MetaException o2; + MetaException o1; + NoSuchObjectException o2; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false) {} bool success; + bool o1; bool o2; } __isset; @@ -638,6 +662,8 @@ class ThriftHiveMetastore_get_type_result { { if (!(success == rhs.success)) return false; + if (!(o1 == rhs.o1)) + return false; if (!(o2 == rhs.o2)) return false; return true; @@ -660,11 +686,13 @@ class ThriftHiveMetastore_get_type_presult { virtual ~ThriftHiveMetastore_get_type_presult() throw() {} Type* success; - MetaException o2; + MetaException o1; + NoSuchObjectException o2; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false) {} bool success; + bool o1; bool o2; } __isset; @@ -836,11 +864,13 @@ class ThriftHiveMetastore_drop_type_result { virtual ~ThriftHiveMetastore_drop_type_result() throw() {} bool success; - MetaException o2; + MetaException o1; + NoSuchObjectException o2; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false) {} bool success; + bool o1; bool o2; } __isset; @@ -848,6 +878,8 @@ class ThriftHiveMetastore_drop_type_result { { if (!(success == rhs.success)) return false; + if (!(o1 == rhs.o1)) + return false; if (!(o2 == rhs.o2)) return false; return true; @@ -870,11 +902,13 @@ class ThriftHiveMetastore_drop_type_presult { virtual ~ThriftHiveMetastore_drop_type_presult() throw() {} bool* success; - MetaException o2; + MetaException o1; + NoSuchObjectException o2; struct __isset { - __isset() : success(false), o2(false) {} + __isset() : success(false), o1(false), o2(false) {} bool success; + bool o1; bool o2; } __isset; @@ -4029,8 +4063,8 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public f boost::shared_ptr getOutputProtocol() { return poprot_; } - bool create_database(const std::string& name, const std::string& description); - void send_create_database(const std::string& name, const std::string& description); + bool create_database(const Database& database); + void send_create_database(const Database& database); bool recv_create_database(); void get_database(Database& _return, const std::string& name); void send_get_database(const std::string& name); @@ -4038,8 +4072,8 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public f bool drop_database(const std::string& name); void send_drop_database(const std::string& name); bool recv_drop_database(); - void get_databases(std::vector & _return); - void send_get_databases(); + void get_databases(std::vector & _return, const std::string& pattern); + void send_get_databases(const std::string& pattern); void recv_get_databases(std::vector & _return); void get_type(Type& _return, const std::string& name); void send_get_type(const std::string& name); @@ -4239,13 +4273,13 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi ifaces_.push_back(iface); } public: - bool create_database(const std::string& name, const std::string& description) { + bool create_database(const 
Database& database) { uint32_t sz = ifaces_.size(); for (uint32_t i = 0; i < sz; ++i) { if (i == sz - 1) { - return ifaces_[i]->create_database(name, description); + return ifaces_[i]->create_database(database); } else { - ifaces_[i]->create_database(name, description); + ifaces_[i]->create_database(database); } } } @@ -4273,14 +4307,14 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi } } - void get_databases(std::vector & _return) { + void get_databases(std::vector & _return, const std::string& pattern) { uint32_t sz = ifaces_.size(); for (uint32_t i = 0; i < sz; ++i) { if (i == sz - 1) { - ifaces_[i]->get_databases(_return); + ifaces_[i]->get_databases(_return, pattern); return; } else { - ifaces_[i]->get_databases(_return); + ifaces_[i]->get_databases(_return, pattern); } } } diff --git metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index ed2bb99..1771c63 100644 --- metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -22,7 +22,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { // Your initialization goes here } - bool create_database(const std::string& name, const std::string& description) { + bool create_database(const Database& database) { // Your implementation goes here printf("create_database\n"); } @@ -37,7 +37,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("drop_database\n"); } - void get_databases(std::vector & _return) { + void get_databases(std::vector & _return, const std::string& pattern) { // Your implementation goes here printf("get_databases\n"); } diff --git metastore/src/gen-cpp/hive_metastore_types.cpp metastore/src/gen-cpp/hive_metastore_types.cpp index b5a403d..4005d3a 100644 --- metastore/src/gen-cpp/hive_metastore_types.cpp +++ metastore/src/gen-cpp/hive_metastore_types.cpp @@ -261,8 +261,8 @@ uint32_t Type::write(apache::thrift::protocol::TProtocol* oprot) const { return xfer; } -const char* Database::ascii_fingerprint = "07A9615F837F7D0A952B595DD3020972"; -const uint8_t Database::binary_fingerprint[16] = {0x07,0xA9,0x61,0x5F,0x83,0x7F,0x7D,0x0A,0x95,0x2B,0x59,0x5D,0xD3,0x02,0x09,0x72}; +const char* Database::ascii_fingerprint = "AB879940BD15B6B25691265F7384B271"; +const uint8_t Database::binary_fingerprint[16] = {0xAB,0x87,0x99,0x40,0xBD,0x15,0xB6,0xB2,0x56,0x91,0x26,0x5F,0x73,0x84,0xB2,0x71}; uint32_t Database::read(apache::thrift::protocol::TProtocol* iprot) { @@ -294,8 +294,16 @@ uint32_t Database::read(apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->description); - this->__isset.description = true; + xfer += iprot->readString(this->comment); + this->__isset.comment = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->locationUri); + this->__isset.locationUri = true; } else { xfer += iprot->skip(ftype); } @@ -318,8 +326,11 @@ uint32_t Database::write(apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("name", apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("description", apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->description); + xfer += 
oprot->writeFieldBegin("comment", apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->comment); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("locationUri", apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->locationUri); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); diff --git metastore/src/gen-cpp/hive_metastore_types.h metastore/src/gen-cpp/hive_metastore_types.h index 1b0c706..4d842c1 100644 --- metastore/src/gen-cpp/hive_metastore_types.h +++ metastore/src/gen-cpp/hive_metastore_types.h @@ -153,28 +153,32 @@ class Type { class Database { public: - static const char* ascii_fingerprint; // = "07A9615F837F7D0A952B595DD3020972"; - static const uint8_t binary_fingerprint[16]; // = {0x07,0xA9,0x61,0x5F,0x83,0x7F,0x7D,0x0A,0x95,0x2B,0x59,0x5D,0xD3,0x02,0x09,0x72}; + static const char* ascii_fingerprint; // = "AB879940BD15B6B25691265F7384B271"; + static const uint8_t binary_fingerprint[16]; // = {0xAB,0x87,0x99,0x40,0xBD,0x15,0xB6,0xB2,0x56,0x91,0x26,0x5F,0x73,0x84,0xB2,0x71}; - Database() : name(""), description("") { + Database() : name(""), comment(""), locationUri("") { } virtual ~Database() throw() {} std::string name; - std::string description; + std::string comment; + std::string locationUri; struct __isset { - __isset() : name(false), description(false) {} + __isset() : name(false), comment(false), locationUri(false) {} bool name; - bool description; + bool comment; + bool locationUri; } __isset; bool operator == (const Database & rhs) const { if (!(name == rhs.name)) return false; - if (!(description == rhs.description)) + if (!(comment == rhs.comment)) + return false; + if (!(locationUri == rhs.locationUri)) return false; return true; } diff --git metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java index 78c78d9..8b9afad 100644 --- metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java +++ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java @@ -21,12 +21,15 @@ import org.apache.thrift.protocol.*; public class Database implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("Database"); private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)1); - private static final TField DESCRIPTION_FIELD_DESC = new TField("description", TType.STRING, (short)2); + private static final TField COMMENT_FIELD_DESC = new TField("comment", TType.STRING, (short)2); + private static final TField LOCATION_URI_FIELD_DESC = new TField("locationUri", TType.STRING, (short)3); private String name; public static final int NAME = 1; - private String description; - public static final int DESCRIPTION = 2; + private String comment; + public static final int COMMENT = 2; + private String locationUri; + public static final int LOCATIONURI = 3; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { @@ -35,7 +38,9 @@ public class Database implements TBase, java.io.Serializable, Cloneable { public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(DESCRIPTION, new FieldMetaData("description", TFieldRequirementType.DEFAULT, + put(COMMENT, new FieldMetaData("comment", 
TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(LOCATIONURI, new FieldMetaData("locationUri", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -48,11 +53,13 @@ public class Database implements TBase, java.io.Serializable, Cloneable { public Database( String name, - String description) + String comment, + String locationUri) { this(); this.name = name; - this.description = description; + this.comment = comment; + this.locationUri = locationUri; } /** @@ -62,8 +69,11 @@ public class Database implements TBase, java.io.Serializable, Cloneable { if (other.isSetName()) { this.name = other.name; } - if (other.isSetDescription()) { - this.description = other.description; + if (other.isSetComment()) { + this.comment = other.comment; + } + if (other.isSetLocationUri()) { + this.locationUri = other.locationUri; } } @@ -89,21 +99,38 @@ public class Database implements TBase, java.io.Serializable, Cloneable { return this.name != null; } - public String getDescription() { - return this.description; + public String getComment() { + return this.comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public void unsetComment() { + this.comment = null; + } + + // Returns true if field comment is set (has been asigned a value) and false otherwise + public boolean isSetComment() { + return this.comment != null; } - public void setDescription(String description) { - this.description = description; + public String getLocationUri() { + return this.locationUri; } - public void unsetDescription() { - this.description = null; + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; } - // Returns true if field description is set (has been asigned a value) and false otherwise - public boolean isSetDescription() { - return this.description != null; + public void unsetLocationUri() { + this.locationUri = null; + } + + // Returns true if field locationUri is set (has been asigned a value) and false otherwise + public boolean isSetLocationUri() { + return this.locationUri != null; } public void setFieldValue(int fieldID, Object value) { @@ -116,11 +143,19 @@ public class Database implements TBase, java.io.Serializable, Cloneable { } break; - case DESCRIPTION: + case COMMENT: + if (value == null) { + unsetComment(); + } else { + setComment((String)value); + } + break; + + case LOCATIONURI: if (value == null) { - unsetDescription(); + unsetLocationUri(); } else { - setDescription((String)value); + setLocationUri((String)value); } break; @@ -134,8 +169,11 @@ public class Database implements TBase, java.io.Serializable, Cloneable { case NAME: return getName(); - case DESCRIPTION: - return getDescription(); + case COMMENT: + return getComment(); + + case LOCATIONURI: + return getLocationUri(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); @@ -147,8 +185,10 @@ public class Database implements TBase, java.io.Serializable, Cloneable { switch (fieldID) { case NAME: return isSetName(); - case DESCRIPTION: - return isSetDescription(); + case COMMENT: + return isSetComment(); + case LOCATIONURI: + return isSetLocationUri(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -176,12 +216,21 @@ public class Database implements TBase, java.io.Serializable, Cloneable { return false; } - boolean this_present_description = true && this.isSetDescription(); - boolean that_present_description = true && that.isSetDescription(); - if 
(this_present_description || that_present_description) { - if (!(this_present_description && that_present_description)) + boolean this_present_comment = true && this.isSetComment(); + boolean that_present_comment = true && that.isSetComment(); + if (this_present_comment || that_present_comment) { + if (!(this_present_comment && that_present_comment)) return false; - if (!this.description.equals(that.description)) + if (!this.comment.equals(that.comment)) + return false; + } + + boolean this_present_locationUri = true && this.isSetLocationUri(); + boolean that_present_locationUri = true && that.isSetLocationUri(); + if (this_present_locationUri || that_present_locationUri) { + if (!(this_present_locationUri && that_present_locationUri)) + return false; + if (!this.locationUri.equals(that.locationUri)) return false; } @@ -211,9 +260,16 @@ public class Database implements TBase, java.io.Serializable, Cloneable { TProtocolUtil.skip(iprot, field.type); } break; - case DESCRIPTION: + case COMMENT: + if (field.type == TType.STRING) { + this.comment = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case LOCATIONURI: if (field.type == TType.STRING) { - this.description = iprot.readString(); + this.locationUri = iprot.readString(); } else { TProtocolUtil.skip(iprot, field.type); } @@ -238,9 +294,14 @@ public class Database implements TBase, java.io.Serializable, Cloneable { oprot.writeString(this.name); oprot.writeFieldEnd(); } - if (this.description != null) { - oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC); - oprot.writeString(this.description); + if (this.comment != null) { + oprot.writeFieldBegin(COMMENT_FIELD_DESC); + oprot.writeString(this.comment); + oprot.writeFieldEnd(); + } + if (this.locationUri != null) { + oprot.writeFieldBegin(LOCATION_URI_FIELD_DESC); + oprot.writeString(this.locationUri); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -260,11 +321,19 @@ public class Database implements TBase, java.io.Serializable, Cloneable { } first = false; if (!first) sb.append(", "); - sb.append("description:"); - if (this.description == null) { + sb.append("comment:"); + if (this.comment == null) { + sb.append("null"); + } else { + sb.append(this.comment); + } + first = false; + if (!first) sb.append(", "); + sb.append("locationUri:"); + if (this.locationUri == null) { sb.append("null"); } else { - sb.append(this.description); + sb.append(this.locationUri); } first = false; sb.append(")"); diff --git metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 25408d9..a5e1c49 100644 --- metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -25,19 +25,19 @@ public class ThriftHiveMetastore { */ public interface Iface extends com.facebook.fb303.FacebookService.Iface { - public boolean create_database(String name, String description) throws AlreadyExistsException, MetaException, TException; + public boolean create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, TException; public Database get_database(String name) throws NoSuchObjectException, MetaException, TException; - public boolean drop_database(String name) throws MetaException, TException; + public boolean drop_database(String name) throws NoSuchObjectException, InvalidOperationException, 
MetaException, TException; - public List get_databases() throws MetaException, TException; + public List get_databases(String pattern) throws MetaException, TException; - public Type get_type(String name) throws MetaException, TException; + public Type get_type(String name) throws MetaException, NoSuchObjectException, TException; public boolean create_type(Type type) throws AlreadyExistsException, InvalidObjectException, MetaException, TException; - public boolean drop_type(String type) throws MetaException, TException; + public boolean drop_type(String type) throws MetaException, NoSuchObjectException, TException; public Map get_type_all(String name) throws MetaException, TException; @@ -108,24 +108,23 @@ public class ThriftHiveMetastore { super(iprot, oprot); } - public boolean create_database(String name, String description) throws AlreadyExistsException, MetaException, TException + public boolean create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, TException { - send_create_database(name, description); + send_create_database(database); return recv_create_database(); } - public void send_create_database(String name, String description) throws TException + public void send_create_database(Database database) throws TException { oprot_.writeMessageBegin(new TMessage("create_database", TMessageType.CALL, seqid_)); create_database_args args = new create_database_args(); - args.name = name; - args.description = description; + args.database = database; args.write(oprot_); oprot_.writeMessageEnd(); oprot_.getTransport().flush(); } - public boolean recv_create_database() throws AlreadyExistsException, MetaException, TException + public boolean recv_create_database() throws AlreadyExistsException, InvalidObjectException, MetaException, TException { TMessage msg = iprot_.readMessageBegin(); if (msg.type == TMessageType.EXCEPTION) { @@ -145,6 +144,9 @@ public class ThriftHiveMetastore { if (result.o2 != null) { throw result.o2; } + if (result.o3 != null) { + throw result.o3; + } throw new TApplicationException(TApplicationException.MISSING_RESULT, "create_database failed: unknown result"); } @@ -187,7 +189,7 @@ public class ThriftHiveMetastore { throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_database failed: unknown result"); } - public boolean drop_database(String name) throws MetaException, TException + public boolean drop_database(String name) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { send_drop_database(name); return recv_drop_database(); @@ -203,7 +205,7 @@ public class ThriftHiveMetastore { oprot_.getTransport().flush(); } - public boolean recv_drop_database() throws MetaException, TException + public boolean recv_drop_database() throws NoSuchObjectException, InvalidOperationException, MetaException, TException { TMessage msg = iprot_.readMessageBegin(); if (msg.type == TMessageType.EXCEPTION) { @@ -217,22 +219,29 @@ public class ThriftHiveMetastore { if (result.isSetSuccess()) { return result.success; } + if (result.o1 != null) { + throw result.o1; + } if (result.o2 != null) { throw result.o2; } + if (result.o3 != null) { + throw result.o3; + } throw new TApplicationException(TApplicationException.MISSING_RESULT, "drop_database failed: unknown result"); } - public List get_databases() throws MetaException, TException + public List get_databases(String pattern) throws MetaException, TException { - send_get_databases(); + send_get_databases(pattern); return 
recv_get_databases(); } - public void send_get_databases() throws TException + public void send_get_databases(String pattern) throws TException { oprot_.writeMessageBegin(new TMessage("get_databases", TMessageType.CALL, seqid_)); get_databases_args args = new get_databases_args(); + args.pattern = pattern; args.write(oprot_); oprot_.writeMessageEnd(); oprot_.getTransport().flush(); @@ -258,7 +267,7 @@ public class ThriftHiveMetastore { throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_databases failed: unknown result"); } - public Type get_type(String name) throws MetaException, TException + public Type get_type(String name) throws MetaException, NoSuchObjectException, TException { send_get_type(name); return recv_get_type(); @@ -274,7 +283,7 @@ public class ThriftHiveMetastore { oprot_.getTransport().flush(); } - public Type recv_get_type() throws MetaException, TException + public Type recv_get_type() throws MetaException, NoSuchObjectException, TException { TMessage msg = iprot_.readMessageBegin(); if (msg.type == TMessageType.EXCEPTION) { @@ -288,6 +297,9 @@ public class ThriftHiveMetastore { if (result.isSetSuccess()) { return result.success; } + if (result.o1 != null) { + throw result.o1; + } if (result.o2 != null) { throw result.o2; } @@ -336,7 +348,7 @@ public class ThriftHiveMetastore { throw new TApplicationException(TApplicationException.MISSING_RESULT, "create_type failed: unknown result"); } - public boolean drop_type(String type) throws MetaException, TException + public boolean drop_type(String type) throws MetaException, NoSuchObjectException, TException { send_drop_type(type); return recv_drop_type(); @@ -352,7 +364,7 @@ public class ThriftHiveMetastore { oprot_.getTransport().flush(); } - public boolean recv_drop_type() throws MetaException, TException + public boolean recv_drop_type() throws MetaException, NoSuchObjectException, TException { TMessage msg = iprot_.readMessageBegin(); if (msg.type == TMessageType.EXCEPTION) { @@ -366,6 +378,9 @@ public class ThriftHiveMetastore { if (result.isSetSuccess()) { return result.success; } + if (result.o1 != null) { + throw result.o1; + } if (result.o2 != null) { throw result.o2; } @@ -1566,12 +1581,14 @@ public class ThriftHiveMetastore { iprot.readMessageEnd(); create_database_result result = new create_database_result(); try { - result.success = iface_.create_database(args.name, args.description); + result.success = iface_.create_database(args.database); result.__isset.success = true; } catch (AlreadyExistsException o1) { result.o1 = o1; - } catch (MetaException o2) { + } catch (InvalidObjectException o2) { result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; } catch (Throwable th) { LOGGER.error("Internal error processing create_database", th); TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing create_database"); @@ -1629,8 +1646,12 @@ public class ThriftHiveMetastore { try { result.success = iface_.drop_database(args.name); result.__isset.success = true; - } catch (MetaException o2) { + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidOperationException o2) { result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; } catch (Throwable th) { LOGGER.error("Internal error processing drop_database", th); TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing drop_database"); @@ -1656,7 +1677,7 @@ public class ThriftHiveMetastore { 
iprot.readMessageEnd(); get_databases_result result = new get_databases_result(); try { - result.success = iface_.get_databases(); + result.success = iface_.get_databases(args.pattern); } catch (MetaException o1) { result.o1 = o1; } catch (Throwable th) { @@ -1685,7 +1706,9 @@ public class ThriftHiveMetastore { get_type_result result = new get_type_result(); try { result.success = iface_.get_type(args.name); - } catch (MetaException o2) { + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { result.o2 = o2; } catch (Throwable th) { LOGGER.error("Internal error processing get_type", th); @@ -1747,7 +1770,9 @@ public class ThriftHiveMetastore { try { result.success = iface_.drop_type(args.type); result.__isset.success = true; - } catch (MetaException o2) { + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { result.o2 = o2; } catch (Throwable th) { LOGGER.error("Internal error processing drop_type", th); @@ -2611,23 +2636,18 @@ public class ThriftHiveMetastore { public static class create_database_args implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("create_database_args"); - private static final TField NAME_FIELD_DESC = new TField("name", TType.STRING, (short)1); - private static final TField DESCRIPTION_FIELD_DESC = new TField("description", TType.STRING, (short)2); + private static final TField DATABASE_FIELD_DESC = new TField("database", TType.STRUCT, (short)1); - private String name; - public static final int NAME = 1; - private String description; - public static final int DESCRIPTION = 2; + private Database database; + public static final int DATABASE = 1; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); - put(DESCRIPTION, new FieldMetaData("description", TFieldRequirementType.DEFAULT, - new FieldValueMetaData(TType.STRING))); + put(DATABASE, new FieldMetaData("database", TFieldRequirementType.DEFAULT, + new StructMetaData(TType.STRUCT, Database.class))); }}); static { @@ -2638,23 +2658,18 @@ public class ThriftHiveMetastore { } public create_database_args( - String name, - String description) + Database database) { this(); - this.name = name; - this.description = description; + this.database = database; } /** * Performs a deep copy on other. 
*/ public create_database_args(create_database_args other) { - if (other.isSetName()) { - this.name = other.name; - } - if (other.isSetDescription()) { - this.description = other.description; + if (other.isSetDatabase()) { + this.database = new Database(other.database); } } @@ -2663,55 +2678,30 @@ public class ThriftHiveMetastore { return new create_database_args(this); } - public String getName() { - return this.name; - } - - public void setName(String name) { - this.name = name; - } - - public void unsetName() { - this.name = null; - } - - // Returns true if field name is set (has been asigned a value) and false otherwise - public boolean isSetName() { - return this.name != null; - } - - public String getDescription() { - return this.description; + public Database getDatabase() { + return this.database; } - public void setDescription(String description) { - this.description = description; + public void setDatabase(Database database) { + this.database = database; } - public void unsetDescription() { - this.description = null; + public void unsetDatabase() { + this.database = null; } - // Returns true if field description is set (has been asigned a value) and false otherwise - public boolean isSetDescription() { - return this.description != null; + // Returns true if field database is set (has been asigned a value) and false otherwise + public boolean isSetDatabase() { + return this.database != null; } public void setFieldValue(int fieldID, Object value) { switch (fieldID) { - case NAME: + case DATABASE: if (value == null) { - unsetName(); - } else { - setName((String)value); - } - break; - - case DESCRIPTION: - if (value == null) { - unsetDescription(); + unsetDatabase(); } else { - setDescription((String)value); + setDatabase((Database)value); } break; @@ -2722,11 +2712,8 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { - case NAME: - return getName(); - - case DESCRIPTION: - return getDescription(); + case DATABASE: + return getDatabase(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); @@ -2736,10 +2723,8 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { - case NAME: - return isSetName(); - case DESCRIPTION: - return isSetDescription(); + case DATABASE: + return isSetDatabase(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -2758,21 +2743,12 @@ public class ThriftHiveMetastore { if (that == null) return false; - boolean this_present_name = true && this.isSetName(); - boolean that_present_name = true && that.isSetName(); - if (this_present_name || that_present_name) { - if (!(this_present_name && that_present_name)) - return false; - if (!this.name.equals(that.name)) - return false; - } - - boolean this_present_description = true && this.isSetDescription(); - boolean that_present_description = true && that.isSetDescription(); - if (this_present_description || that_present_description) { - if (!(this_present_description && that_present_description)) + boolean this_present_database = true && this.isSetDatabase(); + boolean that_present_database = true && that.isSetDatabase(); + if (this_present_database || that_present_database) { + if (!(this_present_database && that_present_database)) return false; - if (!this.description.equals(that.description)) + if (!this.database.equals(that.database)) return false; } @@ 
-2795,16 +2771,10 @@ public class ThriftHiveMetastore { } switch (field.id) { - case NAME: - if (field.type == TType.STRING) { - this.name = iprot.readString(); - } else { - TProtocolUtil.skip(iprot, field.type); - } - break; - case DESCRIPTION: - if (field.type == TType.STRING) { - this.description = iprot.readString(); + case DATABASE: + if (field.type == TType.STRUCT) { + this.database = new Database(); + this.database.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } @@ -2824,14 +2794,9 @@ public class ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.name != null) { - oprot.writeFieldBegin(NAME_FIELD_DESC); - oprot.writeString(this.name); - oprot.writeFieldEnd(); - } - if (this.description != null) { - oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC); - oprot.writeString(this.description); + if (this.database != null) { + oprot.writeFieldBegin(DATABASE_FIELD_DESC); + this.database.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -2843,19 +2808,11 @@ public class ThriftHiveMetastore { StringBuilder sb = new StringBuilder("create_database_args("); boolean first = true; - sb.append("name:"); - if (this.name == null) { - sb.append("null"); - } else { - sb.append(this.name); - } - first = false; - if (!first) sb.append(", "); - sb.append("description:"); - if (this.description == null) { + sb.append("database:"); + if (this.database == null) { sb.append("null"); } else { - sb.append(this.description); + sb.append(this.database); } first = false; sb.append(")"); @@ -2874,13 +2831,16 @@ public class ThriftHiveMetastore { private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.BOOL, (short)0); private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); + private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3); private boolean success; public static final int SUCCESS = 0; private AlreadyExistsException o1; public static final int O1 = 1; - private MetaException o2; + private InvalidObjectException o2; public static final int O2 = 2; + private MetaException o3; + public static final int O3 = 3; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { @@ -2894,6 +2854,8 @@ public class ThriftHiveMetastore { new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); + put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); }}); static { @@ -2906,13 +2868,15 @@ public class ThriftHiveMetastore { public create_database_result( boolean success, AlreadyExistsException o1, - MetaException o2) + InvalidObjectException o2, + MetaException o3) { this(); this.success = success; this.__isset.success = true; this.o1 = o1; this.o2 = o2; + this.o3 = o3; } /** @@ -2925,7 +2889,10 @@ public class ThriftHiveMetastore { this.o1 = new AlreadyExistsException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } @@ -2969,11 +2936,11 @@ public class ThriftHiveMetastore { return this.o1 != null; } - public MetaException getO2() { + public InvalidObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(InvalidObjectException o2) 
{ this.o2 = o2; } @@ -2986,6 +2953,23 @@ public class ThriftHiveMetastore { return this.o2 != null; } + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + // Returns true if field o3 is set (has been asigned a value) and false otherwise + public boolean isSetO3() { + return this.o3 != null; + } + public void setFieldValue(int fieldID, Object value) { switch (fieldID) { case SUCCESS: @@ -3008,7 +2992,15 @@ public class ThriftHiveMetastore { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); } break; @@ -3028,6 +3020,9 @@ public class ThriftHiveMetastore { case O2: return getO2(); + case O3: + return getO3(); + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -3042,6 +3037,8 @@ public class ThriftHiveMetastore { return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -3087,6 +3084,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -3124,12 +3130,20 @@ public class ThriftHiveMetastore { break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new MetaException(); + this.o2 = new InvalidObjectException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; + case O3: + if (field.type == TType.STRUCT) { + this.o3 = new MetaException(); + this.o3.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -3156,6 +3170,10 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(O2_FIELD_DESC); this.o2.write(oprot); oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -3185,6 +3203,14 @@ public class ThriftHiveMetastore { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -3910,12 +3936,18 @@ public class ThriftHiveMetastore { public static class drop_database_result implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("drop_database_result"); private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.BOOL, (short)0); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); + private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3); private boolean success; public static final int SUCCESS = 0; - private MetaException o2; + private NoSuchObjectException o1; + public static final int O1 = 1; + private InvalidOperationException o2; public static final int O2 = 2; + private MetaException o3; + public static final int O3 = 3; private final Isset __isset = 
new Isset(); private static final class Isset implements java.io.Serializable { @@ -3925,8 +3957,12 @@ public class ThriftHiveMetastore { public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); + put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); }}); static { @@ -3938,12 +3974,16 @@ public class ThriftHiveMetastore { public drop_database_result( boolean success, - MetaException o2) + NoSuchObjectException o1, + InvalidOperationException o2, + MetaException o3) { this(); this.success = success; this.__isset.success = true; + this.o1 = o1; this.o2 = o2; + this.o3 = o3; } /** @@ -3952,8 +3992,14 @@ public class ThriftHiveMetastore { public drop_database_result(drop_database_result other) { __isset.success = other.__isset.success; this.success = other.success; + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new InvalidOperationException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } @@ -3980,11 +4026,28 @@ public class ThriftHiveMetastore { return this.__isset.success; } - public MetaException getO2() { + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public InvalidOperationException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(InvalidOperationException o2) { this.o2 = o2; } @@ -3997,6 +4060,23 @@ public class ThriftHiveMetastore { return this.o2 != null; } + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + // Returns true if field o3 is set (has been asigned a value) and false otherwise + public boolean isSetO3() { + return this.o3 != null; + } + public void setFieldValue(int fieldID, Object value) { switch (fieldID) { case SUCCESS: @@ -4007,16 +4087,32 @@ public class ThriftHiveMetastore { } break; + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + case O2: if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((InvalidOperationException)value); } break; - default: - throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } } @@ -4025,9 +4121,15 @@ public class ThriftHiveMetastore { case SUCCESS: return new Boolean(isSuccess()); + case O1: + return getO1(); + case O2: return getO2(); + case O3: + return getO3(); + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4038,8 +4140,12 @@ public class ThriftHiveMetastore { switch (fieldID) { case SUCCESS: return isSetSuccess(); + case O1: + 
return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4067,6 +4173,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + boolean this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -4076,6 +4191,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -4103,14 +4227,30 @@ public class ThriftHiveMetastore { TProtocolUtil.skip(iprot, field.type); } break; + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new NoSuchObjectException(); + this.o1.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new MetaException(); + this.o2 = new InvalidOperationException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); } break; + case O3: + if (field.type == TType.STRUCT) { + this.o3 = new MetaException(); + this.o3.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -4129,10 +4269,18 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); oprot.writeBool(this.success); oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); } else if (this.isSetO2()) { oprot.writeFieldBegin(O2_FIELD_DESC); this.o2.write(oprot); oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -4147,6 +4295,14 @@ public class ThriftHiveMetastore { sb.append(this.success); first = false; if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); @@ -4154,6 +4310,14 @@ public class ThriftHiveMetastore { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -4167,8 +4331,18 @@ public class ThriftHiveMetastore { public static class get_databases_args implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("get_databases_args"); + private static final TField PATTERN_FIELD_DESC = new TField("pattern", TType.STRING, (short)1); + + private String pattern; + public static final int PATTERN = 1; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(PATTERN, new FieldMetaData("pattern", TFieldRequirementType.DEFAULT, + 
new FieldValueMetaData(TType.STRING))); }}); static { @@ -4178,10 +4352,20 @@ public class ThriftHiveMetastore { public get_databases_args() { } + public get_databases_args( + String pattern) + { + this(); + this.pattern = pattern; + } + /** * Performs a deep copy on other. */ public get_databases_args(get_databases_args other) { + if (other.isSetPattern()) { + this.pattern = other.pattern; + } } @Override @@ -4189,8 +4373,33 @@ public class ThriftHiveMetastore { return new get_databases_args(this); } + public String getPattern() { + return this.pattern; + } + + public void setPattern(String pattern) { + this.pattern = pattern; + } + + public void unsetPattern() { + this.pattern = null; + } + + // Returns true if field pattern is set (has been asigned a value) and false otherwise + public boolean isSetPattern() { + return this.pattern != null; + } + public void setFieldValue(int fieldID, Object value) { switch (fieldID) { + case PATTERN: + if (value == null) { + unsetPattern(); + } else { + setPattern((String)value); + } + break; + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4198,6 +4407,9 @@ public class ThriftHiveMetastore { public Object getFieldValue(int fieldID) { switch (fieldID) { + case PATTERN: + return getPattern(); + default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4206,6 +4418,8 @@ public class ThriftHiveMetastore { // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise public boolean isSet(int fieldID) { switch (fieldID) { + case PATTERN: + return isSetPattern(); default: throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); } @@ -4224,6 +4438,15 @@ public class ThriftHiveMetastore { if (that == null) return false; + boolean this_present_pattern = true && this.isSetPattern(); + boolean that_present_pattern = true && that.isSetPattern(); + if (this_present_pattern || that_present_pattern) { + if (!(this_present_pattern && that_present_pattern)) + return false; + if (!this.pattern.equals(that.pattern)) + return false; + } + return true; } @@ -4243,6 +4466,13 @@ public class ThriftHiveMetastore { } switch (field.id) { + case PATTERN: + if (field.type == TType.STRING) { + this.pattern = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; default: TProtocolUtil.skip(iprot, field.type); break; @@ -4258,6 +4488,11 @@ public class ThriftHiveMetastore { validate(); oprot.writeStructBegin(STRUCT_DESC); + if (this.pattern != null) { + oprot.writeFieldBegin(PATTERN_FIELD_DESC); + oprot.writeString(this.pattern); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -4267,6 +4502,13 @@ public class ThriftHiveMetastore { StringBuilder sb = new StringBuilder("get_databases_args("); boolean first = true; + sb.append("pattern:"); + if (this.pattern == null) { + sb.append("null"); + } else { + sb.append(this.pattern); + } + first = false; sb.append(")"); return sb.toString(); } @@ -4767,12 +5009,15 @@ public class ThriftHiveMetastore { public static class get_type_result implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("get_type_result"); private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.STRUCT, (short)0); - private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)1); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + private 
static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); private Type success; public static final int SUCCESS = 0; - private MetaException o2; - public static final int O2 = 1; + private MetaException o1; + public static final int O1 = 1; + private NoSuchObjectException o2; + public static final int O2 = 2; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { @@ -4781,6 +5026,8 @@ public class ThriftHiveMetastore { public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new StructMetaData(TType.STRUCT, Type.class))); + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -4794,10 +5041,12 @@ public class ThriftHiveMetastore { public get_type_result( Type success, - MetaException o2) + MetaException o1, + NoSuchObjectException o2) { this(); this.success = success; + this.o1 = o1; this.o2 = o2; } @@ -4808,8 +5057,11 @@ public class ThriftHiveMetastore { if (other.isSetSuccess()) { this.success = new Type(other.success); } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } } @@ -4835,11 +5087,28 @@ public class ThriftHiveMetastore { return this.success != null; } - public MetaException getO2() { + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -4862,11 +5131,19 @@ public class ThriftHiveMetastore { } break; + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + case O2: if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); } break; @@ -4880,6 +5157,9 @@ public class ThriftHiveMetastore { case SUCCESS: return getSuccess(); + case O1: + return getO1(); + case O2: return getO2(); @@ -4893,6 +5173,8 @@ public class ThriftHiveMetastore { switch (fieldID) { case SUCCESS: return isSetSuccess(); + case O1: + return isSetO1(); case O2: return isSetO2(); default: @@ -4922,6 +5204,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + boolean this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -4958,9 +5249,17 @@ public class ThriftHiveMetastore { TProtocolUtil.skip(iprot, field.type); } break; + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new MetaException(); + this.o1.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new MetaException(); + this.o2 = new 
NoSuchObjectException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); @@ -4984,6 +5283,10 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); this.success.write(oprot); oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); } else if (this.isSetO2()) { oprot.writeFieldBegin(O2_FIELD_DESC); this.o2.write(oprot); @@ -5006,6 +5309,14 @@ public class ThriftHiveMetastore { } first = false; if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); @@ -5806,12 +6117,15 @@ public class ThriftHiveMetastore { public static class drop_type_result implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("drop_type_result"); private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.BOOL, (short)0); - private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)1); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); private boolean success; public static final int SUCCESS = 0; - private MetaException o2; - public static final int O2 = 1; + private MetaException o1; + public static final int O1 = 1; + private NoSuchObjectException o2; + public static final int O2 = 2; private final Isset __isset = new Isset(); private static final class Isset implements java.io.Serializable { @@ -5821,6 +6135,8 @@ public class ThriftHiveMetastore { public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -5834,11 +6150,13 @@ public class ThriftHiveMetastore { public drop_type_result( boolean success, - MetaException o2) + MetaException o1, + NoSuchObjectException o2) { this(); this.success = success; this.__isset.success = true; + this.o1 = o1; this.o2 = o2; } @@ -5848,8 +6166,11 @@ public class ThriftHiveMetastore { public drop_type_result(drop_type_result other) { __isset.success = other.__isset.success; this.success = other.success; + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } } @@ -5876,11 +6197,28 @@ public class ThriftHiveMetastore { return this.__isset.success; } - public MetaException getO2() { + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -5903,11 +6241,19 @@ public class ThriftHiveMetastore { } break; + case O1: + if (value == null) { + unsetO1(); + } else { + 
setO1((MetaException)value); + } + break; + case O2: if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); } break; @@ -5921,6 +6267,9 @@ public class ThriftHiveMetastore { case SUCCESS: return new Boolean(isSuccess()); + case O1: + return getO1(); + case O2: return getO2(); @@ -5934,6 +6283,8 @@ public class ThriftHiveMetastore { switch (fieldID) { case SUCCESS: return isSetSuccess(); + case O1: + return isSetO1(); case O2: return isSetO2(); default: @@ -5963,6 +6314,15 @@ public class ThriftHiveMetastore { return false; } + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + boolean this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -5999,9 +6359,17 @@ public class ThriftHiveMetastore { TProtocolUtil.skip(iprot, field.type); } break; + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new MetaException(); + this.o1.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; case O2: if (field.type == TType.STRUCT) { - this.o2 = new MetaException(); + this.o2 = new NoSuchObjectException(); this.o2.read(iprot); } else { TProtocolUtil.skip(iprot, field.type); @@ -6025,6 +6393,10 @@ public class ThriftHiveMetastore { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); oprot.writeBool(this.success); oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); } else if (this.isSetO2()) { oprot.writeFieldBegin(O2_FIELD_DESC); this.o2.write(oprot); @@ -6043,6 +6415,14 @@ public class ThriftHiveMetastore { sb.append(this.success); first = false; if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); diff --git metastore/src/gen-php/ThriftHiveMetastore.php metastore/src/gen-php/ThriftHiveMetastore.php index ea4add5..41475b6 100644 --- metastore/src/gen-php/ThriftHiveMetastore.php +++ metastore/src/gen-php/ThriftHiveMetastore.php @@ -10,10 +10,10 @@ include_once $GLOBALS['THRIFT_ROOT'].'/packages/hive_metastore/hive_metastore_ty include_once $GLOBALS['THRIFT_ROOT'].'/packages/fb303/FacebookService.php'; interface ThriftHiveMetastoreIf extends FacebookServiceIf { - public function create_database($name, $description); + public function create_database($database); public function get_database($name); public function drop_database($name); - public function get_databases(); + public function get_databases($pattern); public function get_type($name); public function create_type($type); public function drop_type($type); @@ -52,17 +52,16 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH parent::__construct($input, $output); } - public function create_database($name, $description) + public function create_database($database) { - $this->send_create_database($name, $description); + $this->send_create_database($database); return $this->recv_create_database(); } - public function send_create_database($name, $description) + public function send_create_database($database) { $args = new metastore_ThriftHiveMetastore_create_database_args(); - $args->name = 
$name; - $args->description = $description; + $args->database = $database; $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -107,6 +106,9 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH if ($result->o2 !== null) { throw $result->o2; } + if ($result->o3 !== null) { + throw $result->o3; + } throw new Exception("create_database failed: unknown result"); } @@ -215,21 +217,28 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH if ($result->success !== null) { return $result->success; } + if ($result->o1 !== null) { + throw $result->o1; + } if ($result->o2 !== null) { throw $result->o2; } + if ($result->o3 !== null) { + throw $result->o3; + } throw new Exception("drop_database failed: unknown result"); } - public function get_databases() + public function get_databases($pattern) { - $this->send_get_databases(); + $this->send_get_databases($pattern); return $this->recv_get_databases(); } - public function send_get_databases() + public function send_get_databases($pattern) { $args = new metastore_ThriftHiveMetastore_get_databases_args(); + $args->pattern = $pattern; $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -322,6 +331,9 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH if ($result->success !== null) { return $result->success; } + if ($result->o1 !== null) { + throw $result->o1; + } if ($result->o2 !== null) { throw $result->o2; } @@ -436,6 +448,9 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH if ($result->success !== null) { return $result->success; } + if ($result->o1 !== null) { + throw $result->o1; + } if ($result->o2 !== null) { throw $result->o2; } @@ -2075,28 +2090,21 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH class metastore_ThriftHiveMetastore_create_database_args { static $_TSPEC; - public $name = null; - public $description = null; + public $database = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'name', - 'type' => TType::STRING, - ), - 2 => array( - 'var' => 'description', - 'type' => TType::STRING, + 'var' => 'database', + 'type' => TType::STRUCT, + 'class' => 'metastore_Database', ), ); } if (is_array($vals)) { - if (isset($vals['name'])) { - $this->name = $vals['name']; - } - if (isset($vals['description'])) { - $this->description = $vals['description']; + if (isset($vals['database'])) { + $this->database = $vals['database']; } } } @@ -2121,15 +2129,9 @@ class metastore_ThriftHiveMetastore_create_database_args { switch ($fid) { case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->name); - } else { - $xfer += $input->skip($ftype); - } - break; - case 2: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->description); + if ($ftype == TType::STRUCT) { + $this->database = new metastore_Database(); + $xfer += $this->database->read($input); } else { $xfer += $input->skip($ftype); } @@ -2147,14 +2149,12 @@ class metastore_ThriftHiveMetastore_create_database_args { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_args'); - if ($this->name !== null) { - $xfer += $output->writeFieldBegin('name', TType::STRING, 1); - 
$xfer += $output->writeString($this->name); - $xfer += $output->writeFieldEnd(); - } - if ($this->description !== null) { - $xfer += $output->writeFieldBegin('description', TType::STRING, 2); - $xfer += $output->writeString($this->description); + if ($this->database !== null) { + if (!is_object($this->database)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('database', TType::STRUCT, 1); + $xfer += $this->database->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -2170,6 +2170,7 @@ class metastore_ThriftHiveMetastore_create_database_result { public $success = null; public $o1 = null; public $o2 = null; + public $o3 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -2186,6 +2187,11 @@ class metastore_ThriftHiveMetastore_create_database_result { 2 => array( 'var' => 'o2', 'type' => TType::STRUCT, + 'class' => 'metastore_InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, 'class' => 'metastore_MetaException', ), ); @@ -2200,6 +2206,9 @@ class metastore_ThriftHiveMetastore_create_database_result { if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } } } @@ -2239,12 +2248,20 @@ class metastore_ThriftHiveMetastore_create_database_result { break; case 2: if ($ftype == TType::STRUCT) { - $this->o2 = new metastore_MetaException(); + $this->o2 = new metastore_InvalidObjectException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new metastore_MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2273,6 +2290,11 @@ class metastore_ThriftHiveMetastore_create_database_result { $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -2549,7 +2571,9 @@ class metastore_ThriftHiveMetastore_drop_database_result { static $_TSPEC; public $success = null; + public $o1 = null; public $o2 = null; + public $o3 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -2558,9 +2582,19 @@ class metastore_ThriftHiveMetastore_drop_database_result { 'var' => 'success', 'type' => TType::BOOL, ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'metastore_NoSuchObjectException', + ), 2 => array( 'var' => 'o2', 'type' => TType::STRUCT, + 'class' => 'metastore_InvalidOperationException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, 'class' => 'metastore_MetaException', ), ); @@ -2569,9 +2603,15 @@ class metastore_ThriftHiveMetastore_drop_database_result { if (isset($vals['success'])) { $this->success = $vals['success']; } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } } } @@ -2601,14 +2641,30 @@ class metastore_ThriftHiveMetastore_drop_database_result { $xfer += $input->skip($ftype); } break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new metastore_NoSuchObjectException(); + $xfer += 
$this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; case 2: if ($ftype == TType::STRUCT) { - $this->o2 = new metastore_MetaException(); + $this->o2 = new metastore_InvalidOperationException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new metastore_MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2627,11 +2683,21 @@ class metastore_ThriftHiveMetastore_drop_database_result { $xfer += $output->writeBool($this->success); $xfer += $output->writeFieldEnd(); } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } if ($this->o2 !== null) { $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -2642,12 +2708,22 @@ class metastore_ThriftHiveMetastore_drop_database_result { class metastore_ThriftHiveMetastore_get_databases_args { static $_TSPEC; + public $pattern = null; - public function __construct() { + public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( + 1 => array( + 'var' => 'pattern', + 'type' => TType::STRING, + ), ); } + if (is_array($vals)) { + if (isset($vals['pattern'])) { + $this->pattern = $vals['pattern']; + } + } } public function getName() { @@ -2669,6 +2745,13 @@ class metastore_ThriftHiveMetastore_get_databases_args { } switch ($fid) { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->pattern); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2682,6 +2765,11 @@ class metastore_ThriftHiveMetastore_get_databases_args { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_databases_args'); + if ($this->pattern !== null) { + $xfer += $output->writeFieldBegin('pattern', TType::STRING, 1); + $xfer += $output->writeString($this->pattern); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -2885,6 +2973,7 @@ class metastore_ThriftHiveMetastore_get_type_result { static $_TSPEC; public $success = null; + public $o1 = null; public $o2 = null; public function __construct($vals=null) { @@ -2896,16 +2985,24 @@ class metastore_ThriftHiveMetastore_get_type_result { 'class' => 'metastore_Type', ), 1 => array( - 'var' => 'o2', + 'var' => 'o1', 'type' => TType::STRUCT, 'class' => 'metastore_MetaException', ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'metastore_NoSuchObjectException', + ), ); } if (is_array($vals)) { if (isset($vals['success'])) { $this->success = $vals['success']; } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } @@ -2941,7 +3038,15 @@ class metastore_ThriftHiveMetastore_get_type_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o2 = new metastore_MetaException(); + $this->o1 = new metastore_MetaException(); + $xfer += $this->o1->read($input); 
+ } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new metastore_NoSuchObjectException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); @@ -2968,8 +3073,13 @@ class metastore_ThriftHiveMetastore_get_type_result { $xfer += $this->success->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } if ($this->o2 !== null) { - $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 1); + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } @@ -3271,6 +3381,7 @@ class metastore_ThriftHiveMetastore_drop_type_result { static $_TSPEC; public $success = null; + public $o1 = null; public $o2 = null; public function __construct($vals=null) { @@ -3281,16 +3392,24 @@ class metastore_ThriftHiveMetastore_drop_type_result { 'type' => TType::BOOL, ), 1 => array( - 'var' => 'o2', + 'var' => 'o1', 'type' => TType::STRUCT, 'class' => 'metastore_MetaException', ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'metastore_NoSuchObjectException', + ), ); } if (is_array($vals)) { if (isset($vals['success'])) { $this->success = $vals['success']; } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } @@ -3325,7 +3444,15 @@ class metastore_ThriftHiveMetastore_drop_type_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o2 = new metastore_MetaException(); + $this->o1 = new metastore_MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new metastore_NoSuchObjectException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); @@ -3349,8 +3476,13 @@ class metastore_ThriftHiveMetastore_drop_type_result { $xfer += $output->writeBool($this->success); $xfer += $output->writeFieldEnd(); } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } if ($this->o2 !== null) { - $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 1); + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } diff --git metastore/src/gen-php/hive_metastore_types.php metastore/src/gen-php/hive_metastore_types.php index 61872a0..e914176 100644 --- metastore/src/gen-php/hive_metastore_types.php +++ metastore/src/gen-php/hive_metastore_types.php @@ -376,7 +376,8 @@ class metastore_Database { static $_TSPEC; public $name = null; - public $description = null; + public $comment = null; + public $locationUri = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -386,7 +387,11 @@ class metastore_Database { 'type' => TType::STRING, ), 2 => array( - 'var' => 'description', + 'var' => 'comment', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'locationUri', 'type' => TType::STRING, ), ); @@ -395,8 +400,11 @@ class metastore_Database { if (isset($vals['name'])) { $this->name = $vals['name']; } - if (isset($vals['description'])) { - $this->description = $vals['description']; + if (isset($vals['comment'])) { + $this->comment = $vals['comment']; + } + if (isset($vals['locationUri'])) { + 
$this->locationUri = $vals['locationUri']; } } } @@ -429,7 +437,14 @@ class metastore_Database { break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->description); + $xfer += $input->readString($this->comment); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->locationUri); } else { $xfer += $input->skip($ftype); } @@ -452,9 +467,14 @@ class metastore_Database { $xfer += $output->writeString($this->name); $xfer += $output->writeFieldEnd(); } - if ($this->description !== null) { - $xfer += $output->writeFieldBegin('description', TType::STRING, 2); - $xfer += $output->writeString($this->description); + if ($this->comment !== null) { + $xfer += $output->writeFieldBegin('comment', TType::STRING, 2); + $xfer += $output->writeString($this->comment); + $xfer += $output->writeFieldEnd(); + } + if ($this->locationUri !== null) { + $xfer += $output->writeFieldBegin('locationUri', TType::STRING, 3); + $xfer += $output->writeString($this->locationUri); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); diff --git metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote index fc06cba..d1eaeb5 100644 --- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -21,10 +21,10 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print 'Usage: ' + sys.argv[0] + ' [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]' print '' print 'Functions:' - print ' bool create_database(string name, string description)' + print ' bool create_database(Database database)' print ' Database get_database(string name)' print ' bool drop_database(string name)' - print ' get_databases()' + print ' get_databases(string pattern)' print ' Type get_type(string name)' print ' bool create_type(Type type)' print ' bool drop_type(string type)' @@ -105,10 +105,10 @@ client = ThriftHiveMetastore.Client(protocol) transport.open() if cmd == 'create_database': - if len(args) != 2: - print 'create_database requires 2 args' + if len(args) != 1: + print 'create_database requires 1 args' sys.exit(1) - pp.pprint(client.create_database(args[0],args[1],)) + pp.pprint(client.create_database(eval(args[0]),)) elif cmd == 'get_database': if len(args) != 1: @@ -123,10 +123,10 @@ elif cmd == 'drop_database': pp.pprint(client.drop_database(args[0],)) elif cmd == 'get_databases': - if len(args) != 0: - print 'get_databases requires 0 args' + if len(args) != 1: + print 'get_databases requires 1 args' sys.exit(1) - pp.pprint(client.get_databases()) + pp.pprint(client.get_databases(args[0],)) elif cmd == 'get_type': if len(args) != 1: diff --git metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py index 4a0bc67..1b5cc5c 100644 --- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py +++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -20,11 +20,10 @@ class Iface(fb303.FacebookService.Iface): """ This interface is live. 
""" - def create_database(self, name, description): + def create_database(self, database): """ Parameters: - - name - - description + - database """ pass @@ -42,7 +41,11 @@ class Iface(fb303.FacebookService.Iface): """ pass - def get_databases(self, ): + def get_databases(self, pattern): + """ + Parameters: + - pattern + """ pass def get_type(self, name): @@ -315,20 +318,18 @@ class Client(fb303.FacebookService.Client, Iface): def __init__(self, iprot, oprot=None): fb303.FacebookService.Client.__init__(self, iprot, oprot) - def create_database(self, name, description): + def create_database(self, database): """ Parameters: - - name - - description + - database """ - self.send_create_database(name, description) + self.send_create_database(database) return self.recv_create_database() - def send_create_database(self, name, description): + def send_create_database(self, database): self._oprot.writeMessageBegin('create_database', TMessageType.CALL, self._seqid) args = create_database_args() - args.name = name - args.description = description + args.database = database args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -349,6 +350,8 @@ class Client(fb303.FacebookService.Client, Iface): raise result.o1 if result.o2 != None: raise result.o2 + if result.o3 != None: + raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "create_database failed: unknown result"); def get_database(self, name): @@ -413,17 +416,26 @@ class Client(fb303.FacebookService.Client, Iface): self._iprot.readMessageEnd() if result.success != None: return result.success + if result.o1 != None: + raise result.o1 if result.o2 != None: raise result.o2 + if result.o3 != None: + raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_database failed: unknown result"); - def get_databases(self, ): - self.send_get_databases() + def get_databases(self, pattern): + """ + Parameters: + - pattern + """ + self.send_get_databases(pattern) return self.recv_get_databases() - def send_get_databases(self, ): + def send_get_databases(self, pattern): self._oprot.writeMessageBegin('get_databases', TMessageType.CALL, self._seqid) args = get_databases_args() + args.pattern = pattern args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -472,6 +484,8 @@ class Client(fb303.FacebookService.Client, Iface): self._iprot.readMessageEnd() if result.success != None: return result.success + if result.o1 != None: + raise result.o1 if result.o2 != None: raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_type failed: unknown result"); @@ -540,6 +554,8 @@ class Client(fb303.FacebookService.Client, Iface): self._iprot.readMessageEnd() if result.success != None: return result.success + if result.o1 != None: + raise result.o1 if result.o2 != None: raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_type failed: unknown result"); @@ -1637,11 +1653,13 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): iprot.readMessageEnd() result = create_database_result() try: - result.success = self._handler.create_database(args.name, args.description) + result.success = self._handler.create_database(args.database) except AlreadyExistsException, o1: result.o1 = o1 - except MetaException, o2: + except InvalidObjectException, o2: result.o2 = o2 + except MetaException, o3: + result.o3 = o3 oprot.writeMessageBegin("create_database", TMessageType.REPLY, seqid) 
result.write(oprot) oprot.writeMessageEnd() @@ -1670,8 +1688,12 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): result = drop_database_result() try: result.success = self._handler.drop_database(args.name) - except MetaException, o2: + except NoSuchObjectException, o1: + result.o1 = o1 + except InvalidOperationException, o2: result.o2 = o2 + except MetaException, o3: + result.o3 = o3 oprot.writeMessageBegin("drop_database", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() @@ -1683,7 +1705,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): iprot.readMessageEnd() result = get_databases_result() try: - result.success = self._handler.get_databases() + result.success = self._handler.get_databases(args.pattern) except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_databases", TMessageType.REPLY, seqid) @@ -1698,7 +1720,9 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): result = get_type_result() try: result.success = self._handler.get_type(args.name) - except MetaException, o2: + except MetaException, o1: + result.o1 = o1 + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_type", TMessageType.REPLY, seqid) result.write(oprot) @@ -1730,7 +1754,9 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): result = drop_type_result() try: result.success = self._handler.drop_type(args.type) - except MetaException, o2: + except MetaException, o1: + result.o1 = o1 + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("drop_type", TMessageType.REPLY, seqid) result.write(oprot) @@ -2189,19 +2215,16 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): class create_database_args: """ Attributes: - - name - - description + - database """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'name', None, None, ), # 1 - (2, TType.STRING, 'description', None, None, ), # 2 + (1, TType.STRUCT, 'database', (Database, Database.thrift_spec), None, ), # 1 ) - def __init__(self, name=None, description=None,): - self.name = name - self.description = description + def __init__(self, database=None,): + self.database = database def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2213,13 +2236,9 @@ class create_database_args: if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.name = iprot.readString(); - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.description = iprot.readString(); + if ftype == TType.STRUCT: + self.database = Database() + self.database.read(iprot) else: iprot.skip(ftype) else: @@ -2232,13 +2251,9 @@ class create_database_args: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('create_database_args') - if self.name != None: - oprot.writeFieldBegin('name', TType.STRING, 1) - oprot.writeString(self.name) - oprot.writeFieldEnd() - if self.description != None: - oprot.writeFieldBegin('description', TType.STRING, 2) - oprot.writeString(self.description) + if self.database != None: + oprot.writeFieldBegin('database', TType.STRUCT, 1) + self.database.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2260,18 +2275,21 @@ class create_database_result: - success - o1 - o2 + - o3 """ thrift_spec = ( (0, TType.BOOL, 
'success', None, None, ), # 0 (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success self.o1 = o1 self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2295,10 +2313,16 @@ class create_database_result: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = InvalidObjectException() self.o2.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2321,6 +2345,10 @@ class create_database_result: oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() + if self.o3 != None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2533,18 +2561,23 @@ class drop_database_result: """ Attributes: - success + - o1 - o2 + - o3 """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 - None, # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success + self.o1 = o1 self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2560,12 +2593,24 @@ class drop_database_result: self.success = iprot.readBool(); else: iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = InvalidOperationException() self.o2.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2580,10 +2625,18 @@ class drop_database_result: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() if self.o2 != None: oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() + if self.o3 != None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2599,10 +2652,19 @@ class 
drop_database_result: return not (self == other) class get_databases_args: + """ + Attributes: + - pattern + """ thrift_spec = ( + None, # 0 + (1, TType.STRING, 'pattern', None, None, ), # 1 ) + def __init__(self, pattern=None,): + self.pattern = pattern + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -2612,6 +2674,11 @@ class get_databases_args: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRING: + self.pattern = iprot.readString(); + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2622,6 +2689,10 @@ class get_databases_args: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('get_databases_args') + if self.pattern != None: + oprot.writeFieldBegin('pattern', TType.STRING, 1) + oprot.writeString(self.pattern) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2772,16 +2843,19 @@ class get_type_result: """ Attributes: - success + - o1 - o2 """ thrift_spec = ( (0, TType.STRUCT, 'success', (Type, Type.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success + self.o1 = o1 self.o2 = o2 def read(self, iprot): @@ -2801,7 +2875,13 @@ class get_type_result: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() self.o2.read(iprot) else: iprot.skip(ftype) @@ -2819,8 +2899,12 @@ class get_type_result: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() if self.o2 != None: - oprot.writeFieldBegin('o2', TType.STRUCT, 1) + oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -3048,16 +3132,19 @@ class drop_type_result: """ Attributes: - success + - o1 - o2 """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success + self.o1 = o1 self.o2 = o2 def read(self, iprot): @@ -3076,7 +3163,13 @@ class drop_type_result: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() self.o2.read(iprot) else: iprot.skip(ftype) @@ -3094,8 +3187,12 @@ class 
drop_type_result: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() if self.o2 != None: - oprot.writeFieldBegin('o2', TType.STRUCT, 1) + oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() diff --git metastore/src/gen-py/hive_metastore/ttypes.py metastore/src/gen-py/hive_metastore/ttypes.py index ea7269e..d76b6ba 100644 --- metastore/src/gen-py/hive_metastore/ttypes.py +++ metastore/src/gen-py/hive_metastore/ttypes.py @@ -269,18 +269,21 @@ class Database: """ Attributes: - name - - description + - comment + - locationUri """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 - (2, TType.STRING, 'description', None, None, ), # 2 + (2, TType.STRING, 'comment', None, None, ), # 2 + (3, TType.STRING, 'locationUri', None, None, ), # 3 ) - def __init__(self, name=None, description=None,): + def __init__(self, name=None, comment=None, locationUri=None,): self.name = name - self.description = description + self.comment = comment + self.locationUri = locationUri def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -298,7 +301,12 @@ class Database: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.description = iprot.readString(); + self.comment = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.locationUri = iprot.readString(); else: iprot.skip(ftype) else: @@ -315,9 +323,13 @@ class Database: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name) oprot.writeFieldEnd() - if self.description != None: - oprot.writeFieldBegin('description', TType.STRING, 2) - oprot.writeString(self.description) + if self.comment != None: + oprot.writeFieldBegin('comment', TType.STRING, 2) + oprot.writeString(self.comment) + oprot.writeFieldEnd() + if self.locationUri != None: + oprot.writeFieldBegin('locationUri', TType.STRING, 3) + oprot.writeString(self.locationUri) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 39dbd52..8abbb8a 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -114,8 +114,7 @@ public class HiveAlterHandler implements AlterHandler { // that means user is asking metastore to move data to new location // corresponding to the new name // get new location - newTblLoc = wh.getDefaultTablePath(newt.getDbName(), - newt.getTableName()).toString(); + newTblLoc = wh.getDefaultTablePath(dbname, newt.getTableName()).toString(); newt.getSd().setLocation(newTblLoc); oldTblLoc = oldt.getSd().getLocation(); moveData = true; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 4fb296a..0edb43c 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -18,6 +18,11 @@ package org.apache.hadoop.hive.metastore; +import static 
org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName; + import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; @@ -349,14 +354,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { return; } - private void createDefaultDB_core(RawStore ms) throws MetaException { + private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException { try { - ms.getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME); + ms.getDatabase(DEFAULT_DATABASE_NAME); } catch (NoSuchObjectException e) { ms.createDatabase( - new Database(MetaStoreUtils.DEFAULT_DATABASE_NAME, wh - .getDefaultDatabasePath(MetaStoreUtils.DEFAULT_DATABASE_NAME) - .toString())); + new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT, + wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString())); } HMSHandler.createDefaultDB = true; } @@ -378,6 +382,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { return Boolean.TRUE; } }); + } catch (InvalidObjectException e) { + throw new MetaException(e.getMessage()); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -400,9 +406,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { LOG.info(threadLocalId.get().toString() + ": " + m); } - private void logStartFunction(String f, String db, String tbl) { - LOG.info(threadLocalId.get().toString() + ": " + f + " : db=" + db - + " tbl=" + tbl); + private void logStartTableFunction(String f, String db, String tbl) { + LOG.info(threadLocalId.get().toString() + ": " + f + " : db=" + db + " tbl=" + tbl); + } + + private void logStartPartitionFunction(String f, String db, String tbl, List partVals) { + LOG.info(threadLocalId.get().toString() + ": " + f + " : db=" + db + " tbl=" + tbl + + "[" + join(partVals, ",") + "]"); } @Override @@ -420,40 +430,57 @@ public class HiveMetaStore extends ThriftHiveMetastore { System.exit(0); } - private boolean create_database_core(RawStore ms, final String name, - final String location_uri) throws AlreadyExistsException, MetaException { + private boolean create_database_core(RawStore ms, final Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException { + if (!validateName(db.getName())) { + throw new InvalidObjectException(db.getName() + " is not a valid database name"); + } boolean success = false; try { ms.openTransaction(); - Database db = new Database(name, location_uri); - if (ms.createDatabase(db) - && wh.mkdirs(wh.getDefaultDatabasePath(name))) { - success = ms.commitTransaction(); + if (null == db.getLocationUri()) { + db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString()); } + ms.createDatabase(db); + success = ms.commitTransaction(); } finally { if (!success) { ms.rollbackTransaction(); + } else { + wh.mkdirs(new Path(db.getLocationUri())); } } return success; } - public boolean create_database(final String name, final String location_uri) - throws AlreadyExistsException, MetaException { + public boolean create_database(final Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException { incrementCounter("create_database"); - logStartFunction("create_database: " + name); + logStartFunction("create_database: " + + db.getName() + " " + + db.getLocationUri() + " " + + db.getComment()); Boolean ret = null; try { + try { + 
if(null != get_database(db.getName())) { + throw new AlreadyExistsException("Database " + db.getName() + " already exists"); + } + } catch (NoSuchObjectException e) { + // expected + } ret = executeWithRetry(new Command() { @Override Boolean run(RawStore ms) throws Exception { - boolean success = create_database_core(ms, name, location_uri); + boolean success = create_database_core(ms, db); return Boolean.valueOf(success); } }); } catch (AlreadyExistsException e) { throw e; + } catch (InvalidObjectException e) { + throw e; } catch (MetaException e) { throw e; } catch (Exception e) { @@ -488,10 +515,16 @@ public class HiveMetaStore extends ThriftHiveMetastore { return db; } - private boolean drop_database_core(RawStore ms, final String name) throws MetaException { + private boolean drop_database_core(RawStore ms, final String name) + throws NoSuchObjectException, InvalidOperationException, MetaException { boolean success = false; + Database db = null; try { ms.openTransaction(); + db = ms.getDatabase(name); + if (!get_tables(db.getName(), ".*").isEmpty()) { + throw new InvalidOperationException("Database " + db.getName() + " is not empty"); + } if (ms.dropDatabase(name)) { success = ms.commitTransaction(); } @@ -499,18 +532,19 @@ public class HiveMetaStore extends ThriftHiveMetastore { if (!success) { ms.rollbackTransaction(); } else { - wh.deleteDir(wh.getDefaultDatabasePath(name), true); + wh.deleteDir(new Path(db.getLocationUri()), true); // it is not a terrible thing even if the data is not deleted } } return success; } - public boolean drop_database(final String name) throws MetaException { + public boolean drop_database(final String dbName) + throws NoSuchObjectException, InvalidOperationException, MetaException { incrementCounter("drop_database"); - logStartFunction("drop_database: " + name); - if (name.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { - throw new MetaException("Can't drop default database"); + logStartFunction("drop_database: " + dbName); + if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + throw new MetaException("Can not drop default database"); } Boolean ret = null; @@ -518,10 +552,14 @@ public class HiveMetaStore extends ThriftHiveMetastore { ret = executeWithRetry(new Command() { @Override Boolean run(RawStore ms) throws Exception { - boolean success = drop_database_core(ms, name); + boolean success = drop_database_core(ms, dbName); return Boolean.valueOf(success); } }); + } catch (NoSuchObjectException e) { + throw e; + } catch (InvalidOperationException e) { + throw e; } catch (MetaException e) { throw e; } catch (Exception e) { @@ -531,16 +569,16 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret.booleanValue(); } - public List get_databases() throws MetaException { + public List get_databases(final String pattern) throws MetaException { incrementCounter("get_databases"); - logStartFunction("get_databases"); + logStartFunction("get_databases: " + pattern); List ret = null; try { ret = executeWithRetry(new Command>() { @Override List run(RawStore ms) throws Exception { - return ms.getDatabases(); + return ms.getDatabases(pattern); } }); } catch (MetaException e) { @@ -552,23 +590,38 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret; } + private void create_type_core(final RawStore ms, final Type type) + throws AlreadyExistsException, MetaException, InvalidObjectException { + if (!MetaStoreUtils.validateName(type.getName())) { + throw new InvalidObjectException("Invalid type name"); + } + + boolean success 
= false; + try { + ms.openTransaction(); + if (is_type_exists(type.getName())) { + throw new AlreadyExistsException("Type " + type.getName() + " already exists"); + } + ms.createType(type); + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + public boolean create_type(final Type type) throws AlreadyExistsException, MetaException, InvalidObjectException { incrementCounter("create_type"); logStartFunction("create_type: " + type.getName()); - // check whether type already exists - if (get_type(type.getName()) != null) { - throw new AlreadyExistsException("Type " + type.getName() - + " already exists"); - } - Boolean ret = null; try { ret = executeWithRetry(new Command() { @Override Boolean run(RawStore ms) throws Exception { - // TODO:pc Validation of types should be done by clients or here???? - return Boolean.valueOf(ms.createType(type)); + create_type_core(ms, type); + return Boolean.TRUE; } }); } catch (AlreadyExistsException e) { @@ -585,7 +638,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret.booleanValue(); } - public Type get_type(final String name) throws MetaException { + public Type get_type(final String name) throws MetaException, NoSuchObjectException { incrementCounter("get_type"); logStartFunction("get_type: " + name); @@ -594,9 +647,15 @@ public class HiveMetaStore extends ThriftHiveMetastore { ret = executeWithRetry(new Command() { @Override Type run(RawStore ms) throws Exception { - return ms.getType(name); + Type type = ms.getType(name); + if (null == type) { + throw new NoSuchObjectException("Type \"" + name + "\" not found."); + } + return type; } }); + } catch (NoSuchObjectException e) { + throw e; } catch (MetaException e) { throw e; } catch (Exception e) { @@ -606,6 +665,37 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret; } + public boolean is_type_exists(String typeName) throws MetaException { + incrementCounter("is_type_exists"); + logStartFunction("is_type_exists: " + typeName); + try { + return (get_type(typeName) != null); + } catch (NoSuchObjectException e) { + return false; + } + } + + private void drop_type_core(final RawStore ms, String typeName) + throws NoSuchObjectException, MetaException { + boolean success = false; + try { + ms.openTransaction(); + // drop any partitions + if (!is_type_exists(typeName)) { + throw new NoSuchObjectException(typeName + " doesn't exist"); + } + if (!ms.dropType(typeName)) { + throw new MetaException("Unable to drop type " + typeName); + } + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + + public boolean drop_type(final String name) throws MetaException { incrementCounter("drop_type"); logStartFunction("drop_type: " + name); @@ -631,7 +721,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public Map get_type_all(String name) throws MetaException { incrementCounter("get_type_all"); // TODO Auto-generated method stub - logStartFunction("get_type_all"); + logStartFunction("get_type_all: " + name); throw new MetaException("Not yet implemented"); } @@ -650,13 +740,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { boolean success = false, madeDir = false; try { ms.openTransaction(); - + // get_table checks whether database exists, it should be moved here if (is_table_exists(tbl.getDbName(), tbl.getTableName())) { throw new AlreadyExistsException("Table " + tbl.getTableName() + " already exists"); } - + if 
(!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) { if (tbl.getSd().getLocation() == null || tbl.getSd().getLocation().isEmpty()) { @@ -727,6 +817,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { public boolean is_table_exists(String dbname, String name) throws MetaException { + incrementCounter("is_table_exists"); + logStartTableFunction("is_table_exists", dbname, name); try { return (get_table(dbname, name) != null); } catch (NoSuchObjectException e) { @@ -754,7 +846,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { if (tbl.getSd() == null) { throw new MetaException("Table metadata is corrupted"); } - + isIndexTable = isIndexTable(tbl); if (isIndexTable) { throw new RuntimeException( @@ -778,7 +870,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { if (tbl.getSd().getLocation() != null) { tblPath = new Path(tbl.getSd().getLocation()); } - + if (!ms.dropTable(dbname, name)) { throw new MetaException("Unable to drop table"); } @@ -797,7 +889,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public void drop_table(final String dbname, final String name, final boolean deleteData) throws NoSuchObjectException, MetaException { incrementCounter("drop_table"); - logStartFunction("drop_table", dbname, name); + logStartTableFunction("drop_table", dbname, name); try { executeWithRetry(new Command() { @@ -828,7 +920,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { private boolean isExternal(Table table) { return MetaStoreUtils.isExternalTable(table); } - + private boolean isIndexTable (Table table) { return MetaStoreUtils.isIndexTable(table); } @@ -837,7 +929,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { NoSuchObjectException { Table t = null; incrementCounter("get_table"); - logStartFunction("get_table", dbname, name); + logStartTableFunction("get_table", dbname, name); try { t = executeWithRetry(new Command() { @Override @@ -864,7 +956,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public boolean set_table_parameters(String dbname, String name, Map params) throws NoSuchObjectException, MetaException { incrementCounter("set_table_parameters"); - logStartFunction("set_table_parameters", dbname, name); + logStartTableFunction("set_table_parameters", dbname, name); // TODO Auto-generated method stub return false; } @@ -938,7 +1030,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { final List part_vals) throws InvalidObjectException, AlreadyExistsException, MetaException { incrementCounter("append_partition"); - logStartFunction("append_partition", dbName, tableName); + logStartPartitionFunction("append_partition", dbName, tableName, part_vals); if (LOG.isDebugEnabled()) { for (String part : part_vals) { LOG.debug(part); @@ -970,7 +1062,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { throws MetaException, InvalidObjectException, AlreadyExistsException { String db = parts.get(0).getDbName(); String tbl = parts.get(0).getTableName(); - logStartFunction("add_partitions", db, tbl); + logStartTableFunction("add_partitions", db, tbl); boolean success = false; try { ms.openTransaction(); @@ -1083,7 +1175,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public Partition add_partition(final Partition part) throws InvalidObjectException, AlreadyExistsException, MetaException { incrementCounter("add_partition"); - logStartFunction("add_partition", part.getDbName(), part.getTableName()); + logStartTableFunction("add_partition", part.getDbName(), part.getTableName()); 
Partition ret = null; try { @@ -1164,7 +1256,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { final List part_vals, final boolean deleteData) throws NoSuchObjectException, MetaException, TException { incrementCounter("drop_partition"); - logStartFunction("drop_partition", db_name, tbl_name); + logStartPartitionFunction("drop_partition", db_name, tbl_name, part_vals); LOG.info("Partition values:" + part_vals); Boolean ret = null; @@ -1193,7 +1285,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public Partition get_partition(final String db_name, final String tbl_name, final List part_vals) throws MetaException, NoSuchObjectException { incrementCounter("get_partition"); - logStartFunction("get_partition", db_name, tbl_name); + logStartPartitionFunction("get_partition", db_name, tbl_name, part_vals); Partition ret = null; try { @@ -1217,7 +1309,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_partitions(final String db_name, final String tbl_name, final short max_parts) throws NoSuchObjectException, MetaException { incrementCounter("get_partitions"); - logStartFunction("get_partitions", db_name, tbl_name); + logStartTableFunction("get_partitions", db_name, tbl_name); List ret = null; try { @@ -1242,7 +1334,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_partition_names(final String db_name, final String tbl_name, final short max_parts) throws MetaException { incrementCounter("get_partition_names"); - logStartFunction("get_partition_names", db_name, tbl_name); + logStartTableFunction("get_partition_names", db_name, tbl_name); List ret = null; try { @@ -1277,7 +1369,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { final Partition new_part) throws InvalidOperationException, MetaException, TException { incrementCounter("alter_partition"); - logStartFunction("alter_partition", db_name, tbl_name); + logStartTableFunction("alter_partition", db_name, tbl_name); LOG.info("Partition values:" + new_part.getValues()); try { @@ -1622,7 +1714,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List part_vals, short max_parts) throws MetaException, TException { incrementCounter("get_partitions_ps"); - logStartFunction("get_partitions_ps", db_name, tbl_name); + logStartPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals); List parts = null; List matchingParts = new ArrayList(); @@ -1650,7 +1742,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, TException { incrementCounter("get_partition_names_ps"); - logStartFunction("get_partitions_names_ps", db_name, tbl_name); + logStartPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals); Table t; try { t = get_table(db_name, tbl_name); @@ -1724,12 +1816,12 @@ public class HiveMetaStore extends ThriftHiveMetastore { } return ret; } - + private Index add_index_core(final RawStore ms, final Index index, final Table indexTable) throws InvalidObjectException, AlreadyExistsException, MetaException { - + boolean success = false, indexTableCreated = false; - + try { ms.openTransaction(); Index old_index = null; @@ -1746,13 +1838,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { throw new InvalidObjectException( "Unable to add index because database or the orginal table do not exist"); } - + // set create time long time = System.currentTimeMillis() / 1000; Table indexTbl = indexTable; if 
(indexTbl != null) { try { - indexTbl = ms.getTable(index.getDbName(), index.getIndexTableName()); + indexTbl = ms.getTable(index.getDbName(), index.getIndexTableName()); } catch (Exception e) { } if (indexTbl != null) { @@ -1812,7 +1904,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret.booleanValue(); } - + private boolean drop_index_by_name_core(final RawStore ms, final String dbName, final String tblName, final String indexName, final boolean deleteData) throws NoSuchObjectException, @@ -1822,14 +1914,14 @@ public class HiveMetaStore extends ThriftHiveMetastore { Path tblPath = null; try { ms.openTransaction(); - + //drop the underlying index table Index index = get_index_by_name(dbName, tblName, indexName); if (index == null) { throw new NoSuchObjectException(indexName + " doesn't exist"); } ms.dropIndex(dbName, tblName, indexName); - + String idxTblName = index.getIndexTableName(); if (idxTblName != null) { Table tbl = null; @@ -1837,7 +1929,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { if (tbl.getSd() == null) { throw new MetaException("Table metadata is corrupted"); } - + if (tbl.getSd().getLocation() != null) { tblPath = new Path(tbl.getSd().getLocation()); } @@ -1889,7 +1981,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { } return ret; } - + private Index get_index_by_name_core(final RawStore ms, final String db_name, final String tbl_name, final String index_name) throws MetaException, NoSuchObjectException, TException { @@ -1906,7 +1998,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_index_names(final String dbName, final String tblName, final short maxIndexes) throws MetaException, TException { incrementCounter("get_index_names"); - logStartFunction("get_index_names", dbName, tblName); + logStartTableFunction("get_index_names", dbName, tblName); List ret = null; try { @@ -1929,8 +2021,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { public List get_indexes(final String dbName, final String tblName, final short maxIndexes) throws NoSuchObjectException, MetaException, TException { - incrementCounter("get_indexs"); - logStartFunction("get_indexs", dbName, tblName); + incrementCounter("get_indexes"); + logStartTableFunction("get_indexes", dbName, tblName); List ret = null; try { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index c6541af..d3ba5fa 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.metastore; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -60,6 +62,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { private URI metastoreUris[]; private final boolean standAloneClient = false; private final HiveMetaHookLoader hookLoader; + private final Warehouse wh; // for thrift connects private int retries = 5; @@ -79,6 +82,8 @@ public class HiveMetaStoreClient implements IMetaStoreClient { conf = new HiveConf(HiveMetaStoreClient.class); } + wh = new Warehouse(conf); + boolean localMetaStore = conf.getBoolean("hive.metastore.local", false); if (localMetaStore) { // instantiate the metastore server handler directly instead of connecting @@ -208,8 +213,7 @@ public 
class HiveMetaStoreClient implements IMetaStoreClient { throws MetaException, NoSuchObjectException { // assume that it is default database try { - this.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, - deleteData, false); + this.dropTable(DEFAULT_DATABASE_NAME, tableName, deleteData, false); } catch (NoSuchObjectException e) { throw e; } catch (Exception e) { @@ -256,19 +260,55 @@ public class HiveMetaStoreClient implements IMetaStoreClient { return deepCopy( client.append_partition_by_name(dbName, tableName, partName)); } + + + /** - * @param name - * @param location_uri + * @param db * @return true or false * @throws AlreadyExistsException + * @throws InvalidObjectException * @throws MetaException * @throws TException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(java.lang.String, * java.lang.String) */ - public boolean createDatabase(String name, String location_uri) - throws AlreadyExistsException, MetaException, TException { - return client.create_database(name, location_uri); + public void createDatabase(Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + client.create_database(db); + } + + /** + * @param name + * @param comment + * @throws AlreadyExistsException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(java.lang.String, + * java.lang.String) + */ + public void createDatabase(String name, String comment) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + Database db = new Database(); + db.setName(name); + db.setLocationUri(wh.getDefaultDatabasePath(name).toString()); + db.setComment(comment); + createDatabase(db); + } + + /** + * @param name + * @throws AlreadyExistsException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(java.lang.String, + * java.lang.String) + */ + public void createDatabase(String name) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + createDatabase(name, ""); } /** @@ -315,14 +355,32 @@ public class HiveMetaStoreClient implements IMetaStoreClient { /** * @param name * @return true or false + * @throws NoSuchObjectException + * @throws InvalidOperationException * @throws MetaException * @throws TException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String) */ - public boolean dropDatabase(String name) throws MetaException, TException { - return client.drop_database(name); + public void dropDatabase(String name) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(name, false); + } + + + public void dropDatabase(String name, boolean ignoreUnknownDb) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + try { + getDatabase(name); + } catch (NoSuchObjectException e) { + if (!ignoreUnknownDb) { + throw e; + } + return; + } + client.drop_database(name); } + /** * @param tbl_name * @param db_name @@ -431,7 +489,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { * @throws TException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String) */ - public boolean dropType(String type) throws MetaException, TException { + public boolean dropType(String type) throws 
NoSuchObjectException, MetaException, TException { return client.drop_type(type); } @@ -461,8 +519,14 @@ public class HiveMetaStoreClient implements IMetaStoreClient { * @throws TException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_databases() */ - public List getDatabases() throws MetaException, TException { - return client.get_databases(); + public List getDatabases(String databasePattern) + throws MetaException { + try { + return client.get_databases(databasePattern); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; } /** @@ -537,9 +601,10 @@ public class HiveMetaStoreClient implements IMetaStoreClient { * @return the type * @throws MetaException * @throws TException + * @throws NoSuchObjectException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String) */ - public Type getType(String name) throws MetaException, TException { + public Type getType(String name) throws NoSuchObjectException, MetaException, TException { return deepCopy(client.get_type(name)); } @@ -554,14 +619,13 @@ public class HiveMetaStoreClient implements IMetaStoreClient { } public List getTables(String tablePattern) throws MetaException { - String dbname = MetaStoreUtils.DEFAULT_DATABASE_NAME; - return this.getTables(dbname, tablePattern); + return getTables(DEFAULT_DATABASE_NAME, tablePattern); } - public boolean tableExists(String tableName) throws MetaException, + public boolean tableExists(String databaseName, String tableName) throws MetaException, TException, UnknownDBException { try { - client.get_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + client.get_table(databaseName, tableName); } catch (NoSuchObjectException e) { return false; } @@ -570,7 +634,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { public Table getTable(String tableName) throws MetaException, TException, NoSuchObjectException { - return getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + return getTable(DEFAULT_DATABASE_NAME, tableName); } public List listPartitionNames(String dbName, String tblName, @@ -604,7 +668,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { UnknownDBException { return deepCopyFieldSchemas(client.get_fields(db, tableName)); } - + /** * create an index * @param index the index object @@ -613,12 +677,12 @@ public class HiveMetaStoreClient implements IMetaStoreClient { * @throws MetaException * @throws NoSuchObjectException * @throws TException - * @throws AlreadyExistsException + * @throws AlreadyExistsException */ public void createIndex(Index index, Table indexTable) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { client.add_index(index, indexTable); } - + /** * @param dbName * @param tblName @@ -652,7 +716,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { /** * list all the index names of the give base table. 
- * + * * @param db_name * @param tbl_name * @param max @@ -664,7 +728,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { throws NoSuchObjectException, MetaException, TException { return client.get_indexes(dbName, tblName, max); } - + /** * @param db * @param tableName diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 6013644..dc0a502 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -23,9 +23,9 @@ import java.util.Map; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.IndexAlreadyExistsException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -44,6 +44,9 @@ public interface IMetaStoreClient { public void close(); + public List getDatabases(String databasePattern) + throws MetaException, UnknownTableException, TException, UnknownDBException; + public List getTables(String dbName, String tablePattern) throws MetaException, UnknownTableException, TException, UnknownDBException; @@ -91,10 +94,22 @@ public interface IMetaStoreClient { // MetaException, UnknownTableException, // TException; - public boolean tableExists(String tableName) throws MetaException, + public boolean tableExists(String databaseName, String tableName) throws MetaException, TException, UnknownDBException; /** + * Get a Database Object + * @param databaseName name of the database to fetch + * @return + * @throws NoSuchObjectException The database does not exist + * @throws MetaException Could not fetch the database + * @throws TException A thrift communication error occurred + */ + public Database getDatabase(String databaseName) + throws NoSuchObjectException, MetaException, TException; + + + /** * Get a table object. 
* * @param tableName @@ -227,10 +242,14 @@ public interface IMetaStoreClient { public void alter_table(String defaultDatabaseName, String tblName, Table table) throws InvalidOperationException, MetaException, TException; - public boolean createDatabase(String name, String location_uri) - throws AlreadyExistsException, MetaException, TException; + public void createDatabase(Database db) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + public void dropDatabase(String name) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException; - public boolean dropDatabase(String name) throws MetaException, TException; + public void dropDatabase(String name, boolean ignoreUnknownDb) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException; /** * @param db_name @@ -339,13 +358,13 @@ public interface IMetaStoreClient { * @throws MetaException * @throws NoSuchObjectException * @throws TException - * @throws AlreadyExistsException + * @throws AlreadyExistsException */ public void createIndex(Index index, Table indexTable) throws InvalidObjectException, MetaException, NoSuchObjectException, TException, AlreadyExistsException; /** - * + * * @param dbName * @param tblName * @param indexName @@ -375,7 +394,7 @@ public interface IMetaStoreClient { /** * list all the index names of the give base table. - * + * * @param db_name * @param tbl_name * @param max @@ -385,7 +404,7 @@ public interface IMetaStoreClient { */ public List listIndexNames(String db_name, String tbl_name, short max) throws MetaException, TException; - + /** * @param db_name * @param tbl_name diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 0818689..968cc9b 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -60,7 +60,8 @@ public class MetaStoreUtils { protected static final Log LOG = LogFactory.getLog("hive.log"); public static final String DEFAULT_DATABASE_NAME = "default"; - + public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database"; + /** * printStackTrace * @@ -883,7 +884,7 @@ public class MetaStoreUtils { } return true; } - + public static String getIndexTableName(String dbName, String baseTblName, String indexName) { return dbName + "__" + baseTblName + "_" + indexName + "__"; } @@ -894,5 +895,5 @@ public class MetaStoreUtils { } return TableType.INDEX_TABLE.toString().equals(table.getTableType()); } - + } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index a06384c..a3154cb 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -286,28 +286,21 @@ public class ObjectStore implements RawStore, Configurable { } } - public boolean createDatabase(Database db) { - boolean success = false; + public void createDatabase(Database db) { boolean commited = false; - MDatabase mdb = new MDatabase(db.getName().toLowerCase(), db - .getDescription()); + MDatabase mdb = new MDatabase(); + mdb.setName(db.getName().toLowerCase()); + mdb.setLocationUri(db.getLocationUri()); + mdb.setComment(db.getComment()); try { openTransaction(); pm.makePersistent(mdb); - success = true; commited = commitTransaction(); } finally { if 
(!commited) { rollbackTransaction(); } } - return success; - } - - public boolean createDatabase(String name) { - // TODO: get default path - Database db = new Database(name, "default_path"); - return this.createDatabase(db); } @SuppressWarnings("nls") @@ -346,7 +339,7 @@ public class ObjectStore implements RawStore, Configurable { rollbackTransaction(); } } - return new Database(db.getName(), db.getDescription()); + return new Database(db.getName(), db.getComment(), db.getLocationUri()); } public boolean dropDatabase(String dbname) { @@ -389,23 +382,42 @@ public class ObjectStore implements RawStore, Configurable { return success; } - public List getDatabases() { - List dbs = null; + + public List getDatabases(String pattern) + throws MetaException { boolean commited = false; + List databases = null; try { openTransaction(); - Query query = pm.newQuery(MDatabase.class); - query.setResult("name"); - query.setResultClass(String.class); - query.setOrdering("name asc"); - dbs = (List) query.execute(); + // Take the pattern and split it on the | to get all the composing + // patterns + String[] subpatterns = pattern.trim().split("\\|"); + String query = "select name from org.apache.hadoop.hive.metastore.model.MDatabase where ("; + boolean first = true; + for (String subpattern : subpatterns) { + subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); + if (!first) { + query = query + " || "; + } + query = query + " name.matches(\"" + subpattern + "\")"; + first = false; + } + query = query + ")"; + + Query q = pm.newQuery(query); + q.setResult("name"); + Collection names = (Collection) q.execute(); + databases = new ArrayList(); + for (Iterator i = names.iterator(); i.hasNext();) { + databases.add((String) i.next()); + } commited = commitTransaction(); } finally { if (!commited) { rollbackTransaction(); } } - return dbs; + return databases; } private MType getMType(Type type) { @@ -1077,7 +1089,7 @@ public class ObjectStore implements RawStore, Configurable { } return success; } - + private MIndex getMIndex(String dbName, String originalTblName, String indexName) throws MetaException { MIndex midx = null; boolean commited = false; @@ -1126,7 +1138,7 @@ public class ObjectStore implements RawStore, Configurable { return new Index( mIndex.getIndexName(), mIndex.getIndexHandlerClass(), - MetaStoreUtils.DEFAULT_DATABASE_NAME, + mIndex.getOrigTable().getDatabase().getName(), mIndex.getOrigTable().getTableName(), mIndex.getCreateTime(), mIndex.getLastAccessTime(), @@ -1156,7 +1168,7 @@ public class ObjectStore implements RawStore, Configurable { } } } - + private List listMIndexes(String dbName, String origTableName, int max) { boolean success = false; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index 4951bd6..bc96f47 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -37,7 +37,7 @@ public interface RawStore extends Configurable { /** * Opens a new one or the one already created Every call of this function must * have corresponding commit or rollback function call - * + * * @return an active transaction */ @@ -46,7 +46,7 @@ public interface RawStore extends Configurable { /** * if this is the commit of the first open call then an actual commit is * called. 
- * + * * @return true or false */ public abstract boolean commitTransaction(); @@ -56,16 +56,15 @@ public interface RawStore extends Configurable { */ public abstract void rollbackTransaction(); - public abstract boolean createDatabase(Database db) throws MetaException; - - public abstract boolean createDatabase(String name) throws MetaException; + public abstract void createDatabase(Database db) + throws InvalidObjectException, MetaException; public abstract Database getDatabase(String name) throws NoSuchObjectException; public abstract boolean dropDatabase(String dbname); - public abstract List getDatabases() throws MetaException; + public abstract List getDatabases(String pattern) throws MetaException; public abstract boolean createType(Type type); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java index 4488f94..cda0c3b 100755 --- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.metastore; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; @@ -47,7 +49,9 @@ import org.apache.hadoop.hive.metastore.api.MetaException; public class Warehouse { private Path whRoot; private final Configuration conf; - String whRootString; + private final String whRootString; + + private static final String DATABASE_SUFFIX = ".db"; public static final Log LOG = LogFactory.getLog("hive.metastore.warehouse"); @@ -117,10 +121,10 @@ public class Warehouse { } public Path getDefaultDatabasePath(String dbName) throws MetaException { - if (dbName.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { + if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { return getWhRoot(); } - return new Path(getWhRoot(), dbName.toLowerCase() + ".db"); + return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_SUFFIX); } public Path getDefaultTablePath(String dbName, String tableName) @@ -328,7 +332,7 @@ public class Warehouse { } return FileUtils.makePartName(colNames, vals); } - + public static List getPartValuesFromPartName(String partName) throws MetaException { LinkedHashMap partSpec = Warehouse.makeSpecFromName(partName); diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java index b3e098d..0528885 100644 --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java @@ -27,7 +27,8 @@ package org.apache.hadoop.hive.metastore.model; */ public class MDatabase { private String name; - private String description; + private String locationUri; + private String comment; /** * Default construction to keep jpox/jdo happy @@ -39,9 +40,10 @@ public class MDatabase { * @param name of the database * @param location future use */ - public MDatabase(String name, String location) { + public MDatabase(String name, String locationUri, String comment) { this.name = name; - this.description = location; + this.locationUri = locationUri; + this.comment = comment; } /** @@ -59,17 +61,30 @@ public class MDatabase { } /** - * @return the description + * @return the location_uri */ - public String getDescription() { - return description; + public String getLocationUri() { + return 
locationUri; } /** - * @param description the description to set + * @param locationUri the locationUri to set */ - public void setDescription(String description) { - this.description = description; + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; } + /** + * @return the comment + */ + public String getComment() { + return comment; + } + + /** + * @param comment the comment to set + */ + public void setComment(String comment) { + this.comment = comment; + } } diff --git metastore/src/model/package.jdo metastore/src/model/package.jdo index 206ba75..7bc1e52 100644 --- metastore/src/model/package.jdo +++ metastore/src/model/package.jdo @@ -8,12 +8,15 @@ - + - - - + + + + + + diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java new file mode 100644 index 0000000..8558ace --- /dev/null +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.util.StringUtils; + +public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore { + + @Override + protected void setUp() throws Exception { + super.setUp(); + + try { + client = new HiveMetaStoreClient(hiveConf, null); + } catch (Throwable e) { + System.err.println("Unable to open the metastore"); + System.err.println(StringUtils.stringifyException(e)); + throw new Exception(e); + } + } + + @Override + protected void tearDown() throws Exception { + try { + super.tearDown(); + client.close(); + } catch (Throwable e) { + System.err.println("Unable to close metastore"); + System.err.println(StringUtils.stringifyException(e)); + throw new Exception(e); + } + } +} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index fff6aad..9472b52 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -48,40 +48,25 @@ import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TException; -public class TestHiveMetaStore extends TestCase { - private HiveMetaStoreClient client; - private HiveConf hiveConf; +public abstract class TestHiveMetaStore extends TestCase { + protected static HiveMetaStoreClient client; + protected static HiveConf hiveConf; + protected static Warehouse warehouse; + protected static boolean isThriftClient = false; + + private static final String TEST_DB1_NAME = "testdb1"; + private static final String TEST_DB2_NAME = "testdb2"; @Override protected void setUp() throws Exception { - super.setUp(); hiveConf = new HiveConf(this.getClass()); + warehouse = new Warehouse(hiveConf); // set some values to use for getting conf. 
vars hiveConf.set("hive.key1", "value1"); hiveConf.set("hive.key2", "http://www.example.com"); hiveConf.set("hive.key3", ""); hiveConf.set("hive.key4", "0"); - - try { - client = new HiveMetaStoreClient(hiveConf, null); - } catch (Throwable e) { - System.err.println("Unable to open the metastore"); - System.err.println(StringUtils.stringifyException(e)); - throw new Exception(e); - } - } - - @Override - protected void tearDown() throws Exception { - try { - super.tearDown(); - client.close(); - } catch (Throwable e) { - System.err.println("Unable to close metastore"); - System.err.println(StringUtils.stringifyException(e)); - throw new Exception(e); - } } public void testNameMethods() { @@ -118,11 +103,11 @@ public class TestHiveMetaStore extends TestCase { * @throws Exception */ public void testPartition() throws Exception { - partitionTester(client, hiveConf, false); + partitionTester(client, hiveConf); } - public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf, - boolean isThriftClient) throws Exception { + public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf) + throws Exception { try { String dbName = "compdb"; String tblName = "comptbl"; @@ -139,9 +124,8 @@ public class TestHiveMetaStore extends TestCase { vals3.add("15"); client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + client.createDatabase(dbName, ""); client.dropType(typeName); Type typ1 = new Type(); @@ -151,8 +135,7 @@ public class TestHiveMetaStore extends TestCase { new FieldSchema("name", Constants.STRING_TYPE_NAME, "")); typ1.getFields().add( new FieldSchema("income", Constants.INT_TYPE_NAME, "")); - ret = client.createType(typ1); - assertTrue("Unable to create type " + typeName, ret); + client.createType(typ1); Table tbl = new Table(); tbl.setDbName(dbName); @@ -181,7 +164,7 @@ public class TestHiveMetaStore extends TestCase { client.createTable(tbl); - if(isThriftClient) { + if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' // object when the client is a thrift client and the code below relies // on the location being present in the 'tbl' object - so get the table @@ -306,9 +289,9 @@ public class TestHiveMetaStore extends TestCase { Path partPath = new Path(part2.getSd().getLocation()); FileSystem fs = FileSystem.get(partPath.toUri(), hiveConf); + assertTrue(fs.exists(partPath)); - ret = client.dropPartition(dbName, tblName, part.getValues(), true); - assertTrue(ret); + client.dropPartition(dbName, tblName, part.getValues(), true); assertFalse(fs.exists(partPath)); // Test append_partition_by_name @@ -326,12 +309,11 @@ public class TestHiveMetaStore extends TestCase { // add the partition again so that drop table with a partition can be // tested retp = client.add_partition(part); - assertNotNull("Unable to create partition " + part, ret); + assertNotNull("Unable to create partition " + part, retp); client.dropTable(dbName, tblName); - ret = client.dropType(typeName); - assertTrue("Unable to drop type " + typeName, ret); + client.dropType(typeName); // recreate table as external, drop partition and it should // still exist @@ -343,8 +325,11 @@ public class TestHiveMetaStore extends TestCase { client.dropPartition(dbName, tblName, part.getValues(), true); assertTrue(fs.exists(partPath)); - ret = client.dropDatabase(dbName); - assertTrue("Unable to create the 
databse " + dbName, ret); + for (String tableName : client.getTables(dbName, "*")) { + client.dropTable(dbName, tableName); + } + + client.dropDatabase(dbName); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); @@ -363,9 +348,8 @@ public class TestHiveMetaStore extends TestCase { vals.add("14"); client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + client.createDatabase(dbName, "Alter Partition Test database"); ArrayList cols = new ArrayList(2); cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, "")); @@ -398,6 +382,14 @@ public class TestHiveMetaStore extends TestCase { client.createTable(tbl); + if (isThriftClient) { + // the createTable() above does not update the location in the 'tbl' + // object when the client is a thrift client and the code below relies + // on the location being present in the 'tbl' object - so get the table + // from the metastore + tbl = client.getTable(dbName, tblName); + } + Partition part = new Partition(); part.setDbName(dbName); part.setTableName(tblName); @@ -426,8 +418,7 @@ public class TestHiveMetaStore extends TestCase { client.dropTable(dbName, tblName); - ret = client.dropDatabase(dbName); - assertTrue("Unable to create the databse " + dbName, ret); + client.dropDatabase(dbName); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testPartition() failed."); @@ -438,40 +429,35 @@ public class TestHiveMetaStore extends TestCase { public void testDatabase() throws Throwable { try { // clear up any existing databases - client.dropDatabase("test1"); - client.dropDatabase("test2"); - - boolean ret = client.createDatabase("test1", "strange_loc"); - assertTrue("Unable to create the databse", ret); + silentDropDatabase(TEST_DB1_NAME); + silentDropDatabase(TEST_DB2_NAME); + client.createDatabase(TEST_DB1_NAME); - Database db = client.getDatabase("test1"); + Database db = client.getDatabase(TEST_DB1_NAME); assertEquals("name of returned db is different from that of inserted db", - "test1", db.getName()); - assertEquals( - "location of the returned db is different from that of inserted db", - "strange_loc", db.getDescription()); + TEST_DB1_NAME, db.getName()); + assertEquals("location of the returned db is different from that of inserted db", + warehouse.getDefaultDatabasePath(TEST_DB1_NAME).toString(), db.getLocationUri()); - boolean ret2 = client.createDatabase("test2", "another_strange_loc"); - assertTrue("Unable to create the databse", ret2); + client.createDatabase(TEST_DB2_NAME); - Database db2 = client.getDatabase("test2"); + Database db2 = client.getDatabase(TEST_DB2_NAME); assertEquals("name of returned db is different from that of inserted db", - "test2", db2.getName()); - assertEquals( - "location of the returned db is different from that of inserted db", - "another_strange_loc", db2.getDescription()); + TEST_DB2_NAME, db2.getName()); + assertEquals("location of the returned db is different from that of inserted db", + warehouse.getDefaultDatabasePath(TEST_DB2_NAME).toString(), db2.getLocationUri()); - List dbs = client.getDatabases(); + List dbs = client.getDatabases(".*"); - assertTrue("first database is not test1", dbs.contains("test1")); - assertTrue("second database is not test2", dbs.contains("test2")); + assertTrue("first database is not " + TEST_DB1_NAME, dbs.contains(TEST_DB1_NAME)); + 
assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME)); - ret = client.dropDatabase("test1"); - assertTrue("couldn't delete first database", ret); - ret = client.dropDatabase("test2"); - assertTrue("couldn't delete second database", ret); + client.dropDatabase(TEST_DB1_NAME); + client.dropDatabase(TEST_DB2_NAME); + silentDropDatabase(TEST_DB1_NAME); + silentDropDatabase(TEST_DB2_NAME); } catch (Throwable e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testDatabase() failed."); @@ -495,9 +481,13 @@ public class TestHiveMetaStore extends TestCase { ret = client.dropType(Constants.INT_TYPE_NAME); assertTrue("unable to drop type integer", ret); - Type typ1_3 = null; - typ1_3 = client.getType(Constants.INT_TYPE_NAME); - assertNull("unable to drop type integer", typ1_3); + boolean exceptionThrown = false; + try { + client.getType(Constants.INT_TYPE_NAME); + } catch (NoSuchObjectException e) { + exceptionThrown = true; + } + assertTrue("Expected NoSuchObjectException", exceptionThrown); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testSimpleTypeApi() failed."); @@ -554,9 +544,13 @@ public class TestHiveMetaStore extends TestCase { ret = client.dropType("Person"); assertTrue("unable to drop type Person", ret); - Type typ1_3 = null; - typ1_3 = client.getType("Person"); - assertNull("unable to drop type Person", typ1_3); + boolean exceptionThrown = false; + try { + client.getType("Person"); + } catch (NoSuchObjectException e) { + exceptionThrown = true; + } + assertTrue("Expected NoSuchObjectException", exceptionThrown); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testComplexTypeApi() failed."); @@ -572,9 +566,8 @@ public class TestHiveMetaStore extends TestCase { String typeName = "Person"; client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + client.createDatabase(dbName); client.dropType(typeName); Type typ1 = new Type(); @@ -584,8 +577,7 @@ public class TestHiveMetaStore extends TestCase { new FieldSchema("name", Constants.STRING_TYPE_NAME, "")); typ1.getFields().add( new FieldSchema("income", Constants.INT_TYPE_NAME, "")); - ret = client.createType(typ1); - assertTrue("Unable to create type " + typeName, ret); + client.createType(typ1); Table tbl = new Table(); tbl.setDbName(dbName); @@ -610,6 +602,14 @@ public class TestHiveMetaStore extends TestCase { client.createTable(tbl); + if (isThriftClient) { + // the createTable() above does not update the location in the 'tbl' + // object when the client is a thrift client and the code below relies + // on the location being present in the 'tbl' object - so get the table + // from the metastore + tbl = client.getTable(dbName, tblName); + } + Table tbl2 = client.getTable(dbName, tblName); assertNotNull(tbl2); assertEquals(tbl2.getDbName(), dbName); @@ -647,6 +647,9 @@ public class TestHiveMetaStore extends TestCase { } client.createTable(tbl2); + if (isThriftClient) { + tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName()); + } Table tbl3 = client.getTable(dbName, tblName2); assertNotNull(tbl3); @@ -683,18 +686,15 @@ public class TestHiveMetaStore extends TestCase { (tbl2.getPartitionKeys() == null) || (tbl2.getPartitionKeys().size() == 0)); - FileSystem fs = FileSystem.get((new 
Path(tbl.getSd().getLocation())).toUri(), - hiveConf); + FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf); client.dropTable(dbName, tblName); assertFalse(fs.exists(new Path(tbl.getSd().getLocation()))); client.dropTable(dbName, tblName2); assertTrue(fs.exists(new Path(tbl2.getSd().getLocation()))); - ret = client.dropType(typeName); - assertTrue("Unable to drop type " + typeName, ret); - ret = client.dropDatabase(dbName); - assertTrue("Unable to drop databse " + dbName, ret); + client.dropType(typeName); + client.dropDatabase(dbName); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testSimpleTable() failed."); @@ -703,15 +703,15 @@ public class TestHiveMetaStore extends TestCase { } public void testAlterTable() throws Exception { - try { - String dbName = "alterdb"; - String invTblName = "alter-tbl"; - String tblName = "altertbl"; + String dbName = "alterdb"; + String invTblName = "alter-tbl"; + String tblName = "altertbl"; + try { client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + + client.createDatabase(dbName); ArrayList invCols = new ArrayList(2); invCols.add(new FieldSchema("n-ame", Constants.STRING_TYPE_NAME, "")); @@ -753,6 +753,10 @@ public class TestHiveMetaStore extends TestCase { tbl.getSd().setCols(cols); client.createTable(tbl); + if (isThriftClient) { + tbl = client.getTable(tbl.getDbName(), tbl.getTableName()); + } + // now try to invalid alter table Table tbl2 = client.getTable(dbName, tblName); failed = false; @@ -776,18 +780,22 @@ public class TestHiveMetaStore extends TestCase { assertEquals("Alter table didn't succeed. 
Num buckets is different ", tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets()); // check that data has moved - FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), - hiveConf); + FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf); assertFalse("old table location still exists", fs.exists(new Path(tbl .getSd().getLocation()))); assertTrue("data did not move to new location", fs.exists(new Path(tbl3 .getSd().getLocation()))); - assertEquals("alter table didn't move data correct location", tbl3 - .getSd().getLocation(), tbl2.getSd().getLocation()); + + if (!isThriftClient) { + assertEquals("alter table didn't move data correct location", tbl3 + .getSd().getLocation(), tbl2.getSd().getLocation()); + } } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testSimpleTable() failed."); throw e; + } finally { + silentDropDatabase(dbName); } } @@ -799,9 +807,8 @@ public class TestHiveMetaStore extends TestCase { try { client.dropTable(dbName, tblName); - client.dropDatabase(dbName); - boolean ret = client.createDatabase(dbName, "strange_loc"); - assertTrue("Unable to create the databse " + dbName, ret); + silentDropDatabase(dbName); + client.createDatabase(dbName); client.dropType(typeName); Type typ1 = new Type(); @@ -811,8 +818,7 @@ public class TestHiveMetaStore extends TestCase { new FieldSchema("name", Constants.STRING_TYPE_NAME, "")); typ1.getFields().add( new FieldSchema("income", Constants.INT_TYPE_NAME, "")); - ret = client.createType(typ1); - assertTrue("Unable to create type " + typeName, ret); + client.createType(typ1); Table tbl = new Table(); tbl.setDbName(dbName); @@ -889,8 +895,7 @@ public class TestHiveMetaStore extends TestCase { client.dropTable(dbName, tblName); boolean ret = client.dropType(typeName); assertTrue("Unable to drop type " + typeName, ret); - ret = client.dropDatabase(dbName); - assertTrue("Unable to create the databse " + dbName, ret); + client.dropDatabase(dbName); } } @@ -898,20 +903,21 @@ public class TestHiveMetaStore extends TestCase { String val = "value"; - try { - assertEquals(client.getConfigValue("hive.key1", val), "value1"); - assertEquals(client.getConfigValue("hive.key2", val), - "http://www.example.com"); - assertEquals(client.getConfigValue("hive.key3", val), ""); - assertEquals(client.getConfigValue("hive.key4", val), "0"); - assertEquals(client.getConfigValue("hive.key5", val), val); - assertEquals(client.getConfigValue(null, val), val); - } catch (TException e) { - e.printStackTrace(); - assert (false); - } catch (ConfigValSecurityException e) { - e.printStackTrace(); - assert (false); + if (!isThriftClient) { + try { + assertEquals(client.getConfigValue("hive.key1", val), "value1"); + assertEquals(client.getConfigValue("hive.key2", val), "http://www.example.com"); + assertEquals(client.getConfigValue("hive.key3", val), ""); + assertEquals(client.getConfigValue("hive.key4", val), "0"); + assertEquals(client.getConfigValue("hive.key5", val), val); + assertEquals(client.getConfigValue(null, val), val); + } catch (TException e) { + e.printStackTrace(); + assert (false); + } catch (ConfigValSecurityException e) { + e.printStackTrace(); + assert (false); + } } boolean threwException = false; @@ -934,4 +940,15 @@ public class TestHiveMetaStore extends TestCase { part.setCreateTime(part_get.getCreateTime()); part.putToParameters(org.apache.hadoop.hive.metastore.api.Constants.DDL_TIME, Long.toString(part_get.getCreateTime())); } + + private 
static void silentDropDatabase(String dbName) throws MetaException, TException { + try { + for (String tableName : client.getTables(dbName, "*")) { + client.dropTable(dbName, tableName); + } + client.dropDatabase(dbName); + } catch (NoSuchObjectException e) { + } catch (InvalidOperationException e) { + } + } } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreRemote.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreRemote.java deleted file mode 100644 index bc950b9..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreRemote.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.metastore; - -import junit.framework.TestCase; - -import org.apache.hadoop.hive.conf.HiveConf; - - -public class TestHiveMetaStoreRemote extends TestCase { - private static final String METASTORE_PORT = "29083"; -private HiveMetaStoreClient client; - private HiveConf hiveConf; - boolean isServerRunning = false; - - private static class RunMS implements Runnable { - - @Override - public void run() { - System.out.println("Running metastore!"); - String [] args = new String [1]; - args[0] = METASTORE_PORT; - HiveMetaStore.main(args); - } - - } - - @Override - protected void setUp() throws Exception { - super.setUp(); - if(isServerRunning) { - return; - } - Thread t = new Thread(new RunMS()); - t.start(); - - // Wait a little bit for the metastore to start. Should probably have - // a better way of detecting if the metastore has started? - Thread.sleep(5000); - - // Set conf to connect to the local metastore. - hiveConf = new HiveConf(this.getClass()); - // hive.metastore.local should be defined in HiveConf - hiveConf.set("hive.metastore.local", "false"); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + METASTORE_PORT); - hiveConf.setIntVar(HiveConf.ConfVars.METATORETHRIFTRETRIES, 3); - - client = new HiveMetaStoreClient(hiveConf); - // Now you have the client - run necessary tests. 
- isServerRunning = true; - } - - /** - * tests create table and partition and tries to drop the table without - * droppping the partition - * - * @throws Exception - */ - public void testPartition() throws Exception { - TestHiveMetaStore.partitionTester(client, hiveConf, true); - } - -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java new file mode 100644 index 0000000..57648b6 --- /dev/null +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.conf.HiveConf; + + +public class TestRemoteHiveMetaStore extends TestHiveMetaStore { + private static final String METASTORE_PORT = "29083"; + private static boolean isServerRunning = false; + + private static class RunMS implements Runnable { + + @Override + public void run() { + System.out.println("Running metastore!"); + String [] args = new String [1]; + args[0] = METASTORE_PORT; + HiveMetaStore.main(args); + } + + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + if(isServerRunning) { + return; + } + Thread t = new Thread(new RunMS()); + t.start(); + + // Wait a little bit for the metastore to start. Should probably have + // a better way of detecting if the metastore has started? + Thread.sleep(5000); + + // hive.metastore.local should be defined in HiveConf + hiveConf.set("hive.metastore.local", "false"); + hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + METASTORE_PORT); + hiveConf.setIntVar(HiveConf.ConfVars.METATORETHRIFTRETRIES, 3); + + client = new HiveMetaStoreClient(hiveConf); + isThriftClient = true; + + // Now you have the client - run necessary tests. 
+ isServerRunning = true; + } + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index ef1da6b..092e905 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -53,9 +53,11 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.ProtectMode; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; @@ -73,6 +75,7 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; @@ -80,13 +83,16 @@ import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.MsckDesc; +import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; +import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.serde.Constants; @@ -143,6 +149,21 @@ public class DDLTask extends Task implements Serializable { try { db = Hive.get(conf); + CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc(); + if (null != createDatabaseDesc) { + return createDatabase(db, createDatabaseDesc); + } + + DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc(); + if(dropDatabaseDesc != null) { + return dropDatabase(db, dropDatabaseDesc); + } + + SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc(); + if(switchDatabaseDesc != null) { + return switchDatabase(db, switchDatabaseDesc); + } + CreateTableDesc crtTbl = work.getCreateTblDesc(); if (crtTbl != null) { return createTable(db, crtTbl); @@ -210,6 +231,11 @@ public class DDLTask extends Task implements Serializable { return describeFunction(descFunc); } + ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc(); + if (showDatabases != null) { + return showDatabases(db, showDatabases); + } + ShowTablesDesc showTbls = work.getShowTblsDesc(); if (showTbls != null) { return showTables(db, showTbls); @@ -249,7 +275,7 @@ public class DDLTask extends Task implements 
Serializable { } private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException { - db.dropIndex(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropIdx.getTableName(), + db.dropIndex(db.getCurrentDatabase(), dropIdx.getTableName(), dropIdx.getIndexName(), true); return 0; } @@ -879,11 +905,10 @@ public class DDLTask extends Task implements Serializable { List repairOutput = new ArrayList(); try { HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db); - checker.checkMetastore(MetaStoreUtils.DEFAULT_DATABASE_NAME, msckDesc + checker.checkMetastore(db.getCurrentDatabase(), msckDesc .getTableName(), msckDesc.getPartSpecs(), result); if (msckDesc.isRepairPartitions()) { - Table table = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, - msckDesc.getTableName()); + Table table = db.getTable(db.getCurrentDatabase(), msckDesc.getTableName()); for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) { try { db.createPartition(table, Warehouse.makeSpecFromName(part @@ -995,18 +1020,17 @@ public class DDLTask extends Task implements Serializable { Table tbl = null; List parts = null; - tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName); + tbl = db.getTable(db.getCurrentDatabase(), tabName); if (!tbl.isPartitioned()) { console.printError("Table " + tabName + " is not a partitioned table"); return 1; } if (showParts.getPartSpec() != null) { - parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, + parts = db.getPartitionNames(db.getCurrentDatabase(), tbl.getTableName(), showParts.getPartSpec(), (short) -1); } else { - parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl - .getTableName(), (short) -1); + parts = db.getPartitionNames(db.getCurrentDatabase(), tbl.getTableName(), (short) -1); } // write the results in the file @@ -1036,6 +1060,52 @@ public class DDLTask extends Task implements Serializable { } /** + * Write a list of the available databases to a file. + * + * @param showDatabases + * These are the databases we're interested in. + * @return Returns 0 when execution succeeds and above 0 if it fails. + * @throws HiveException + * Throws this exception if an unexpected error occurs. + */ + private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) throws HiveException { + // get the databases for the desired pattern - populate the output stream + List databases = null; + if (showDatabasesDesc.getPattern() != null) { + LOG.info("pattern: " + showDatabasesDesc.getPattern()); + databases = db.getDatabasesByPattern(showDatabasesDesc.getPattern()); + LOG.info("results : " + databases.size()); + } else { + databases = db.getAllDatabases(); + } + + // write the results in the file + try { + Path resFile = new Path(showDatabasesDesc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + DataOutput outStream = fs.create(resFile); + SortedSet sortedDatabases = new TreeSet(databases); + Iterator iterDatabases = sortedDatabases.iterator(); + + while (iterDatabases.hasNext()) { + // create a row per database name + outStream.writeBytes(iterDatabases.next()); + outStream.write(terminator); + } + ((FSDataOutputStream) outStream).close(); + } catch (FileNotFoundException e) { + LOG.warn("show databases: " + stringifyException(e)); + return 1; + } catch (IOException e) { + LOG.warn("show databases: " + stringifyException(e)); + return 1; + } catch (Exception e) { + throw new HiveException(e.toString()); + } + return 0; + } + + /** * Write a list of the tables in the database to a file. 
* * @param db @@ -1330,8 +1400,7 @@ public class DDLTask extends Task implements Serializable { colPath.indexOf('.') == -1 ? colPath.length() : colPath.indexOf('.')); // describe the table - populate the output stream - Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, - false); + Table tbl = db.getTable(db.getCurrentDatabase(), tableName, false); Partition part = null; try { Path resFile = new Path(descTbl.getResFile()); @@ -1582,8 +1651,7 @@ public class DDLTask extends Task implements Serializable { */ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { // alter the table - Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, alterTbl - .getOldName()); + Table tbl = db.getTable(db.getCurrentDatabase(), alterTbl.getOldName()); Partition part = null; if(alterTbl.getPartSpec() != null) { @@ -1902,10 +1970,8 @@ public class DDLTask extends Task implements Serializable { // We need to fetch the table before it is dropped so that it can be passed // to // post-execution hook - Table tbl = null; + Table tbl = db.getTable(db.getCurrentDatabase(), dropTbl.getTableName()); try { - tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl - .getTableName()); if (!tbl.canDrop()) { throw new HiveException("Table " + tbl.getTableName() + " is protected from being dropped"); @@ -1940,25 +2006,20 @@ public class DDLTask extends Task implements Serializable { } // drop the table - db - .dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl - .getTableName()); + db.dropTable(db.getCurrentDatabase(), dropTbl.getTableName()); if (tbl != null) { work.getOutputs().add(new WriteEntity(tbl)); } } else { // get all partitions of the table - List partitionNames = db.getPartitionNames( - MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl.getTableName(), - (short) -1); - + List partitionNames = + db.getPartitionNames(db.getCurrentDatabase(), dropTbl.getTableName(), (short) -1); Set> partitions = new HashSet>(); - for (int i = 0; i < partitionNames.size(); i++) { + for (String partitionName : partitionNames) { try { - partitions.add(Warehouse.makeSpecFromName(partitionNames.get(i))); + partitions.add(Warehouse.makeSpecFromName(partitionName)); } catch (MetaException e) { - LOG.warn("Unrecognized partition name from metastore: " - + partitionNames.get(i)); + LOG.warn("Unrecognized partition name from metastore: " + partitionName); } } // drop partitions in the list @@ -1992,8 +2053,8 @@ public class DDLTask extends Task implements Serializable { // drop all existing partitions from the list for (Partition partition : partsToDelete) { console.printInfo("Dropping the partition " + partition.getName()); - db.dropPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl - .getTableName(), partition.getValues(), true); // drop data for the + db.dropPartition(db.getCurrentDatabase(), dropTbl.getTableName(), + partition.getValues(), true); // drop data for the // partition work.getOutputs().add(new WriteEntity(partition)); } @@ -2017,6 +2078,53 @@ public class DDLTask extends Task implements Serializable { } /** + * Create a Database + * @param db + * @param crtDb + * @return Always returns 0 + * @throws HiveException + * @throws AlreadyExistsException + */ + private int createDatabase(Hive db, CreateDatabaseDesc crtDb) + throws HiveException, AlreadyExistsException { + db.createDatabase(crtDb.getName(), crtDb.getComment(), + crtDb.getLocationUri(), crtDb.getIfNotExists()); + return 0; + } + + /** + * Drop a Database + * @param db + * @param dropDb + * @return 
Always returns 0 + * @throws HiveException + * @throws NoSuchObjectException + */ + private int dropDatabase(Hive db, DropDatabaseDesc dropDb) + throws HiveException, NoSuchObjectException { + db.dropDatabase(dropDb.getDatabaseName(), dropDb.getIfExists()); + return 0; + } + + /** + * Switch to a different Database + * @param db + * @param switchDb + * @return Always returns 0 + * @throws HiveException + */ + private int switchDatabase(Hive db, SwitchDatabaseDesc switchDb) + throws HiveException { + String dbName = switchDb.getDatabaseName(); + if (!db.databaseExists(dbName)) { + throw new HiveException("ERROR: The database " + dbName + " does not exist."); + } + db.setCurrentDatabase(dbName); + return 0; + } + + + /** * Create a new table. * * @param db @@ -2029,7 +2137,7 @@ public class DDLTask extends Task implements Serializable { */ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { // create the table - Table tbl = new Table(crtTbl.getTableName()); + Table tbl = new Table(db.getCurrentDatabase(), crtTbl.getTableName()); if (crtTbl.getPartCols() != null) { tbl.setPartCols(crtTbl.getPartCols()); } @@ -2188,8 +2296,7 @@ public class DDLTask extends Task implements Serializable { */ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveException { // Get the existing table - Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, crtTbl - .getLikeTableName()); + Table tbl = db.getTable(db.getCurrentDatabase(), crtTbl.getLikeTableName()); tbl.setTableName(crtTbl.getTableName()); @@ -2223,7 +2330,7 @@ public class DDLTask extends Task implements Serializable { * Throws this exception if an unexpected error occurs. */ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { - Table tbl = new Table(crtView.getViewName()); + Table tbl = new Table(db.getCurrentDatabase(), crtView.getViewName()); tbl.setTableType(TableType.VIRTUAL_VIEW); tbl.setSerializationLib(null); tbl.clearSerDeInfo(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index d59f48c..a760059 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -32,10 +32,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.DriverContext; -import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -132,8 +131,7 @@ public class MoveTask extends Task implements Serializable { } String mesg_detail = " from " + tbd.getSourceDir(); console.printInfo(mesg.toString(), mesg_detail); - Table table = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbd - .getTable().getTableName()); + Table table = db.getTable(db.getCurrentDatabase(), tbd.getTable().getTableName()); if (work.getCheckFileFormat()) { // Get all files from the src directory diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index e0761fc..3760b8a 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -18,6 +18,16 @@ package org.apache.hadoop.hive.ql.metadata; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE; +import static org.apache.hadoop.hive.serde.Constants.COLLECTION_DELIM; +import static org.apache.hadoop.hive.serde.Constants.ESCAPE_CHAR; +import static org.apache.hadoop.hive.serde.Constants.FIELD_DELIM; +import static org.apache.hadoop.hive.serde.Constants.LINE_DELIM; +import static org.apache.hadoop.hive.serde.Constants.MAPKEY_DELIM; +import static org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT; +import static org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME; + import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; @@ -42,6 +52,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -51,7 +62,6 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.index.HiveIndexHandler; -import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; @@ -71,7 +81,9 @@ public class Hive { static final private Log LOG = LogFactory.getLog("hive.ql.metadata.Hive"); private HiveConf conf = null; + private Warehouse warehouse; private IMetaStoreClient metaStoreClient; + private String currentDatabase; private static ThreadLocal hiveDB = new ThreadLocal() { @Override @@ -159,6 +171,11 @@ public class Hive { */ private Hive(HiveConf c) throws HiveException { conf = c; + try { + warehouse = new Warehouse(conf); + } catch (Exception e) { + throw new HiveException(e); + } } /** @@ -172,6 +189,78 @@ public class Hive { } /** + * create a database + * @param name + * @param comment + * @param locationUri + * @param ifNotExist if true, will ignore AlreadyExistsException exception + * @throws AlreadyExistsException + * @throws HiveException + */ + public void createDatabase(String name, String comment, String locationUri, + boolean ifNotExist) throws AlreadyExistsException, HiveException { + try { + Database db = new Database(); + db.setName(name); + db.setLocationUri(locationUri); + db.setComment(comment); + getMSC().createDatabase(db); + } catch (AlreadyExistsException e) { + if (!ifNotExist) { + throw e; + } + } catch (Exception e) { + throw new HiveException(e); + } + } + +/** + * @param name + * @param comment + * @throws HiveException + * @throws AlreadyExistsException + * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#createDatabase(java.lang.String, + * java.lang.String) + */ + protected void createDatabase(String name, String comment) + throws HiveException, AlreadyExistsException { + createDatabase(name, comment, null, false); + } + + /** + * @param name + * @param ignoreUnknownDb + * @return true or false + * @throws 
NoSuchObjectException + * @throws HiveException + * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#dropDatabase(java.lang.String) + */ + protected void dropDatabase(String name) throws HiveException, NoSuchObjectException { + dropDatabase(name, false); + } + + + /** + * drop a database + * @param name + * @param ignoreUnknownDb + * @return + * @throws HiveException + * @throws NoSuchObjectException + */ + public void dropDatabase(String name, boolean ignoreUnknownDb) + throws HiveException, NoSuchObjectException { + try { + getMSC().dropDatabase(name, ignoreUnknownDb); + } catch (NoSuchObjectException e) { + throw e; + } catch (Exception e) { + throw new HiveException(e); + } + } + + + /** * Creates a table metdata and the directory for the table data * * @param tableName @@ -223,13 +312,12 @@ public class Hive { throw new HiveException("columns not specified for table " + tableName); } - Table tbl = new Table(tableName); + Table tbl = new Table(getCurrentDatabase(), tableName); tbl.setInputFormatClass(fileInputFormat.getName()); tbl.setOutputFormatClass(fileOutputFormat.getName()); for (String col : columns) { - FieldSchema field = new FieldSchema(col, - org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "default"); + FieldSchema field = new FieldSchema(col, STRING_TYPE_NAME, "default"); tbl.getCols().add(field); } @@ -237,9 +325,7 @@ public class Hive { for (String partCol : partCols) { FieldSchema part = new FieldSchema(); part.setName(partCol); - part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default - // partition - // key + part.setType(STRING_TYPE_NAME); // default partition key tbl.getPartCols().add(part); } } @@ -263,8 +349,7 @@ public class Hive { public void alterTable(String tblName, Table newTbl) throws InvalidOperationException, HiveException { try { - getMSC().alter_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, - newTbl.getTTable()); + getMSC().alter_table(getCurrentDatabase(), tblName, newTbl.getTTable()); } catch (MetaException e) { throw new HiveException("Unable to alter table.", e); } catch (TException e) { @@ -286,7 +371,7 @@ public class Hive { public void alterPartition(String tblName, Partition newPart) throws InvalidOperationException, HiveException { try { - getMSC().alter_partition(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, + getMSC().alter_partition(getCurrentDatabase(), tblName, newPart.getTPartition()); } catch (MetaException e) { @@ -318,6 +403,9 @@ public class Hive { */ public void createTable(Table tbl, boolean ifNotExists) throws HiveException { try { + if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) { + tbl.setDbName(getCurrentDatabase()); + } if (tbl.getCols().size() == 0) { tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), tbl.getDeserializer())); @@ -334,7 +422,7 @@ public class Hive { } /** - * + * * @param tableName * table name * @param indexName @@ -377,23 +465,23 @@ public class Hive { throws HiveException { try { - String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + String dbName = getCurrentDatabase(); Index old_index = null; try { - old_index = getIndex(dbName, tableName, indexName); + old_index = getIndex(dbName, tableName, indexName); } catch (Exception e) { } if (old_index != null) { throw new HiveException("Index " + indexName + " already exists on table " + tableName + ", db=" + dbName); } - + org.apache.hadoop.hive.metastore.api.Table baseTbl = getMSC().getTable(dbName, tableName); if (baseTbl.getTableType() == 
TableType.VIRTUAL_VIEW.toString()) { throw new HiveException("tableName="+ tableName +" is a VIRTUAL VIEW. Index on VIRTUAL VIEW is not supported."); } - + if (indexTblName == null) { - indexTblName = MetaStoreUtils.getIndexTableName(dbName, tableName, indexName); + indexTblName = MetaStoreUtils.getIndexTableName(dbName, tableName, indexName); } else { org.apache.hadoop.hive.metastore.api.Table temp = null; try { @@ -404,11 +492,11 @@ public class Hive { throw new HiveException("Table name " + indexTblName + " already exists. Choose another name."); } } - + org.apache.hadoop.hive.metastore.api.StorageDescriptor storageDescriptor = baseTbl.getSd().clone(); SerDeInfo serdeInfo = storageDescriptor.getSerdeInfo(); if(serde != null) { - serdeInfo.setSerializationLib(serde); + serdeInfo.setSerializationLib(serde); } else { if (storageHandler == null) { serdeInfo.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); @@ -420,24 +508,22 @@ public class Hive { } if (fieldDelim != null) { - serdeInfo.getParameters().put(Constants.FIELD_DELIM, fieldDelim); - serdeInfo.getParameters().put(Constants.SERIALIZATION_FORMAT, - fieldDelim); + serdeInfo.getParameters().put(FIELD_DELIM, fieldDelim); + serdeInfo.getParameters().put(SERIALIZATION_FORMAT, fieldDelim); } if (fieldEscape != null) { - serdeInfo.getParameters().put(Constants.ESCAPE_CHAR, fieldEscape); + serdeInfo.getParameters().put(ESCAPE_CHAR, fieldEscape); } if (collItemDelim != null) { - serdeInfo.getParameters() - .put(Constants.COLLECTION_DELIM, collItemDelim); + serdeInfo.getParameters().put(COLLECTION_DELIM, collItemDelim); } if (mapKeyDelim != null) { - serdeInfo.getParameters().put(Constants.MAPKEY_DELIM, mapKeyDelim); + serdeInfo.getParameters().put(MAPKEY_DELIM, mapKeyDelim); } if (lineDelim != null) { - serdeInfo.getParameters().put(Constants.LINE_DELIM, lineDelim); + serdeInfo.getParameters().put(LINE_DELIM, lineDelim); } - + if (serdeProps != null) { Iterator> iter = serdeProps.entrySet() .iterator(); @@ -446,16 +532,16 @@ public class Hive { serdeInfo.getParameters().put(m.getKey(), m.getValue()); } } - + storageDescriptor.setLocation(null); if (location != null) { - storageDescriptor.setLocation(location); + storageDescriptor.setLocation(location); } storageDescriptor.setInputFormat(inputFormat); storageDescriptor.setOutputFormat(outputFormat); - + Map params = new HashMap(); - + List indexTblCols = new ArrayList(); List sortCols = new ArrayList(); storageDescriptor.setBucketCols(null); @@ -468,14 +554,15 @@ public class Hive { k++; } } - if (k != indexedCols.size()) + if (k != indexedCols.size()) { throw new RuntimeException( "Check the index columns, they should appear in the table being indexed."); - + } + storageDescriptor.setCols(indexTblCols); storageDescriptor.setSortCols(sortCols); - int time = (int) (System.currentTimeMillis() / 1000); + int time = (int) (System.currentTimeMillis() / 1000); org.apache.hadoop.hive.metastore.api.Table tt = null; HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass); @@ -489,18 +576,18 @@ public class Hive { if(!deferredRebuild) { throw new RuntimeException("Please specify deferred rebuild using \" WITH DEFERRED REBUILD \"."); } - + Index indexDesc = new Index(indexName, indexHandlerClass, dbName, tableName, time, time, indexTblName, storageDescriptor, params, deferredRebuild); indexHandler.analyzeIndexDefinition(baseTbl, indexDesc, tt); - + this.getMSC().createIndex(indexDesc, tt); - + } catch (Exception e) { throw new 
HiveException(e); } } - + public Index getIndex(String dbName, String baseTableName, String indexName) throws HiveException { try { @@ -509,7 +596,7 @@ public class Hive { throw new HiveException(e); } } - + public boolean dropIndex(String db_name, String tbl_name, String index_name, boolean deleteData) throws HiveException { try { return getMSC().dropIndex(db_name, tbl_name, index_name, deleteData); @@ -519,7 +606,7 @@ public class Hive { throw new HiveException("Unknow error. Please check logs.", e); } } - + /** * Drops table along with the data in it. If the table doesn't exist * then it is a no-op @@ -570,6 +657,17 @@ public class Hive { /** * Returns metadata of the table. + * @param tableName the name of the table + * @return + * @throws HiveException if there's an internal error or if the + * table doesn't exist + */ + public Table getTable(final String tableName) throws HiveException { + return this.getTable(getCurrentDatabase(), tableName, true); + } + + /** + * Returns metadata of the table. * * @param dbName * the name of the database @@ -579,9 +677,7 @@ public class Hive { * @exception HiveException * if there's an internal error or if the table doesn't exist */ - public Table getTable(final String dbName, final String tableName) - throws HiveException { - + public Table getTable(final String dbName, final String tableName) throws HiveException { return this.getTable(dbName, tableName, true); } @@ -622,12 +718,11 @@ public class Hive { if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) { // Fix the non-printable chars Map parameters = tTable.getSd().getParameters(); - String sf = parameters.get(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT); + String sf = parameters.get(SERIALIZATION_FORMAT); if (sf != null) { char[] b = sf.toCharArray(); if ((b.length == 1) && (b[0] < 10)) { // ^A, ^B, ^C, ^D, \t - parameters.put(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, - Integer.toString(b[0])); + parameters.put(SERIALIZATION_FORMAT, Integer.toString(b[0])); } } @@ -653,12 +748,27 @@ public class Hive { return table; } + /** + * Get all table names for the current database. + * @return List of table names + * @throws HiveException + */ public List getAllTables() throws HiveException { - return getTablesByPattern(".*"); + return getAllTables(getCurrentDatabase()); + } + + /** + * Get all table names for the specified database. + * @param dbName + * @return List of table names + * @throws HiveException + */ + public List getAllTables(String dbName) throws HiveException { + return getTablesByPattern(dbName, ".*"); } /** - * returns all existing tables from default database which match the given + * Returns all existing tables from default database which match the given * pattern. The matching occurs as per Java regular expressions * * @param tablePattern @@ -666,13 +776,28 @@ public class Hive { * @return list of table names * @throws HiveException */ - public List getTablesByPattern(String tablePattern) - throws HiveException { - return getTablesForDb(MetaStoreUtils.DEFAULT_DATABASE_NAME, tablePattern); + public List getTablesByPattern(String tablePattern) throws HiveException { + return getTablesByPattern(getCurrentDatabase(), tablePattern); } /** - * returns all existing tables from the given database which match the given + * Returns all existing tables from the specified database which match the given + * pattern. The matching occurs as per Java regular expressions. 
+ * @param dbName + * @param tablePattern + * @return list of table names + * @throws HiveException + */ + public List getTablesByPattern(String dbName, String tablePattern) throws HiveException { + try { + return getMSC().getTables(dbName, tablePattern); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * Returns all existing tables from the given database which match the given * pattern. The matching occurs as per Java regular expressions * * @param database @@ -692,32 +817,56 @@ public class Hive { } /** - * @param name - * @param locationUri - * @return true or false - * @throws AlreadyExistsException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#createDatabase(java.lang.String, - * java.lang.String) + * Get all existing database names. + * + * @return List of database names. + * @throws HiveException */ - protected boolean createDatabase(String name, String locationUri) - throws AlreadyExistsException, MetaException, TException { - return getMSC().createDatabase(name, locationUri); + public List getAllDatabases() throws HiveException { + return getDatabasesByPattern(".*"); } /** - * @param name - * @return true or false - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#dropDatabase(java.lang.String) + * Get all existing databases that match the given + * pattern. The matching occurs as per Java regular expressions + * + * @param databasePattern + * java re pattern + * @return list of database names + * @throws HiveException */ - protected boolean dropDatabase(String name) throws MetaException, TException { - return getMSC().dropDatabase(name); + public List getDatabasesByPattern(String databasePattern) + throws HiveException { + try { + return getMSC().getDatabases(databasePattern); + } catch (Exception e) { + throw new HiveException(e); + } } /** + * Query metadata to see if a database with the given name already exists. + * + * @param dbName + * @return true if a database with the given name already exists, false if + * does not exist. + * @throws HiveException + */ + public boolean databaseExists(String dbName) throws HiveException { + try { + if (null != getMSC().getDatabase(dbName)) { + return true; + } + return false; + } catch (NoSuchObjectException e) { + return false; + } catch (Exception e) { + throw new HiveException(e); + } + } + + + /** * Load a directory into a Hive Table Partition - Alters existing content of * the partition with the contents of loadPath. - If he partition does not * exist - one is created - files in loadPath are moved into Hive. 
But the @@ -739,7 +888,7 @@ public class Hive { Map partSpec, boolean replace, Path tmpDirPath, boolean holdDDLTime) throws HiveException { - Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + Table tbl = getTable(getCurrentDatabase(), tableName); try { /** * Move files before creating the partition since down stream processes @@ -861,7 +1010,7 @@ public class Hive { */ public void loadTable(Path loadPath, String tableName, boolean replace, Path tmpDirPath, boolean holdDDLTime) throws HiveException { - Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + Table tbl = getTable(getCurrentDatabase(), tableName); if (replace) { tbl.replaceFiles(loadPath, tmpDirPath); @@ -1118,6 +1267,17 @@ public class Hive { return qlPartitions; } + public String getCurrentDatabase() { + if (null == currentDatabase) { + currentDatabase = DEFAULT_DATABASE_NAME; + } + return currentDatabase; + } + + public void setCurrentDatabase(String currentDatabase) { + this.currentDatabase = currentDatabase; + } + static private void checkPaths(FileSystem fs, FileStatus[] srcs, Path destf, boolean replace) throws HiveException { try { @@ -1280,10 +1440,8 @@ public class Hive { return null; } HiveStorageHandler storageHandler = - HiveUtils.getStorageHandler( - conf, - tbl.getParameters().get( - org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE)); + HiveUtils.getStorageHandler(conf, + tbl.getParameters().get(META_TABLE_STORAGE)); if (storageHandler == null) { return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index 3300e2a..604d652 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.ql.metadata; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + import java.io.IOException; import java.io.Serializable; import java.net.URI; @@ -97,8 +99,12 @@ public class Table implements Serializable { } } - public Table(String name) { - this(getEmptyTable(name)); + public Table(String tableName) { + this(DEFAULT_DATABASE_NAME, tableName); + } + + public Table(String databaseName, String tableName) { + this(getEmptyTable(databaseName, tableName)); } /** @@ -120,7 +126,8 @@ public class Table implements Serializable { /** * Initialize an emtpy table. 
*/ - static org.apache.hadoop.hive.metastore.api.Table getEmptyTable(String name) { + static org.apache.hadoop.hive.metastore.api.Table + getEmptyTable(String databaseName, String tableName) { StorageDescriptor sd = new StorageDescriptor(); { sd.setSerdeInfo(new SerDeInfo()); @@ -144,8 +151,8 @@ public class Table implements Serializable { t.setPartitionKeys(new ArrayList()); t.setParameters(new HashMap()); t.setTableType(TableType.MANAGED_TABLE.toString()); - t.setTableName(name); - t.setDbName(MetaStoreUtils.DEFAULT_DATABASE_NAME); + t.setDbName(databaseName); + t.setTableName(tableName); } return t; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index eedf9e3..91c8888 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -33,7 +33,6 @@ import org.antlr.runtime.tree.Tree; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.Context; @@ -571,8 +570,7 @@ public abstract class BaseSemanticAnalyzer { + tableName; } - tableHandle = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, - tableName); + tableHandle = db.getTable(tableName); } catch (InvalidTableException ite) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(ast .getChild(0)), ite); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 1801c10..fc80780 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -18,6 +18,14 @@ package org.apache.hadoop.hive.ql.parse; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_CREATEDATABASE; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASECOMMENT; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DROPDATABASE; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_IFEXISTS; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_IFNOTEXISTS; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_SHOWDATABASES; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_SWITCHDATABASE; + import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -36,7 +44,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; @@ -57,18 +64,22 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import 
org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.FetchWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; +import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; +import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.serde.Constants; @@ -110,10 +121,10 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { static class TablePartition { String tableName; HashMap partSpec = null; - + public TablePartition(){ } - + public TablePartition (ASTNode tblPart) throws SemanticException { tableName = unescapeIdentifier(tblPart.getChild(0).getText()); if (tblPart.getChildCount() > 1) { @@ -124,7 +135,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { } } } - + public DDLSemanticAnalyzer(HiveConf conf) throws SemanticException { super(conf); // Partition can't have this name @@ -137,7 +148,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { @Override public void analyzeInternal(ASTNode ast) throws SemanticException { - + if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PARTITION) { TablePartition tblPart = new TablePartition((ASTNode)ast.getChild(0)); String tableName = tblPart.tableName; @@ -149,7 +160,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { analyzeAlterTableProtectMode(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) { analyzeAlterTableLocation(ast, tableName, partSpec); - } + } } else if (ast.getToken().getType() == HiveParser.TOK_DROPTABLE) { analyzeDropTable(ast, false); } else if (ast.getToken().getType() == HiveParser.TOK_CREATEINDEX) { @@ -159,6 +170,9 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { } else if (ast.getToken().getType() == HiveParser.TOK_DESCTABLE) { ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); analyzeDescribeTable(ast); + } else if (ast.getToken().getType() == TOK_SHOWDATABASES) { + ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); + analyzeShowDatabases(ast); } else if (ast.getToken().getType() == HiveParser.TOK_SHOWTABLES) { ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); analyzeShowTables(ast); @@ -209,11 +223,67 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { } else if (ast.getToken().getType() == HiveParser.TOK_SHOWPARTITIONS) { ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); analyzeShowPartitions(ast); + } else if (ast.getToken().getType() == TOK_CREATEDATABASE) { + analyzeCreateDatabase(ast); + } else if (ast.getToken().getType() == TOK_DROPDATABASE) { + analyzeDropDatabase(ast); + } else if (ast.getToken().getType() == TOK_SWITCHDATABASE) { + analyzeSwitchDatabase(ast); } else { throw new SemanticException("Unsupported command."); } } + private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + boolean ifNotExists = false; + String dbComment = null; + + for (int i = 1; i < ast.getChildCount(); i++) { + ASTNode childNode = 
(ASTNode) ast.getChild(i); + switch (childNode.getToken().getType()) { + case TOK_IFNOTEXISTS: + ifNotExists = true; + break; + case TOK_DATABASECOMMENT: + dbComment = unescapeSQLString(childNode.getChild(0).getText()); + break; + default: + throw new SemanticException("Unrecognized token in CREATE DATABASE statement"); + } + } + + CreateDatabaseDesc createDatabaseDesc = new CreateDatabaseDesc(); + createDatabaseDesc.setName(dbName); + createDatabaseDesc.setComment(dbComment); + createDatabaseDesc.setIfNotExists(ifNotExists); + createDatabaseDesc.setLocationUri(null); + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + createDatabaseDesc), conf)); + } + + private void analyzeDropDatabase(ASTNode ast) throws SemanticException { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + boolean ifExists = false; + + if (null != ast.getFirstChildWithType(TOK_IFEXISTS)) { + ifExists = true; + } + + DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropDatabaseDesc), conf)); + } + + private void analyzeSwitchDatabase(ASTNode ast) { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + SwitchDatabaseDesc switchDatabaseDesc = new SwitchDatabaseDesc(dbName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + switchDatabaseDesc), conf)); + } + + + private void analyzeDropTable(ASTNode ast, boolean expectView) throws SemanticException { String tableName = unescapeIdentifier(ast.getChild(0).getText()); @@ -316,11 +386,11 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { private List> getIndexBuilderMapRed(String baseTableName, String indexName, HashMap partSpec) throws SemanticException { try { - Index index = db.getIndex(MetaStoreUtils.DEFAULT_DATABASE_NAME, baseTableName, indexName); - Table indexTbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,index.getIndexTableName()); + String dbName = db.getCurrentDatabase(); + Index index = db.getIndex(dbName, baseTableName, indexName); + Table indexTbl = db.getTable(dbName, index.getIndexTableName()); String baseTblName = index.getOrigTableName(); - Table baseTbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, - baseTblName); + Table baseTbl = db.getTable(dbName, baseTblName); String handlerCls = index.getIndexHandlerClass(); HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls); @@ -333,7 +403,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { indexTbl, db, indexTblPartitions); } - List> ret = handler.generateIndexBuildTaskList(baseTbl, + List> ret = handler.generateIndexBuildTaskList(baseTbl, index, indexTblPartitions, baseTblPartitions, indexTbl, getInputs(), getOutputs()); return ret; } catch (Exception e) { @@ -467,7 +537,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { serde = COLUMNAR_SERDE; break; } - + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, inputFormat, outputFormat, serde, storageHandler, partSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), @@ -653,6 +723,19 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { setFetchTask(createFetchTask(showPartsDesc.getSchema())); } + private void analyzeShowDatabases(ASTNode ast) throws SemanticException { + ShowDatabasesDesc showDatabasesDesc; + if (ast.getChildCount() == 1) { + String databaseNames = unescapeSQLString(ast.getChild(0).getText()); + showDatabasesDesc = new ShowDatabasesDesc(ctx.getResFile(), 
databaseNames); + } else { + showDatabasesDesc = new ShowDatabasesDesc(ctx.getResFile()); + } + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showDatabasesDesc), conf)); + setFetchTask(createFetchTask(showDatabasesDesc.getSchema())); + } + private void analyzeShowTables(ASTNode ast) throws SemanticException { ShowTablesDesc showTblsDesc; if (ast.getChildCount() == 1) { @@ -669,7 +752,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { ShowTableStatusDesc showTblStatusDesc; String tableNames = unescapeIdentifier(ast.getChild(0).getText()); - String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + String dbName = db.getCurrentDatabase(); int children = ast.getChildCount(); HashMap partSpec = null; if (children >= 2) { @@ -839,7 +922,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { if (currentPart != null) { validatePartitionValues(currentPart); AddPartitionDesc addPartitionDesc = new AddPartitionDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, currentPart, + db.getCurrentDatabase(), tblName, currentPart, currentLocation, ifNotExists); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf)); @@ -861,7 +944,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { if (currentPart != null) { validatePartitionValues(currentPart); AddPartitionDesc addPartitionDesc = new AddPartitionDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, currentPart, + db.getCurrentDatabase(), tblName, currentPart, currentLocation, ifNotExists); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf)); @@ -887,14 +970,14 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { if (partSpecs.size() == 0) { AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, null, + db.getCurrentDatabase(), tblName, null, AlterTableDesc.AlterTableTypes.TOUCH); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc), conf)); } else { for (Map partSpec : partSpecs) { AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, partSpec, + db.getCurrentDatabase(), tblName, partSpec, AlterTableDesc.AlterTableTypes.TOUCH); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc), conf)); @@ -923,7 +1006,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { Map partSpec = partSpecs.get(0); AlterTableSimpleDesc archiveDesc = new AlterTableSimpleDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, partSpec, + db.getCurrentDatabase(), tblName, partSpec, (isUnArchive ? 
AlterTableTypes.UNARCHIVE : AlterTableTypes.ARCHIVE)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), archiveDesc), conf)); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g index b080780..65d86d0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g @@ -85,6 +85,7 @@ TOK_STRING; TOK_LIST; TOK_STRUCT; TOK_MAP; +TOK_CREATEDATABASE; TOK_CREATETABLE; TOK_CREATEINDEX; TOK_CREATEINDEX_INDEXTBLNAME; @@ -113,11 +114,15 @@ TOK_ALTERTABLE_PROPERTIES; TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION; TOK_ALTERINDEX_REBUILD; TOK_MSCK; +TOK_SHOWDATABASES; TOK_SHOWTABLES; TOK_SHOWFUNCTIONS; TOK_SHOWPARTITIONS; TOK_SHOW_TABLESTATUS; +TOK_SWITCHDATABASE; +TOK_DROPDATABASE; TOK_DROPTABLE; +TOK_DATABASECOMMENT; TOK_TABCOLLIST; TOK_TABCOL; TOK_TABLECOMMENT; @@ -159,6 +164,7 @@ TOK_TABLEPROPLIST; TOK_TABTYPE; TOK_LIMIT; TOK_TABLEPROPERTY; +TOK_IFEXISTS; TOK_IFNOTEXISTS; TOK_HINTLIST; TOK_HINT; @@ -224,7 +230,10 @@ loadStatement ddlStatement @init { msgs.push("ddl statement"); } @after { msgs.pop(); } - : createTableStatement + : createDatabaseStatement + | switchDatabaseStatement + | dropDatabaseStatement + | createTableStatement | dropTableStatement | alterStatement | descStatement @@ -239,6 +248,13 @@ ddlStatement | dropFunctionStatement ; +ifExists +@init { msgs.push("if exists clause"); } +@after { msgs.pop(); } + : KW_IF KW_EXISTS + -> ^(TOK_IFEXISTS) + ; + ifNotExists @init { msgs.push("if not exists clause"); } @after { msgs.pop(); } @@ -246,6 +262,38 @@ ifNotExists -> ^(TOK_IFNOTEXISTS) ; + +createDatabaseStatement +@init { msgs.push("create database statement"); } +@after { msgs.pop(); } + : KW_CREATE (KW_DATABASE|KW_SCHEMA) + ifNotExists? + name=Identifier + databaseComment? + -> ^(TOK_CREATEDATABASE $name ifNotExists? databaseComment?) + ; + +switchDatabaseStatement +@init { msgs.push("switch database statement"); } +@after { msgs.pop(); } + : KW_USE Identifier + -> ^(TOK_SWITCHDATABASE Identifier) + ; + +dropDatabaseStatement +@init { msgs.push("drop database statement"); } +@after { msgs.pop(); } + : KW_DROP (KW_DATABASE|KW_SCHEMA) ifExists? Identifier + -> ^(TOK_DROPDATABASE Identifier ifExists?) + ; + +databaseComment +@init { msgs.push("database's comment"); } +@after { msgs.pop(); } + : KW_COMMENT comment=StringLiteral + -> ^(TOK_DATABASECOMMENT $comment) + ; + createTableStatement @init { msgs.push("create table statement"); } @after { msgs.pop(); } @@ -572,7 +620,8 @@ descStatement showStatement @init { msgs.push("show statement"); } @after { msgs.pop(); } - : KW_SHOW KW_TABLES showStmtIdentifier? -> ^(TOK_SHOWTABLES showStmtIdentifier?) + : KW_SHOW (KW_DATABASES|KW_SCHEMAS) (KW_LIKE showStmtIdentifier)? -> ^(TOK_SHOWDATABASES showStmtIdentifier?) + | KW_SHOW KW_TABLES showStmtIdentifier? -> ^(TOK_SHOWTABLES showStmtIdentifier?) | KW_SHOW KW_FUNCTIONS showStmtIdentifier? -> ^(TOK_SHOWFUNCTIONS showStmtIdentifier?) | KW_SHOW KW_PARTITIONS Identifier partitionSpec? -> ^(TOK_SHOWPARTITIONS Identifier partitionSpec?) | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=Identifier)? KW_LIKE showStmtIdentifier partitionSpec? 
@@ -1762,6 +1811,7 @@ KW_INTERSECT: 'INTERSECT'; KW_VIEW: 'VIEW'; KW_IN: 'IN'; KW_DATABASE: 'DATABASE'; +KW_DATABASES: 'DATABASES'; KW_MATERIALIZED: 'MATERIALIZED'; KW_SCHEMA: 'SCHEMA'; KW_SCHEMAS: 'SCHEMAS'; @@ -1794,6 +1844,7 @@ KW_LATERAL: 'LATERAL'; KW_TOUCH: 'TOUCH'; KW_ARCHIVE: 'ARCHIVE'; KW_UNARCHIVE: 'UNARCHIVE'; +KW_USE: 'USE'; // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 682a8b5..3622028 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -45,7 +45,6 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.Context; @@ -744,7 +743,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { String tab_name = qb.getTabNameForAlias(alias); Table tab = null; try { - tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tab_name); + tab = db.getTable(db.getCurrentDatabase(), tab_name); } catch (InvalidTableException ite) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(qb .getParseInfo().getSrcForAlias(alias))); @@ -6766,16 +6765,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { case CTAS: // create table as select - // check for existence of table. Throw an exception if it exists. 
+ // Verify that the table does not already exist try { - Table tab = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, - tableName, false); // do not throw exception if table does not exist - - if (tab != null) { - throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS - .getMsg(tableName)); + if (null != db.getTable(db.getCurrentDatabase(), tableName, false)) { + throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(tableName)); } - } catch (HiveException e) { // may be unable to get meta data + } catch (HiveException e) { throw new SemanticException(e); } @@ -6788,7 +6783,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { return selectStmt; default: - assert false; // should never be unknown command type + throw new SemanticException("Unrecognized command."); } return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 6af3b17..b81d75e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -35,6 +35,9 @@ public final class SemanticAnalyzerFactory { static { commandType.put(HiveParser.TOK_EXPLAIN, "EXPLAIN"); commandType.put(HiveParser.TOK_LOAD, "LOAD"); + commandType.put(HiveParser.TOK_CREATEDATABASE, "CREATEDATABASE"); + commandType.put(HiveParser.TOK_DROPDATABASE, "DROPDATABASE"); + commandType.put(HiveParser.TOK_SWITCHDATABASE, "SWITCHDATABASE"); commandType.put(HiveParser.TOK_CREATETABLE, "CREATETABLE"); commandType.put(HiveParser.TOK_DROPTABLE, "DROPTABLE"); commandType.put(HiveParser.TOK_DESCTABLE, "DESCTABLE"); @@ -52,6 +55,7 @@ public final class SemanticAnalyzerFactory { commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, "ALTERTABLE_PROPERTIES"); commandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, "ALTERTABLE_SERIALIZER"); commandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, "ALTERTABLE_SERDEPROPERTIES"); + commandType.put(HiveParser.TOK_SHOWDATABASES, "SHOWDATABASES"); commandType.put(HiveParser.TOK_SHOWTABLES, "SHOWTABLES"); commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, "SHOW_TABLESTATUS"); commandType.put(HiveParser.TOK_SHOWFUNCTIONS, "SHOWFUNCTIONS"); @@ -89,6 +93,9 @@ public final class SemanticAnalyzerFactory { return new ExplainSemanticAnalyzer(conf); case HiveParser.TOK_LOAD: return new LoadSemanticAnalyzer(conf); + case HiveParser.TOK_CREATEDATABASE: + case HiveParser.TOK_DROPDATABASE: + case HiveParser.TOK_SWITCHDATABASE: case HiveParser.TOK_DROPTABLE: case HiveParser.TOK_DROPVIEW: case HiveParser.TOK_DESCTABLE: @@ -105,6 +112,7 @@ public final class SemanticAnalyzerFactory { case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: case HiveParser.TOK_ALTERINDEX_REBUILD: case HiveParser.TOK_ALTERVIEW_PROPERTIES: + case HiveParser.TOK_SHOWDATABASES: case HiveParser.TOK_SHOWTABLES: case HiveParser.TOK_SHOW_TABLESTATUS: case HiveParser.TOK_SHOWFUNCTIONS: diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateDatabaseDesc.java new file mode 100644 index 0000000..57ccf94 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateDatabaseDesc.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +/** + * CreateDatabaseDesc. + * + */ +@Explain(displayName = "Create Database") +public class CreateDatabaseDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + + String databaseName; + String locationUri; + String comment; + boolean ifNotExists; + + /** + * For serialization only. + */ + public CreateDatabaseDesc() { + } + + public CreateDatabaseDesc(String databaseName, String comment, + String locationUri, boolean ifNotExists) { + super(); + this.databaseName = databaseName; + this.comment = comment; + this.locationUri = locationUri; + this.ifNotExists = ifNotExists; + } + + public CreateDatabaseDesc(String databaseName, boolean ifNotExists) { + this(databaseName, null, null, ifNotExists); + } + + + + @Explain(displayName="if not exists") + public boolean getIfNotExists() { + return ifNotExists; + } + + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } + + @Explain(displayName="name") + public String getName() { + return databaseName; + } + + public void setName(String databaseName) { + this.databaseName = databaseName; + } + + @Explain(displayName="comment") + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + @Explain(displayName="locationUri") + public String getLocationUri() { + return locationUri; + } + + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index ed4ed22..a570cdb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -30,13 +30,17 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity; */ public class DDLWork implements Serializable { private static final long serialVersionUID = 1L; - private CreateIndexDesc createIndexDesc; + private CreateIndexDesc createIndexDesc; private DropIndexDesc dropIdxDesc; + private CreateDatabaseDesc createDatabaseDesc; + private SwitchDatabaseDesc switchDatabaseDesc; + private DropDatabaseDesc dropDatabaseDesc; private CreateTableDesc createTblDesc; private CreateTableLikeDesc createTblLikeDesc; private CreateViewDesc createVwDesc; private DropTableDesc dropTblDesc; private AlterTableDesc alterTblDesc; + private ShowDatabasesDesc showDatabasesDesc; private ShowTablesDesc showTblsDesc; private ShowFunctionsDesc showFuncsDesc; private DescFunctionDesc descFunctionDesc; @@ -69,6 +73,36 @@ public class DDLWork implements Serializable { } /** + * @param createDatabaseDesc + * Create Database descriptor + */ + public DDLWork(HashSet inputs, HashSet outputs, + CreateDatabaseDesc createDatabaseDesc) { + this(inputs, outputs); + this.createDatabaseDesc = createDatabaseDesc; + } + + /** + * 
@param dropDatabaseDesc + * Drop Database descriptor + */ + public DDLWork(HashSet inputs, HashSet outputs, + DropDatabaseDesc dropDatabaseDesc) { + this(inputs, outputs); + this.dropDatabaseDesc = dropDatabaseDesc; + } + + /** + * @param switchDatabaseDesc + * Switch Database descriptor + */ + public DDLWork(HashSet inputs, HashSet outputs, + SwitchDatabaseDesc switchDatabaseDesc) { + this(inputs, outputs); + this.switchDatabaseDesc = switchDatabaseDesc; + } + + /** * @param alterTblDesc * alter table descriptor */ @@ -133,6 +167,16 @@ public class DDLWork implements Serializable { } /** + * @param showDatabasesDesc + */ + public DDLWork(HashSet inputs, HashSet outputs, + ShowDatabasesDesc showDatabasesDesc) { + this(inputs, outputs); + + this.showDatabasesDesc = showDatabasesDesc; + } + + /** * @param showTblsDesc */ public DDLWork(HashSet inputs, HashSet outputs, @@ -219,6 +263,51 @@ public class DDLWork implements Serializable { } /** + * @return Create Database descriptor + */ + public CreateDatabaseDesc getCreateDatabaseDesc() { + return createDatabaseDesc; + } + + /** + * Set Create Database descriptor + * @param createDatabaseDesc + */ + public void setCreateDatabaseDesc(CreateDatabaseDesc createDatabaseDesc) { + this.createDatabaseDesc = createDatabaseDesc; + } + + /** + * @return Drop Database descriptor + */ + public DropDatabaseDesc getDropDatabaseDesc() { + return dropDatabaseDesc; + } + + /** + * Set Drop Database descriptor + * @param dropDatabaseDesc + */ + public void setDropDatabaseDesc(DropDatabaseDesc dropDatabaseDesc) { + this.dropDatabaseDesc = dropDatabaseDesc; + } + + /** + * @return Switch Database descriptor + */ + public SwitchDatabaseDesc getSwitchDatabaseDesc() { + return switchDatabaseDesc; + } + + /** + * Set Switch Database descriptor + * @param switchDatabaseDesc + */ + public void setSwitchDatabaseDesc(SwitchDatabaseDesc switchDatabaseDesc) { + this.switchDatabaseDesc = switchDatabaseDesc; + } + + /** * @return the createTblDesc */ @Explain(displayName = "Create Table Operator") @@ -307,6 +396,22 @@ public class DDLWork implements Serializable { } /** + * @return the showDatabasesDesc + */ + @Explain(displayName = "Show Databases Operator") + public ShowDatabasesDesc getShowDatabasesDesc() { + return showDatabasesDesc; + } + + /** + * @param showDatabasesDesc + * the showDatabasesDesc to set + */ + public void setShowDatabasesDesc(ShowDatabasesDesc showDatabasesDesc) { + this.showDatabasesDesc = showDatabasesDesc; + } + + /** * @return the showTblsDesc */ @Explain(displayName = "Show Table Operator") diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropDatabaseDesc.java new file mode 100644 index 0000000..ac47eb1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropDatabaseDesc.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +/** + * DropDatabaseDesc. + * + */ +@Explain(displayName = "Drop Database") +public class DropDatabaseDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + + String databaseName; + boolean ifExists; + + public DropDatabaseDesc(String databaseName, boolean ifExists) { + super(); + this.databaseName = databaseName; + this.ifExists = ifExists; + } + + @Explain(displayName = "database") + public String getDatabaseName() { + return databaseName; + } + + public void setDatabaseName(String databaseName) { + this.databaseName = databaseName; + } + + @Explain(displayName = "if exists") + public boolean getIfExists() { + return ifExists; + } + + public void setIfExists(boolean ifExists) { + this.ifExists = ifExists; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowDatabasesDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowDatabasesDesc.java new file mode 100644 index 0000000..1cf7589 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowDatabasesDesc.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; + +/** + * ShowDatabasesDesc. + * + */ +@Explain(displayName = "Show Databases") +public class ShowDatabasesDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + String pattern; + String resFile; + /** + * database name for the result of show databases. + */ + private static final String database = "show"; + /** + * thrift ddl for the result of show databases. 
+ */ + private static final String schema = "tab_name#string"; + + public String getDatabase() { + return database; + } + + public String getSchema() { + return schema; + } + + public ShowDatabasesDesc() { + } + + /** + * @param resFile + */ + public ShowDatabasesDesc(Path resFile) { + this.resFile = resFile.toString(); + pattern = null; + } + + /** + * @param pattern + * names of databases to show + */ + public ShowDatabasesDesc(Path resFile, String pattern) { + this.resFile = resFile.toString(); + this.pattern = pattern; + } + + /** + * @return the pattern + */ + @Explain(displayName = "pattern") + public String getPattern() { + return pattern; + } + + /** + * @param pattern + * the pattern to set + */ + public void setPattern(String pattern) { + this.pattern = pattern; + } + + /** + * @return the resFile + */ + @Explain(displayName = "result file", normalExplain = false) + public String getResFile() { + return resFile; + } + + /** + * @param resFile + * the resFile to set + */ + public void setResFile(String resFile) { + this.resFile = resFile; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SwitchDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/SwitchDatabaseDesc.java new file mode 100644 index 0000000..0cad7c1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/SwitchDatabaseDesc.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +/** + * SwitchDatabaseDesc. 
+ * + */ +@Explain(displayName = "Switch Database") +public class SwitchDatabaseDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + + String databaseName; + + public SwitchDatabaseDesc() { + } + + public SwitchDatabaseDesc(String databaseName) { + super(); + this.databaseName = databaseName; + } + + @Explain(displayName = "name") + public String getDatabaseName() { + return databaseName; + } + + public void setDatabaseName(String databaseName) { + this.databaseName = databaseName; + } +} diff --git ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java index b4651a2..ee077ed 100644 --- ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java +++ ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.ql; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + import java.io.BufferedInputStream; import java.io.DataInputStream; import java.io.File; @@ -302,12 +304,21 @@ public class QTestUtil { * Clear out any side effects of running tests */ public void clearTestSideEffects () throws Exception { - // delete any tables other than the source tables - for (String s: db.getAllTables()) { - if (!srcTables.contains(s)) { - db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, s); + // Delete any tables other than the source tables + // and any databases other than the default database. + for (String dbName : db.getAllDatabases()) { + db.setCurrentDatabase(dbName); + for (String tblName : db.getAllTables()) { + if (!DEFAULT_DATABASE_NAME.equals(dbName) || !srcTables.contains(tblName)) { + db.dropTable(dbName, tblName); + } + } + if (!DEFAULT_DATABASE_NAME.equals(dbName)) { + db.dropDatabase(dbName, false); } } + db.setCurrentDatabase(DEFAULT_DATABASE_NAME); + // allocate and initialize a new conf since a test can // modify conf by using 'set' commands conf = new HiveConf (Driver.class); @@ -410,7 +421,7 @@ public class QTestUtil { db.createTable("src_sequencefile", cols, null, SequenceFileInputFormat.class, SequenceFileOutputFormat.class); - Table srcThrift = new Table("src_thrift"); + Table srcThrift = new Table(db.getCurrentDatabase(), "src_thrift"); srcThrift.setInputFormatClass(SequenceFileInputFormat.class.getName()); srcThrift.setOutputFormatClass(SequenceFileOutputFormat.class.getName()); srcThrift.setSerializationLib(ThriftDeserializer.class.getName()); diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index ab39ca4..0dd6524 100755 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.ql.metadata; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; @@ -88,22 +90,18 @@ public class TestHive extends TestCase { e1.printStackTrace(); assertTrue("Unable to drop table", false); } - Table tbl = new Table(tableName); + Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); List fields = tbl.getCols(); - fields.add(new FieldSchema("col1", Constants.INT_TYPE_NAME, - "int -- first column")); - fields.add(new FieldSchema("col2", Constants.STRING_TYPE_NAME, - "string -- second column")); - fields.add(new FieldSchema("col3", Constants.DOUBLE_TYPE_NAME, - "double -- thrift column")); + 
fields.add(new FieldSchema("col1", Constants.INT_TYPE_NAME, "int -- first column")); + fields.add(new FieldSchema("col2", Constants.STRING_TYPE_NAME, "string -- second column")); + fields.add(new FieldSchema("col3", Constants.DOUBLE_TYPE_NAME, "double -- thrift column")); tbl.setFields(fields); tbl.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class); tbl.setInputFormatClass(SequenceFileInputFormat.class); - tbl.setProperty("comment", - "this is a test table created as part junit tests"); + tbl.setProperty("comment", "this is a test table created as part junit tests"); List bucketCols = tbl.getBucketCols(); bucketCols.add("col1"); @@ -156,9 +154,10 @@ public class TestHive extends TestCase { .getOwner(), ft.getOwner()); assertEquals("Table retention didn't match for table: " + tableName, tbl.getRetention(), ft.getRetention()); - assertEquals("Data location is not set correctly", wh - .getDefaultTablePath(MetaStoreUtils.DEFAULT_DATABASE_NAME, - tableName).toString(), ft.getDataLocation().toString()); + String dbPath = wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(); + assertEquals("Data location is not set correctly", + wh.getDefaultTablePath(DEFAULT_DATABASE_NAME, tableName).toString(), + ft.getDataLocation().toString()); // now that URI is set correctly, set the original table's uri and then // compare the two tables tbl.setDataLocation(ft.getDataLocation()); @@ -191,7 +190,7 @@ public class TestHive extends TestCase { /** * Tests create and fetch of a thrift based table. - * + * * @throws Throwable */ public void testThriftTable() throws Throwable { @@ -203,7 +202,7 @@ public class TestHive extends TestCase { System.err.println(StringUtils.stringifyException(e1)); assertTrue("Unable to drop table", false); } - Table tbl = new Table(tableName); + Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); tbl.setInputFormatClass(SequenceFileInputFormat.class.getName()); tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName()); tbl.setSerializationLib(ThriftDeserializer.class.getName()); @@ -229,9 +228,10 @@ public class TestHive extends TestCase { .getOwner(), ft.getOwner()); assertEquals("Table retention didn't match for table: " + tableName, tbl.getRetention(), ft.getRetention()); - assertEquals("Data location is not set correctly", wh - .getDefaultTablePath(MetaStoreUtils.DEFAULT_DATABASE_NAME, - tableName).toString(), ft.getDataLocation().toString()); + String dbPath = wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(); + assertEquals("Data location is not set correctly", + wh.getDefaultTablePath(DEFAULT_DATABASE_NAME, tableName).toString(), + ft.getDataLocation().toString()); // now that URI is set correctly, set the original table's uri and then // compare the two tables tbl.setDataLocation(ft.getDataLocation()); @@ -245,7 +245,7 @@ public class TestHive extends TestCase { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to fetch table correctly: " + tableName, false); } - hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(DEFAULT_DATABASE_NAME, tableName); } catch (Throwable e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testThriftTable() failed"); @@ -254,8 +254,7 @@ public class TestHive extends TestCase { } private static Table createTestTable(String dbName, String tableName) throws HiveException { - Table tbl = new Table(tableName); - tbl.setDbName(dbName); + Table tbl = new Table(dbName, tableName); 
tbl.setInputFormatClass(SequenceFileInputFormat.class.getName()); tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName()); tbl.setSerializationLib(ThriftDeserializer.class.getName()); @@ -279,10 +278,11 @@ public class TestHive extends TestCase { public void testGetAndDropTables() throws Throwable { try { + Warehouse wh = new Warehouse(hiveConf); String dbName = "db_for_testgettables"; String table1Name = "table1"; - hm.dropDatabase(dbName); - hm.createDatabase(dbName, ""); + hm.dropDatabase(dbName, true); + hm.createDatabase(dbName, wh.getDefaultDatabasePath(dbName).toString()); List ts = new ArrayList(2); ts.add(table1Name); diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java index 26cc71a..3a8e5b4 100644 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java @@ -10,11 +10,12 @@ import junit.framework.TestCase; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; -import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.mapred.TextInputFormat; import org.apache.thrift.TException; @@ -37,6 +38,7 @@ public class TestHiveMetaStoreChecker extends TestCase { private List partCols; private List> parts; + private Warehouse wh; @Override protected void setUp() throws Exception { @@ -57,6 +59,8 @@ public class TestHiveMetaStoreChecker extends TestCase { part2.put(partDateName, "2008-01-02"); part2.put(partCityName, "stockholm"); parts.add(part2); + HiveConf conf = new HiveConf(this.getClass()); + wh = new Warehouse(conf); // cleanup hive.dropTable(dbName, tableName, true, true); @@ -89,9 +93,9 @@ public class TestHiveMetaStoreChecker extends TestCase { assertTrue(result.getPartitionsNotOnFs().isEmpty()); assertTrue(result.getPartitionsNotInMs().isEmpty()); - hive.createDatabase(dbName, ""); + hive.createDatabase(dbName, wh.getDefaultDatabasePath(dbName).toString()); - Table table = new Table(tableName); + Table table = new Table(dbName, tableName); table.setDbName(dbName); table.setInputFormatClass(TextInputFormat.class); table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class); @@ -159,9 +163,9 @@ public class TestHiveMetaStoreChecker extends TestCase { public void testPartitionsCheck() throws HiveException, MetaException, IOException, TException, AlreadyExistsException { - hive.createDatabase(dbName, ""); + hive.createDatabase(dbName, wh.getDefaultDatabasePath(dbName).toString()); - Table table = new Table(tableName); + Table table = new Table(dbName, tableName); table.setDbName(dbName); table.setInputFormatClass(TextInputFormat.class); table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class); diff --git ql/src/test/queries/clientnegative/database_create_already_exists.q ql/src/test/queries/clientnegative/database_create_already_exists.q new file mode 100644 index 0000000..3af7607 --- /dev/null +++ ql/src/test/queries/clientnegative/database_create_already_exists.q @@ -0,0 +1,5 @@ +SHOW DATABASES; + +-- Try to 
create a database that already exists +CREATE DATABASE test_db; +CREATE DATABASE test_db; diff --git ql/src/test/queries/clientnegative/database_create_invalid_name.q ql/src/test/queries/clientnegative/database_create_invalid_name.q new file mode 100644 index 0000000..c585ebc --- /dev/null +++ ql/src/test/queries/clientnegative/database_create_invalid_name.q @@ -0,0 +1,4 @@ +SHOW DATABASES; + +-- Try to create a database with an invalid name +CREATE DATABASE `test-db`; diff --git ql/src/test/queries/clientnegative/database_drop_does_not_exist.q ql/src/test/queries/clientnegative/database_drop_does_not_exist.q new file mode 100644 index 0000000..66a940e --- /dev/null +++ ql/src/test/queries/clientnegative/database_drop_does_not_exist.q @@ -0,0 +1,4 @@ +SHOW DATABASES; + +-- Try to drop a database that does not exist +DROP DATABASE does_not_exist; diff --git ql/src/test/queries/clientnegative/database_drop_not_empty.q ql/src/test/queries/clientnegative/database_drop_not_empty.q new file mode 100644 index 0000000..ae5a443 --- /dev/null +++ ql/src/test/queries/clientnegative/database_drop_not_empty.q @@ -0,0 +1,8 @@ +SHOW DATABASES; + +-- Try to drop a non-empty database +CREATE DATABASE test_db; +USE test_db; +CREATE TABLE t(a INT); +USE default; +DROP DATABASE test_db; diff --git ql/src/test/queries/clientnegative/database_switch_does_not_exist.q ql/src/test/queries/clientnegative/database_switch_does_not_exist.q new file mode 100644 index 0000000..5cd4697 --- /dev/null +++ ql/src/test/queries/clientnegative/database_switch_does_not_exist.q @@ -0,0 +1,4 @@ +SHOW DATABASES; + +-- Try to switch to a database that does not exist +USE does_not_exist; diff --git ql/src/test/queries/clientpositive/add_part_exist.q ql/src/test/queries/clientpositive/add_part_exist.q index 54d0096..d176661 100644 --- ql/src/test/queries/clientpositive/add_part_exist.q +++ ql/src/test/queries/clientpositive/add_part_exist.q @@ -12,3 +12,26 @@ SHOW PARTITIONS add_part_test; ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03'); SHOW PARTITIONS add_part_test; + +DROP TABLE add_part_test; +SHOW TABLES; + +-- Test ALTER TABLE ADD PARTITION in non-default Database +CREATE DATABASE add_part_test_db; +USE add_part_test_db; +SHOW TABLES; + +CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING); +SHOW PARTITIONS add_part_test; + +ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01'); +SHOW PARTITIONS add_part_test; + +ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01'); +SHOW PARTITIONS add_part_test; + +ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02'); +SHOW PARTITIONS add_part_test; + +ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03'); +SHOW PARTITIONS add_part_test; diff --git ql/src/test/queries/clientpositive/alter1.q ql/src/test/queries/clientpositive/alter1.q index 6f95685..5fd1945 100644 --- ql/src/test/queries/clientpositive/alter1.q +++ ql/src/test/queries/clientpositive/alter1.q @@ -24,3 +24,48 @@ describe extended alter1; alter table alter1 replace columns (a int, b int, c string); describe alter1; + +-- Cleanup +DROP TABLE alter1; +SHOW TABLES; + +-- With non-default Database + +CREATE DATABASE alter1_db; +USE alter1_db; +SHOW TABLES; + +CREATE TABLE alter1(a INT, b INT); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='3'); +DESCRIBE EXTENDED alter1; + 
+ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='9'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20'); +DESCRIBE EXTENDED alter1; + +add jar ../data/files/TestSerDe.jar; +ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9'); +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe'; +DESCRIBE EXTENDED alter1; + +ALTER TABLE alter1 REPLACE COLUMNS (a int, b int, c string); +DESCRIBE alter1; + +DROP TABLE alter1; +USE default; +DROP DATABASE alter1_db; diff --git ql/src/test/queries/clientpositive/alter2.q ql/src/test/queries/clientpositive/alter2.q index 2388e21..ddf5787 100644 --- ql/src/test/queries/clientpositive/alter2.q +++ ql/src/test/queries/clientpositive/alter2.q @@ -18,3 +18,38 @@ show partitions alter2; alter table alter2 add partition (insertdate='2008-01-02') location '2008/01/02'; describe extended alter2; show partitions alter2; + +-- Cleanup +DROP TABLE alter2; +SHOW TABLES; + +-- Using non-default Database + +CREATE DATABASE alter2_db; +USE alter2_db; +SHOW TABLES; + +CREATE TABLE alter2(a int, b int) PARTITIONED BY (insertdate string); +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01'; +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02'; +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +DROP TABLE alter2; + +CREATE EXTERNAL TABLE alter2(a int, b int) PARTITIONED BY (insertdate string); +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01'; +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; +ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02'; +DESCRIBE EXTENDED alter2; +SHOW PARTITIONS alter2; + +DROP TABLE alter2; +USE default; +DROP DATABASE alter2_db; diff --git ql/src/test/queries/clientpositive/alter3.q ql/src/test/queries/clientpositive/alter3.q index d9d3b07..86fe4f6 100644 --- ql/src/test/queries/clientpositive/alter3.q +++ ql/src/test/queries/clientpositive/alter3.q @@ -19,3 +19,37 @@ select col1 from alter3_src; alter table alter3_like rename to alter3_like_renamed; describe extended alter3_like_renamed; + +-- Cleanup +DROP TABLE alter3_src; +DROP TABLE alter3_renamed; +DROP TABLE alter3_like_renamed; +SHOW TABLES; + +-- With non-default Database + +CREATE DATABASE alter3_db; +USE alter3_db; +SHOW TABLES; + +CREATE TABLE alter3_src (col1 STRING) STORED AS TEXTFILE ; +LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter3_src ; + +CREATE TABLE alter3 (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE; + +CREATE TABLE alter3_like LIKE alter3; + +INSERT OVERWRITE TABLE alter3 PARTITION (pCol1='test_part', pcol2='test_part') SELECT col1 FROM alter3_src ; +SELECT * FROM alter3 WHERE pcol1='test_part' AND pcol2='test_part'; + +ALTER TABLE alter3 RENAME TO alter3_renamed; +DESCRIBE EXTENDED alter3_renamed; +DESCRIBE EXTENDED alter3_renamed PARTITION (pCol1='test_part', pcol2='test_part'); +SELECT * FROM alter3_renamed WHERE 
pcol1='test_part' AND pcol2='test_part'; + +INSERT OVERWRITE TABLE alter3_like +PARTITION (pCol1='test_part', pcol2='test_part') +SELECT col1 FROM alter3_src; +ALTER TABLE alter3_like RENAME TO alter3_like_renamed; + +DESCRIBE EXTENDED alter3_like_renamed; diff --git ql/src/test/queries/clientpositive/alter4.q ql/src/test/queries/clientpositive/alter4.q index 35fa441..542ff01 100644 --- ql/src/test/queries/clientpositive/alter4.q +++ ql/src/test/queries/clientpositive/alter4.q @@ -3,3 +3,24 @@ DESCRIBE EXTENDED set_bucketing_test; ALTER TABLE set_bucketing_test NOT CLUSTERED; DESCRIBE EXTENDED set_bucketing_test; + +-- Cleanup +DROP TABLE set_bucketing_test; +SHOW TABLES; + +-- with non-default Database + +CREATE DATABASE alter4_db; +USE alter4_db; +SHOW TABLES; + +CREATE TABLE set_bucketing_test (key INT, value STRING) CLUSTERED BY (key) INTO 10 BUCKETS; +DESCRIBE EXTENDED set_bucketing_test; + +ALTER TABLE set_bucketing_test NOT CLUSTERED; +DESCRIBE EXTENDED set_bucketing_test; + +DROP TABLE set_bucketing_test; +USE default; +DROP DATABASE alter4_db; +SHOW DATABASES; diff --git ql/src/test/queries/clientpositive/database.q ql/src/test/queries/clientpositive/database.q new file mode 100644 index 0000000..0e2fe97 --- /dev/null +++ ql/src/test/queries/clientpositive/database.q @@ -0,0 +1,88 @@ +SHOW DATABASES; + +-- CREATE with comment +CREATE DATABASE test_db COMMENT 'Hive test database'; +SHOW DATABASES; + +-- CREATE INE already exists +CREATE DATABASE IF NOT EXISTS test_db; +SHOW DATABASES; + +-- DROP +DROP DATABASE test_db; +SHOW DATABASES; + +-- CREATE INE doesn't exist +CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database'; +SHOW DATABASES; + +-- DROP IE exists +DROP DATABASE IF EXISTS test_db; +SHOW DATABASES; + +-- DROP IE doesn't exist +DROP DATABASE IF EXISTS test_db; + +-- SHOW +CREATE DATABASE test_db; +SHOW DATABASES; + +-- SHOW pattern +SHOW DATABASES LIKE 'test*'; + +-- SHOW pattern +SHOW DATABASES LIKE '*ef*'; + + +USE test_db; +SHOW DATABASES; + +-- CREATE table in non-default DB +CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE; +SHOW TABLES; + +-- DESCRIBE table in non-default DB +DESCRIBE test_table; + +-- DESCRIBE EXTENDED in non-default DB +DESCRIBE EXTENDED test_table; + +-- CREATE LIKE in non-default DB +CREATE TABLE test_table_like LIKE test_table; +SHOW TABLES; +DESCRIBE EXTENDED test_table_like; + +-- LOAD and SELECT +LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE test_table ; +SELECT * FROM test_table; + +-- DROP and CREATE w/o LOAD +DROP TABLE test_table; +SHOW TABLES; + +CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE; +SHOW TABLES; + +SELECT * FROM test_table; + +-- CREATE table that exists in DEFAULT +USE test_db; +CREATE TABLE src (col1 STRING) STORED AS TEXTFILE; +SHOW TABLES; + +SELECT * FROM src LIMIT 10; + +USE default; +SELECT * FROM src LIMIT 10; + +-- DROP DATABASE +USE test_db; + +DROP TABLE src; +DROP TABLE test_table; +DROP TABLE test_table_like; +SHOW TABLES; + +USE default; +DROP DATABASE test_db; +SHOW DATABASES; diff --git ql/src/test/queries/clientpositive/rename_column.q ql/src/test/queries/clientpositive/rename_column.q index ce82ff7..a3f3f30 100644 --- ql/src/test/queries/clientpositive/rename_column.q +++ ql/src/test/queries/clientpositive/rename_column.q @@ -22,5 +22,36 @@ DESCRIBE kv_rename_test; ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b; DESCRIBE kv_rename_test; +DROP TABLE kv_rename_test; +SHOW TABLES; +-- Using non-default Database +CREATE DATABASE kv_rename_test_db; 
+USE kv_rename_test_db; +CREATE TABLE kv_rename_test(a int, b int, c int); +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a a STRING; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a a1 INT; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a1 a2 INT FIRST; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a2 a INT AFTER b; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a a1 INT COMMENT 'test comment1'; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE a1 a2 INT COMMENT 'test comment2' FIRST; +DESCRIBE kv_rename_test; + +ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b; +DESCRIBE kv_rename_test; + +DROP TABLE kv_rename_test; +SHOW TABLES; diff --git ql/src/test/results/clientnegative/database_create_already_exists.q.out ql/src/test/results/clientnegative/database_create_already_exists.q.out new file mode 100644 index 0000000..08c04f9 --- /dev/null +++ ql/src/test/results/clientnegative/database_create_already_exists.q.out @@ -0,0 +1,15 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to create a database that already exists +CREATE DATABASE test_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- Try to create a database that already exists +CREATE DATABASE test_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: CREATE DATABASE test_db +PREHOOK: type: CREATEDATABASE +Failed with exception Database test_db already exists +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientnegative/database_create_invalid_name.q.out ql/src/test/results/clientnegative/database_create_invalid_name.q.out new file mode 100644 index 0000000..a526c11 --- /dev/null +++ ql/src/test/results/clientnegative/database_create_invalid_name.q.out @@ -0,0 +1,10 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to create a database with an invalid name +CREATE DATABASE `test-db` +PREHOOK: type: CREATEDATABASE +FAILED: Error in metadata: InvalidObjectException(message:test-db is not a valid database name) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out new file mode 100644 index 0000000..80c00cd --- /dev/null +++ ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out @@ -0,0 +1,10 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to drop a database that does not exist +DROP DATABASE does_not_exist +PREHOOK: type: DROPDATABASE +Failed with exception There is no database named does_not_exist +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientnegative/database_drop_not_empty.q.out ql/src/test/results/clientnegative/database_drop_not_empty.q.out new file mode 100644 index 0000000..baa8f37 --- /dev/null +++ ql/src/test/results/clientnegative/database_drop_not_empty.q.out @@ -0,0 +1,28 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to drop a non-empty 
database +CREATE DATABASE test_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- Try to drop a non-empty database +CREATE DATABASE test_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE test_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE test_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: CREATE TABLE t(a INT) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE t(a INT) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: test_db@t +PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE test_db +PREHOOK: type: DROPDATABASE +FAILED: Error in metadata: InvalidOperationException(message:Database test_db is not empty) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientnegative/database_switch_does_not_exist.q.out ql/src/test/results/clientnegative/database_switch_does_not_exist.q.out new file mode 100644 index 0000000..8b5674d --- /dev/null +++ ql/src/test/results/clientnegative/database_switch_does_not_exist.q.out @@ -0,0 +1,10 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- Try to switch to a database that does not exist +USE does_not_exist +PREHOOK: type: SWITCHDATABASE +FAILED: Error in metadata: ERROR: The database does_not_exist does not exist. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask diff --git ql/src/test/results/clientpositive/add_part_exist.q.out ql/src/test/results/clientpositive/add_part_exist.q.out index 4bcdf8a..f875357 100644 --- ql/src/test/results/clientpositive/add_part_exist.q.out +++ ql/src/test/results/clientpositive/add_part_exist.q.out @@ -49,3 +49,85 @@ POSTHOOK: type: SHOWPARTITIONS ds=2010-01-01 ds=2010-01-02 ds=2010-01-03 +PREHOOK: query: DROP TABLE add_part_test +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE add_part_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@add_part_test +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- Test ALTER TABLE ADD PARTITION in non-default Database +CREATE DATABASE add_part_test_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- Test ALTER TABLE ADD PARTITION in non-default Database +CREATE DATABASE add_part_test_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE add_part_test_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE add_part_test_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: add_part_test_db@add_part_test +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS +PREHOOK: query: ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01') +PREHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: query: ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: 
add_part_test_db@add_part_test@ds=2010-01-01 +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS +ds=2010-01-01 +PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') +PREHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') +POSTHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS +ds=2010-01-01 +PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02') +PREHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-02 +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS +ds=2010-01-01 +ds=2010-01-02 +PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03') +PREHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-03 +PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: type: SHOWPARTITIONS +ds=2010-01-01 +ds=2010-01-02 +ds=2010-01-03 diff --git ql/src/test/results/clientpositive/alter1.q.out ql/src/test/results/clientpositive/alter1.q.out index ea143a6..ed4e5f1 100644 --- ql/src/test/results/clientpositive/alter1.q.out +++ ql/src/test/results/clientpositive/alter1.q.out @@ -10,7 +10,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:jssarma, createTime:1279735687, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1279735687}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1281930885, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], 
sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1281930885}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set tblproperties ('a'='1', 'c'='3') PREHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: query: alter table alter1 set tblproperties ('a'='1', 'c'='3') @@ -24,7 +24,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:jssarma, createTime:1279735687, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=jssarma, c=3, last_modified_time=1279735688, a=1, transient_lastDdlTime=1279735688}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1281930885, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=carl, c=3, last_modified_time=1281930885, a=1, transient_lastDdlTime=1281930885}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set tblproperties ('a'='1', 'c'='4', 'd'='3') PREHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: query: alter table alter1 set tblproperties ('a'='1', 'c'='4', 'd'='3') @@ -38,7 +38,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:jssarma, createTime:1279735687, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{d=3, last_modified_by=jssarma, c=4, last_modified_time=1279735688, a=1, transient_lastDdlTime=1279735688}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1281930885, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), 
FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{d=3, last_modified_by=carl, c=4, last_modified_time=1281930885, a=1, transient_lastDdlTime=1281930885}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set tblproperties ('EXTERNAL'='TRUE') PREHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: query: alter table alter1 set tblproperties ('EXTERNAL'='TRUE') @@ -52,7 +52,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:jssarma, createTime:1279735687, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=TRUE, d=3, last_modified_by=jssarma, c=4, last_modified_time=1279735688, a=1, transient_lastDdlTime=1279735688}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1281930885, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=TRUE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930885, a=1, transient_lastDdlTime=1281930885}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) PREHOOK: query: alter table alter1 set tblproperties ('EXTERNAL'='FALSE') PREHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: query: alter table alter1 set tblproperties ('EXTERNAL'='FALSE') @@ -66,7 +66,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:jssarma, createTime:1279735687, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, 
serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=jssarma, c=4, last_modified_time=1279735688, a=1, transient_lastDdlTime=1279735688}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1281930885, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930886, a=1, transient_lastDdlTime=1281930886}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set serdeproperties('s1'='9') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: query: alter table alter1 set serdeproperties('s1'='9') @@ -80,7 +80,7 @@ POSTHOOK: type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:jssarma, createTime:1279735687, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=jssarma, c=4, last_modified_time=1279735688, a=1, transient_lastDdlTime=1279735688}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1281930885, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930886, a=1, transient_lastDdlTime=1281930886}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set serdeproperties('s1'='10', 's2' ='20') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: query: alter table alter1 set serdeproperties('s1'='10', 's2' ='20') @@ -94,7 +94,7 @@ POSTHOOK: 
type: DESCTABLE a int b int -Detailed Table Information Table(tableName:alter1, dbName:default, owner:jssarma, createTime:1279735687, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=10, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=jssarma, c=4, last_modified_time=1279735689, a=1, transient_lastDdlTime=1279735689}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1281930885, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=10, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930886, a=1, transient_lastDdlTime=1281930886}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9') PREHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: query: alter table alter1 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9') @@ -108,7 +108,7 @@ POSTHOOK: type: DESCTABLE a string from deserializer b string from deserializer -Detailed Table Information Table(tableName:alter1, dbName:default, owner:jssarma, createTime:1279735687, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:from deserializer), FieldSchema(name:b, type:int, comment:from deserializer)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.TestSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=jssarma, c=4, last_modified_time=1279735689, a=1, transient_lastDdlTime=1279735689}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1281930885, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:from deserializer), FieldSchema(name:b, type:int, comment:from deserializer)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, 
inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.TestSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930886, a=1, transient_lastDdlTime=1281930886}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 set serde 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' PREHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: query: alter table alter1 set serde 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' @@ -122,7 +122,7 @@ POSTHOOK: type: DESCTABLE a string from deserializer b string from deserializer -Detailed Table Information Table(tableName:alter1, dbName:default, owner:jssarma, createTime:1279735687, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:from deserializer), FieldSchema(name:b, type:string, comment:from deserializer)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=jssarma, c=4, last_modified_time=1279735689, a=1, transient_lastDdlTime=1279735689}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter1, dbName:default, owner:carl, createTime:1281930885, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:from deserializer), FieldSchema(name:b, type:string, comment:from deserializer)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930886, a=1, transient_lastDdlTime=1281930886}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table alter1 replace columns (a int, b int, c string) PREHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: query: alter table alter1 replace columns (a int, b int, c string) @@ -136,3 +136,189 @@ POSTHOOK: type: DESCTABLE a int b int c string +PREHOOK: query: -- Cleanup +DROP TABLE alter1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Cleanup +DROP TABLE alter1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@alter1 +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- With non-default Database + +CREATE DATABASE alter1_db +PREHOOK: type: 
CREATEDATABASE +POSTHOOK: query: -- With non-default Database + +CREATE DATABASE alter1_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE alter1_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE alter1_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TABLE alter1(a INT, b INT) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter1(a INT, b INT) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1281930887, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1281930887}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='3') +PREHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='3') +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1281930887, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=carl, c=3, last_modified_time=1281930887, a=1, transient_lastDdlTime=1281930887}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3') +PREHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3') +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1281930887, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), 
FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{d=3, last_modified_by=carl, c=4, last_modified_time=1281930887, a=1, transient_lastDdlTime=1281930887}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE') +PREHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE') +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1281930887, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=TRUE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930888, a=1, transient_lastDdlTime=1281930888}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE') +PREHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE') +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1281930887, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930888, a=1, transient_lastDdlTime=1281930888}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='9') +PREHOOK: type: ALTERTABLE_SERDEPROPERTIES +POSTHOOK: query: ALTER TABLE alter1 SET 
SERDEPROPERTIES('s1'='9') +POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1281930887, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930888, a=1, transient_lastDdlTime=1281930888}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20') +PREHOOK: type: ALTERTABLE_SERDEPROPERTIES +POSTHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20') +POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a int +b int + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1281930887, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=10, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930888, a=1, transient_lastDdlTime=1281930888}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9') +PREHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9') +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a string from deserializer +b string from deserializer + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1281930887, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:from deserializer), FieldSchema(name:b, type:int, comment:from deserializer)], 
location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.TestSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930888, a=1, transient_lastDdlTime=1281930888}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: type: DESCTABLE +a string from deserializer +b string from deserializer + +Detailed Table Information Table(tableName:alter1, dbName:alter1_db, owner:carl, createTime:1281930887, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:from deserializer), FieldSchema(name:b, type:string, comment:from deserializer)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter1_db.db/alter1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{s2=20, s1=9, serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, d=3, last_modified_by=carl, c=4, last_modified_time=1281930888, a=1, transient_lastDdlTime=1281930888}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE alter1 REPLACE COLUMNS (a int, b int, c string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: query: ALTER TABLE alter1 REPLACE COLUMNS (a int, b int, c string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: alter1_db@alter1 +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: DESCRIBE alter1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE alter1 +POSTHOOK: type: DESCTABLE +a int +b int +c string +PREHOOK: query: DROP TABLE alter1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE alter1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: alter1_db@alter1 +PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE alter1_db +PREHOOK: type: DROPDATABASE +POSTHOOK: query: DROP DATABASE alter1_db +POSTHOOK: type: DROPDATABASE diff --git ql/src/test/results/clientpositive/alter2.q.out ql/src/test/results/clientpositive/alter2.q.out index 6de5f19..18d1be2 100644 --- ql/src/test/results/clientpositive/alter2.q.out +++ ql/src/test/results/clientpositive/alter2.q.out @@ -11,7 +11,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:jssarma, createTime:1279735690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), 
FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1279735690}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1281930889, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1281930889}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 @@ -29,7 +29,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:jssarma, createTime:1279735690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1279735690}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1281930889, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1281930889}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show 
partitions alter2 @@ -48,7 +48,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:jssarma, createTime:1279735690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1279735690}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1281930889, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1281930889}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 @@ -73,7 +73,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:jssarma, createTime:1279735690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1279735690}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1281930890, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), 
partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1281930890}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 @@ -91,7 +91,7 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:jssarma, createTime:1279735690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1279735690}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1281930890, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1281930890}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 @@ -110,10 +110,177 @@ a int b int insertdate string -Detailed Table Information Table(tableName:alter2, dbName:default, owner:jssarma, createTime:1279735690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1279735690}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +Detailed Table Information Table(tableName:alter2, dbName:default, owner:carl, createTime:1281930890, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2, 
inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1281930890}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) PREHOOK: query: show partitions alter2 PREHOOK: type: SHOWPARTITIONS POSTHOOK: query: show partitions alter2 POSTHOOK: type: SHOWPARTITIONS insertdate=2008-01-01 insertdate=2008-01-02 +PREHOOK: query: -- Cleanup +DROP TABLE alter2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Cleanup +DROP TABLE alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@alter2 +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- Using non-default Database + +CREATE DATABASE alter2_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- Using non-default Database + +CREATE DATABASE alter2_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE alter2_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE alter2_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TABLE alter2(a int, b int) PARTITIONED BY (insertdate string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter2(a int, b int) PARTITIONED BY (insertdate string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter2_db@alter2 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1281930891, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1281930891}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +PREHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01' +PREHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01' +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: alter2_db@alter2@insertdate=2008-01-01 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table 
Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1281930891, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1281930891}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +insertdate=2008-01-01 +PREHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02' +PREHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02' +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: alter2_db@alter2@insertdate=2008-01-02 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1281930891, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{transient_lastDdlTime=1281930891}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +insertdate=2008-01-01 +insertdate=2008-01-02 +PREHOOK: query: DROP TABLE alter2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: alter2_db@alter2 +PREHOOK: query: CREATE EXTERNAL TABLE alter2(a int, b int) PARTITIONED BY (insertdate string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE EXTERNAL TABLE alter2(a int, b int) PARTITIONED BY (insertdate string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter2_db@alter2 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1281930892, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], 
location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1281930892}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +PREHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01' +PREHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-01') LOCATION '2008/01/01' +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: alter2_db@alter2@insertdate=2008-01-01 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1281930892, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1281930892}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +insertdate=2008-01-01 +PREHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02' +PREHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: query: ALTER TABLE alter2 ADD PARTITION (insertdate='2008-01-02') LOCATION '2008/01/02' +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: alter2_db@alter2@insertdate=2008-01-02 +PREHOOK: query: DESCRIBE EXTENDED alter2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter2 +POSTHOOK: type: DESCTABLE +a int +b int +insertdate string + +Detailed Table Information Table(tableName:alter2, dbName:alter2_db, owner:carl, createTime:1281930892, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null), FieldSchema(name:b, type:int, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter2_db.db/alter2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), 
partitionKeys:[FieldSchema(name:insertdate, type:string, comment:null)], parameters:{EXTERNAL=TRUE, transient_lastDdlTime=1281930892}, viewOriginalText:null, viewExpandedText:null, tableType:EXTERNAL_TABLE) +PREHOOK: query: SHOW PARTITIONS alter2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter2 +POSTHOOK: type: SHOWPARTITIONS +insertdate=2008-01-01 +insertdate=2008-01-02 +PREHOOK: query: DROP TABLE alter2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: alter2_db@alter2 +PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE alter2_db +PREHOOK: type: DROPDATABASE +POSTHOOK: query: DROP DATABASE alter2_db +POSTHOOK: type: DROPDATABASE diff --git ql/src/test/results/clientpositive/alter3.q.out ql/src/test/results/clientpositive/alter3.q.out index 2a1f4d5..fde28c2 100644 --- ql/src/test/results/clientpositive/alter3.q.out +++ ql/src/test/results/clientpositive/alter3.q.out @@ -30,11 +30,11 @@ POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE PREHOOK: query: select * from alter3 where pcol1='test_part' and pcol2='test_part' PREHOOK: type: QUERY PREHOOK: Input: default@alter3@pcol1=test_part/pcol2=test_part -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-08-15_543_4992541295505544702/10000 +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_21-09-20_433_7510982091928222176/-mr-10000 POSTHOOK: query: select * from alter3 where pcol1='test_part' and pcol2='test_part' POSTHOOK: type: QUERY POSTHOOK: Input: default@alter3@pcol1=test_part/pcol2=test_part -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-08-15_543_4992541295505544702/10000 +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_21-09-20_433_7510982091928222176/-mr-10000 POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] 1 test_part test_part 2 test_part test_part @@ -58,7 +58,7 @@ col1 string pcol1 string pcol2 string -Detailed Table Information Table(tableName:alter3_renamed, dbName:default, owner:jssarma, createTime:1279735692, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter3_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{last_modified_by=jssarma, last_modified_time=1279735695, transient_lastDdlTime=1279735695}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter3_renamed, dbName:default, owner:carl, createTime:1281931755, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_renamed, 
inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{last_modified_by=carl, last_modified_time=1281931760, transient_lastDdlTime=1281931760}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: describe extended alter3_renamed partition (pCol1='test_part', pcol2='test_part') PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended alter3_renamed partition (pCol1='test_part', pcol2='test_part') @@ -68,15 +68,15 @@ col1 string pcol1 string pcol2 string -Detailed Partition Information Partition(values:[test_part, test_part], dbName:default, tableName:alter3_renamed, createTime:1279735695, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter3_renamed/pcol1=test_part/pcol2=test_part, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1279735695}) +Detailed Partition Information Partition(values:[test_part, test_part], dbName:default, tableName:alter3_renamed, createTime:1281931760, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_renamed/pcol1=test_part/pcol2=test_part, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1281931760}) PREHOOK: query: select * from alter3_renamed where pcol1='test_part' and pcol2='test_part' PREHOOK: type: QUERY PREHOOK: Input: default@alter3_renamed@pcol1=test_part/pcol2=test_part -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-08-15_958_1754525751517388753/10000 +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_21-09-21_104_5348341576784234708/-mr-10000 POSTHOOK: query: select * from alter3_renamed where pcol1='test_part' and pcol2='test_part' POSTHOOK: type: QUERY POSTHOOK: Input: default@alter3_renamed@pcol1=test_part/pcol2=test_part -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-08-15_958_1754525751517388753/10000 +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_21-09-21_104_5348341576784234708/-mr-10000 POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] 1 test_part test_part 2 test_part test_part @@ -116,4 +116,208 @@ col1 string pcol1 string pcol2 string -Detailed Table 
Information Table(tableName:alter3_like_renamed, dbName:default, owner:jssarma, createTime:1279735692, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/alter3_like_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{EXTERNAL=FALSE, last_modified_by=jssarma, last_modified_time=1279735698, transient_lastDdlTime=1279735698}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:alter3_like_renamed, dbName:default, owner:carl, createTime:1281931755, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_like_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{EXTERNAL=FALSE, last_modified_by=carl, last_modified_time=1281931765, transient_lastDdlTime=1281931765}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: -- Cleanup +DROP TABLE alter3_src +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Cleanup +DROP TABLE alter3_src +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@alter3_src +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: DROP TABLE alter3_renamed +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE alter3_renamed +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@alter3_renamed +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: DROP TABLE alter3_like_renamed +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE alter3_like_renamed +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@alter3_like_renamed +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW TABLES +PREHOOK: type: 
SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- With non-default Database + +CREATE DATABASE alter3_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- With non-default Database + +CREATE DATABASE alter3_db +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: USE alter3_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE alter3_db +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter3_src (col1 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter3_src (col1 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter3_db@alter3_src +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter3_src +PREHOOK: type: LOAD +POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter3_src +POSTHOOK: type: LOAD +POSTHOOK: Output: alter3_db@alter3_src +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter3 (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter3 (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter3_db@alter3 +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), 
] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter3_like LIKE alter3 +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter3_like LIKE alter3 +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter3_db@alter3_like +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: INSERT OVERWRITE TABLE alter3 PARTITION (pCol1='test_part', pcol2='test_part') SELECT col1 FROM alter3_src +PREHOOK: type: QUERY +PREHOOK: Input: alter3_db@alter3_src +PREHOOK: Output: alter3_db@alter3@pcol1=test_part/pcol2=test_part +POSTHOOK: query: INSERT OVERWRITE TABLE alter3 PARTITION (pCol1='test_part', pcol2='test_part') SELECT col1 FROM alter3_src +POSTHOOK: type: QUERY +POSTHOOK: Input: alter3_db@alter3_src +POSTHOOK: Output: alter3_db@alter3@pcol1=test_part/pcol2=test_part +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM alter3 WHERE pcol1='test_part' AND pcol2='test_part' +PREHOOK: type: QUERY +PREHOOK: Input: alter3_db@alter3@pcol1=test_part/pcol2=test_part +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_21-09-30_790_319631436431580169/-mr-10000 +POSTHOOK: query: SELECT * FROM alter3 WHERE pcol1='test_part' AND pcol2='test_part' +POSTHOOK: type: QUERY +POSTHOOK: Input: alter3_db@alter3@pcol1=test_part/pcol2=test_part +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_21-09-30_790_319631436431580169/-mr-10000 +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +1 test_part test_part +2 test_part test_part +3 test_part test_part +4 test_part test_part +5 test_part test_part +6 test_part test_part +PREHOOK: query: ALTER TABLE alter3 RENAME TO alter3_renamed +PREHOOK: type: ALTERTABLE_RENAME +POSTHOOK: query: ALTER TABLE alter3 RENAME TO alter3_renamed +POSTHOOK: type: ALTERTABLE_RENAME +POSTHOOK: Input: alter3_db@alter3 +POSTHOOK: Output: alter3_db@alter3_renamed +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like 
PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: DESCRIBE EXTENDED alter3_renamed +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter3_renamed +POSTHOOK: type: DESCTABLE +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +col1 string +pcol1 string +pcol2 string + +Detailed Table Information Table(tableName:alter3_renamed, dbName:alter3_db, owner:carl, createTime:1281931766, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_db.db/alter3_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{last_modified_by=carl, last_modified_time=1281931771, transient_lastDdlTime=1281931771}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: DESCRIBE EXTENDED alter3_renamed PARTITION (pCol1='test_part', pcol2='test_part') +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter3_renamed PARTITION (pCol1='test_part', pcol2='test_part') +POSTHOOK: type: DESCTABLE +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +col1 string +pcol1 string +pcol2 string + +Detailed Partition Information Partition(values:[test_part, test_part], dbName:alter3_db, tableName:alter3_renamed, createTime:1281931770, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_db.db/alter3_renamed/pcol1=test_part/pcol2=test_part, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1281931770}) +PREHOOK: query: SELECT * FROM alter3_renamed WHERE pcol1='test_part' AND pcol2='test_part' +PREHOOK: type: QUERY +PREHOOK: Input: alter3_db@alter3_renamed@pcol1=test_part/pcol2=test_part +PREHOOK: Output: 
file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_21-09-31_306_3102824579181674666/-mr-10000 +POSTHOOK: query: SELECT * FROM alter3_renamed WHERE pcol1='test_part' AND pcol2='test_part' +POSTHOOK: type: QUERY +POSTHOOK: Input: alter3_db@alter3_renamed@pcol1=test_part/pcol2=test_part +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_21-09-31_306_3102824579181674666/-mr-10000 +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +1 test_part test_part +2 test_part test_part +3 test_part test_part +4 test_part test_part +5 test_part test_part +6 test_part test_part +PREHOOK: query: INSERT OVERWRITE TABLE alter3_like +PARTITION (pCol1='test_part', pcol2='test_part') +SELECT col1 FROM alter3_src +PREHOOK: type: QUERY +PREHOOK: Input: alter3_db@alter3_src +PREHOOK: Output: alter3_db@alter3_like@pcol1=test_part/pcol2=test_part +POSTHOOK: query: INSERT OVERWRITE TABLE alter3_like +PARTITION (pCol1='test_part', pcol2='test_part') +SELECT col1 FROM alter3_src +POSTHOOK: type: QUERY +POSTHOOK: Input: alter3_db@alter3_src +POSTHOOK: Output: alter3_db@alter3_like@pcol1=test_part/pcol2=test_part +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: ALTER TABLE alter3_like RENAME TO alter3_like_renamed +PREHOOK: type: ALTERTABLE_RENAME +POSTHOOK: query: ALTER TABLE alter3_like RENAME TO alter3_like_renamed +POSTHOOK: type: ALTERTABLE_RENAME +POSTHOOK: Input: alter3_db@alter3_like +POSTHOOK: Output: alter3_db@alter3_like_renamed +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: DESCRIBE EXTENDED alter3_like_renamed +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED alter3_like_renamed +POSTHOOK: type: DESCTABLE +POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3 
PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part,pcol2=test_part).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ] +col1 string +pcol1 string +pcol2 string + +Detailed Table Information Table(tableName:alter3_like_renamed, dbName:alter3_db, owner:carl, createTime:1281931766, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter3_db.db/alter3_like_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{EXTERNAL=FALSE, last_modified_by=carl, last_modified_time=1281931778, transient_lastDdlTime=1281931778}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) diff --git ql/src/test/results/clientpositive/alter4.q.out ql/src/test/results/clientpositive/alter4.q.out index da7cd9a..cccb148 100644 --- ql/src/test/results/clientpositive/alter4.q.out +++ ql/src/test/results/clientpositive/alter4.q.out @@ -10,7 +10,7 @@ POSTHOOK: type: DESCTABLE key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:jssarma, createTime:1279735699, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1279735699}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:carl, createTime:1281930907, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1281930907}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: ALTER TABLE 
set_bucketing_test NOT CLUSTERED PREHOOK: type: null POSTHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED @@ -24,4 +24,84 @@ POSTHOOK: type: DESCTABLE key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:jssarma, createTime:1279735699, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=jssarma, last_modified_time=1279735699, transient_lastDdlTime=1279735699}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:carl, createTime:1281930907, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=carl, last_modified_time=1281930907, transient_lastDdlTime=1281930907}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: -- Cleanup +DROP TABLE set_bucketing_test +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Cleanup +DROP TABLE set_bucketing_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@set_bucketing_test +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- with non-default Database + +CREATE DATABASE alter4_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- with non-default Database + +CREATE DATABASE alter4_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE alter4_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE alter4_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TABLE set_bucketing_test (key INT, value STRING) CLUSTERED BY (key) INTO 10 BUCKETS +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE set_bucketing_test (key INT, value STRING) CLUSTERED BY (key) INTO 10 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter4_db@set_bucketing_test +PREHOOK: query: DESCRIBE EXTENDED set_bucketing_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED set_bucketing_test +POSTHOOK: type: DESCTABLE +key int +value string + +Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:carl, createTime:1281930907, lastAccessTime:0, retention:0, 
sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1281930907}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED +PREHOOK: type: null +POSTHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED +POSTHOOK: type: null +POSTHOOK: Input: alter4_db@set_bucketing_test +POSTHOOK: Output: alter4_db@set_bucketing_test +PREHOOK: query: DESCRIBE EXTENDED set_bucketing_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED set_bucketing_test +POSTHOOK: type: DESCTABLE +key int +value string + +Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:carl, createTime:1281930907, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=carl, last_modified_time=1281930908, transient_lastDdlTime=1281930908}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: DROP TABLE set_bucketing_test +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE set_bucketing_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: alter4_db@set_bucketing_test +PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE alter4_db +PREHOOK: type: DROPDATABASE +POSTHOOK: query: DROP DATABASE alter4_db +POSTHOOK: type: DROPDATABASE +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default diff --git ql/src/test/results/clientpositive/database.q.out ql/src/test/results/clientpositive/database.q.out new file mode 100644 index 0000000..ef5c2d3 --- /dev/null +++ ql/src/test/results/clientpositive/database.q.out @@ -0,0 +1,291 @@ +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- CREATE with comment +CREATE DATABASE test_db COMMENT 'Hive test database' +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- CREATE with comment +CREATE DATABASE test_db COMMENT 'Hive test database' +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +test_db +PREHOOK: query: -- CREATE INE already exists +CREATE DATABASE IF NOT EXISTS test_db +PREHOOK: type: CREATEDATABASE 
+POSTHOOK: query: -- CREATE INE already exists +CREATE DATABASE IF NOT EXISTS test_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +test_db +PREHOOK: query: -- DROP +DROP DATABASE test_db +PREHOOK: type: DROPDATABASE +POSTHOOK: query: -- DROP +DROP DATABASE test_db +POSTHOOK: type: DROPDATABASE +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- CREATE INE doesn't exist +CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database' +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- CREATE INE doesn't exist +CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database' +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +test_db +PREHOOK: query: -- DROP IE exists +DROP DATABASE IF EXISTS test_db +PREHOOK: type: DROPDATABASE +POSTHOOK: query: -- DROP IE exists +DROP DATABASE IF EXISTS test_db +POSTHOOK: type: DROPDATABASE +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: -- DROP IE doesn't exist +DROP DATABASE IF EXISTS test_db +PREHOOK: type: DROPDATABASE +POSTHOOK: query: -- DROP IE doesn't exist +DROP DATABASE IF EXISTS test_db +POSTHOOK: type: DROPDATABASE +PREHOOK: query: -- SHOW +CREATE DATABASE test_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- SHOW +CREATE DATABASE test_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +test_db +PREHOOK: query: -- SHOW pattern +SHOW DATABASES LIKE 'test*' +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: -- SHOW pattern +SHOW DATABASES LIKE 'test*' +POSTHOOK: type: SHOWDATABASES +test_db +PREHOOK: query: -- SHOW pattern +SHOW DATABASES LIKE '*ef*' +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: -- SHOW pattern +SHOW DATABASES LIKE '*ef*' +POSTHOOK: type: SHOWDATABASES +default +PREHOOK: query: USE test_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE test_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default +test_db +PREHOOK: query: -- CREATE table in non-default DB +CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- CREATE table in non-default DB +CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: test_db@test_table +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +test_table +PREHOOK: query: -- DESCRIBE table in non-default DB +DESCRIBE test_table +PREHOOK: type: DESCTABLE +POSTHOOK: query: -- DESCRIBE table in non-default DB +DESCRIBE test_table +POSTHOOK: type: DESCTABLE +col1 string +PREHOOK: query: -- DESCRIBE EXTENDED in non-default DB +DESCRIBE EXTENDED test_table +PREHOOK: type: DESCTABLE +POSTHOOK: query: -- DESCRIBE EXTENDED in non-default DB +DESCRIBE EXTENDED test_table +POSTHOOK: type: DESCTABLE +col1 string + +Detailed Table Information Table(tableName:test_table, dbName:test_db, owner:carl, createTime:1281929946, lastAccessTime:0, retention:0, 
sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/test_db.db/test_table, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1281929946}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: -- CREATE LIKE in non-default DB +CREATE TABLE test_table_like LIKE test_table +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- CREATE LIKE in non-default DB +CREATE TABLE test_table_like LIKE test_table +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: test_db@test_table_like +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +test_table +test_table_like +PREHOOK: query: DESCRIBE EXTENDED test_table_like +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED test_table_like +POSTHOOK: type: DESCTABLE +col1 string + +Detailed Table Information Table(tableName:test_table_like, dbName:test_db, owner:carl, createTime:1281929947, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null)], location:pfile:/Users/carl/Projects/hive/build/ql/test/data/warehouse/test_db.db/test_table_like, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, transient_lastDdlTime=1281929947}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: -- LOAD and SELECT +LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE test_table +PREHOOK: type: LOAD +POSTHOOK: query: -- LOAD and SELECT +LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE test_table +POSTHOOK: type: LOAD +POSTHOOK: Output: test_db@test_table +PREHOOK: query: SELECT * FROM test_table +PREHOOK: type: QUERY +PREHOOK: Input: test_db@test_table +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_20-39-07_934_5944156231762187563/-mr-10000 +POSTHOOK: query: SELECT * FROM test_table +POSTHOOK: type: QUERY +POSTHOOK: Input: test_db@test_table +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_20-39-07_934_5944156231762187563/-mr-10000 +1 +2 +3 +4 +5 +6 +PREHOOK: query: -- DROP and CREATE w/o LOAD +DROP TABLE test_table +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- DROP and CREATE w/o LOAD +DROP TABLE test_table +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: test_db@test_table +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +test_table_like +PREHOOK: query: CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: test_db@test_table +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: 
query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +test_table +test_table_like +PREHOOK: query: SELECT * FROM test_table +PREHOOK: type: QUERY +PREHOOK: Input: test_db@test_table +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_20-39-08_384_7341088480958217486/-mr-10000 +POSTHOOK: query: SELECT * FROM test_table +POSTHOOK: type: QUERY +POSTHOOK: Input: test_db@test_table +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_20-39-08_384_7341088480958217486/-mr-10000 +PREHOOK: query: -- CREATE table that exists in DEFAULT +USE test_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: -- CREATE table that exists in DEFAULT +USE test_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: CREATE TABLE src (col1 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE src (col1 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: test_db@src +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +test_table +test_table_like +PREHOOK: query: SELECT * FROM src LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: test_db@src +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_20-39-08_592_503329405204841203/-mr-10000 +POSTHOOK: query: SELECT * FROM src LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: test_db@src +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_20-39-08_592_503329405204841203/-mr-10000 +PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SELECT * FROM src LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_20-39-08_693_6205056472475029067/-mr-10000 +POSTHOOK: query: SELECT * FROM src LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/var/folders/b7/b7UUwNZdF1KKHtM+5la6f++++TI/-Tmp-/carl/hive_2010-08-15_20-39-08_693_6205056472475029067/-mr-10000 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +PREHOOK: query: -- DROP DATABASE +USE test_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: -- DROP DATABASE +USE test_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP TABLE src +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE src +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: test_db@src +PREHOOK: query: DROP TABLE test_table +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE test_table +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: test_db@test_table +PREHOOK: query: DROP TABLE test_table_like +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE test_table_like +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: test_db@test_table_like +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE test_db +PREHOOK: type: DROPDATABASE +POSTHOOK: query: DROP DATABASE test_db +POSTHOOK: type: DROPDATABASE +PREHOOK: query: SHOW DATABASES +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: SHOW DATABASES +POSTHOOK: type: SHOWDATABASES +default diff --git ql/src/test/results/clientpositive/rename_column.q.out 
ql/src/test/results/clientpositive/rename_column.q.out index 8d5da8d..6fc5cc0 100644 --- ql/src/test/results/clientpositive/rename_column.q.out +++ ql/src/test/results/clientpositive/rename_column.q.out @@ -101,3 +101,142 @@ POSTHOOK: type: DESCTABLE b int a int test comment2 c int +PREHOOK: query: DROP TABLE kv_rename_test +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE kv_rename_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@kv_rename_test +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- Using non-default Database +CREATE DATABASE kv_rename_test_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- Using non-default Database +CREATE DATABASE kv_rename_test_db +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE kv_rename_test_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE kv_rename_test_db +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: CREATE TABLE kv_rename_test(a int, b int, c int) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE kv_rename_test(a int, b int, c int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: kv_rename_test_db@kv_rename_test +PREHOOK: query: DESCRIBE kv_rename_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE kv_rename_test +POSTHOOK: type: DESCTABLE +a int +b int +c int +PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a a STRING +PREHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a a STRING +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: kv_rename_test_db@kv_rename_test +POSTHOOK: Output: kv_rename_test_db@kv_rename_test +PREHOOK: query: DESCRIBE kv_rename_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE kv_rename_test +POSTHOOK: type: DESCTABLE +a string +b int +c int +PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a a1 INT +PREHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a a1 INT +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: kv_rename_test_db@kv_rename_test +POSTHOOK: Output: kv_rename_test_db@kv_rename_test +PREHOOK: query: DESCRIBE kv_rename_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE kv_rename_test +POSTHOOK: type: DESCTABLE +a1 int +b int +c int +PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT FIRST +PREHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT FIRST +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: kv_rename_test_db@kv_rename_test +POSTHOOK: Output: kv_rename_test_db@kv_rename_test +PREHOOK: query: DESCRIBE kv_rename_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE kv_rename_test +POSTHOOK: type: DESCTABLE +a2 int +b int +c int +PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a2 a INT AFTER b +PREHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a2 a INT AFTER b +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: kv_rename_test_db@kv_rename_test +POSTHOOK: Output: kv_rename_test_db@kv_rename_test +PREHOOK: query: DESCRIBE kv_rename_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE kv_rename_test +POSTHOOK: type: DESCTABLE +b int +a int +c int +PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a a1 INT COMMENT 'test comment1' +PREHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a a1 INT COMMENT 'test comment1' +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: 
Input: kv_rename_test_db@kv_rename_test +POSTHOOK: Output: kv_rename_test_db@kv_rename_test +PREHOOK: query: DESCRIBE kv_rename_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE kv_rename_test +POSTHOOK: type: DESCTABLE +b int +a1 int test comment1 +c int +PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT COMMENT 'test comment2' FIRST +PREHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT COMMENT 'test comment2' FIRST +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: kv_rename_test_db@kv_rename_test +POSTHOOK: Output: kv_rename_test_db@kv_rename_test +PREHOOK: query: DESCRIBE kv_rename_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE kv_rename_test +POSTHOOK: type: DESCTABLE +a2 int test comment2 +b int +c int +PREHOOK: query: ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b +PREHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: query: ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: kv_rename_test_db@kv_rename_test +POSTHOOK: Output: kv_rename_test_db@kv_rename_test +PREHOOK: query: DESCRIBE kv_rename_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE kv_rename_test +POSTHOOK: type: DESCTABLE +b int +a int test comment2 +c int +PREHOOK: query: DROP TABLE kv_rename_test +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE kv_rename_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: kv_rename_test_db@kv_rename_test +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES