diff --git metastore/if/hive_metastore.thrift metastore/if/hive_metastore.thrift index 2b43605..cfafcc6 100755 --- metastore/if/hive_metastore.thrift +++ metastore/if/hive_metastore.thrift @@ -256,6 +256,8 @@ service ThriftHiveMetastore extends fb303.FacebookService //index Index add_index(1:Index new_index, 2: Table index_table) throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx) + throws (1:InvalidOperationException o1, 2:MetaException o2) bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData) throws(1:NoSuchObjectException o1, 2:MetaException o2) Index get_index_by_name(1:string db_name 2:string tbl_name, 3:string index_name) diff --git metastore/src/gen-cpp/ThriftHiveMetastore.cpp metastore/src/gen-cpp/ThriftHiveMetastore.cpp index 566e4e9..1bdfb52 100644 --- metastore/src/gen-cpp/ThriftHiveMetastore.cpp +++ metastore/src/gen-cpp/ThriftHiveMetastore.cpp @@ -7767,6 +7767,226 @@ uint32_t ThriftHiveMetastore_add_index_presult::read(apache::thrift::protocol::T return xfer; } +uint32_t ThriftHiveMetastore_alter_index_args::read(apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbname); + this->__isset.dbname = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->base_tbl_name); + this->__isset.base_tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->idx_name); + this->__isset.idx_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->new_idx.read(iprot); + this->__isset.new_idx = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_index_args::write(apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_index_args"); + xfer += oprot->writeFieldBegin("dbname", apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbname); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("base_tbl_name", apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->base_tbl_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("idx_name", apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->idx_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("new_idx", apache::thrift::protocol::T_STRUCT, 4); + xfer += this->new_idx.write(oprot); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t 
ThriftHiveMetastore_alter_index_pargs::write(apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_index_pargs"); + xfer += oprot->writeFieldBegin("dbname", apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->dbname))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("base_tbl_name", apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->base_tbl_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("idx_name", apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->idx_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("new_idx", apache::thrift::protocol::T_STRUCT, 4); + xfer += (*(this->new_idx)).write(oprot); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_index_result::read(apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_index_result::write(apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_index_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_index_presult::read(apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + uint32_t 
ThriftHiveMetastore_drop_index_by_name_args::read(apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; @@ -10982,6 +11202,70 @@ void ThriftHiveMetastoreClient::recv_add_index(Index& _return) throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "add_index failed: unknown result"); } +void ThriftHiveMetastoreClient::alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +{ + send_alter_index(dbname, base_tbl_name, idx_name, new_idx); + recv_alter_index(); +} + +void ThriftHiveMetastoreClient::send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("alter_index", apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_alter_index_pargs args; + args.dbname = &dbname; + args.base_tbl_name = &base_tbl_name; + args.idx_name = &idx_name; + args.new_idx = &new_idx; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->flush(); + oprot_->getTransport()->writeEnd(); +} + +void ThriftHiveMetastoreClient::recv_alter_index() +{ + + int32_t rseqid = 0; + std::string fname; + apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == apache::thrift::protocol::T_EXCEPTION) { + apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != apache::thrift::protocol::T_REPLY) { + iprot_->skip(apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE); + } + if (fname.compare("alter_index") != 0) { + iprot_->skip(apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME); + } + ThriftHiveMetastore_alter_index_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + return; +} + bool ThriftHiveMetastoreClient::drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) { send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); @@ -12438,6 +12722,39 @@ void ThriftHiveMetastoreProcessor::process_add_index(int32_t seqid, apache::thri oprot->getTransport()->writeEnd(); } +void ThriftHiveMetastoreProcessor::process_alter_index(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot) +{ + ThriftHiveMetastore_alter_index_args args; + args.read(iprot); + iprot->readMessageEnd(); + iprot->getTransport()->readEnd(); + + ThriftHiveMetastore_alter_index_result result; + try { + iface_->alter_index(args.dbname, args.base_tbl_name, args.idx_name, args.new_idx); + } catch (InvalidOperationException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("alter_index", apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + 
oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); + return; + } + + oprot->writeMessageBegin("alter_index", apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); +} + void ThriftHiveMetastoreProcessor::process_drop_index_by_name(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot) { ThriftHiveMetastore_drop_index_by_name_args args; diff --git metastore/src/gen-cpp/ThriftHiveMetastore.h metastore/src/gen-cpp/ThriftHiveMetastore.h index c67481e..d7f3bc3 100644 --- metastore/src/gen-cpp/ThriftHiveMetastore.h +++ metastore/src/gen-cpp/ThriftHiveMetastore.h @@ -49,6 +49,7 @@ class ThriftHiveMetastoreIf : virtual public facebook::fb303::FacebookServiceIf virtual void partition_name_to_vals(std::vector & _return, const std::string& part_name) = 0; virtual void partition_name_to_spec(std::map & _return, const std::string& part_name) = 0; virtual void add_index(Index& _return, const Index& new_index, const Table& index_table) = 0; + virtual void alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) = 0; virtual bool drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) = 0; virtual void get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) = 0; virtual void get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) = 0; @@ -164,6 +165,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void add_index(Index& /* _return */, const Index& /* new_index */, const Table& /* index_table */) { return; } + void alter_index(const std::string& /* dbname */, const std::string& /* base_tbl_name */, const std::string& /* idx_name */, const Index& /* new_idx */) { + return; + } bool drop_index_by_name(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* index_name */, const bool /* deleteData */) { bool _return = false; return _return; @@ -3904,6 +3908,120 @@ class ThriftHiveMetastore_add_index_presult { }; +class ThriftHiveMetastore_alter_index_args { + public: + + ThriftHiveMetastore_alter_index_args() : dbname(""), base_tbl_name(""), idx_name("") { + } + + virtual ~ThriftHiveMetastore_alter_index_args() throw() {} + + std::string dbname; + std::string base_tbl_name; + std::string idx_name; + Index new_idx; + + struct __isset { + __isset() : dbname(false), base_tbl_name(false), idx_name(false), new_idx(false) {} + bool dbname; + bool base_tbl_name; + bool idx_name; + bool new_idx; + } __isset; + + bool operator == (const ThriftHiveMetastore_alter_index_args & rhs) const + { + if (!(dbname == rhs.dbname)) + return false; + if (!(base_tbl_name == rhs.base_tbl_name)) + return false; + if (!(idx_name == rhs.idx_name)) + return false; + if (!(new_idx == rhs.new_idx)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_alter_index_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_alter_index_args & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_alter_index_pargs { + public: + 
+ + virtual ~ThriftHiveMetastore_alter_index_pargs() throw() {} + + const std::string* dbname; + const std::string* base_tbl_name; + const std::string* idx_name; + const Index* new_idx; + + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_alter_index_result { + public: + + ThriftHiveMetastore_alter_index_result() { + } + + virtual ~ThriftHiveMetastore_alter_index_result() throw() {} + + InvalidOperationException o1; + MetaException o2; + + struct __isset { + __isset() : o1(false), o2(false) {} + bool o1; + bool o2; + } __isset; + + bool operator == (const ThriftHiveMetastore_alter_index_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_alter_index_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_alter_index_result & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_alter_index_presult { + public: + + + virtual ~ThriftHiveMetastore_alter_index_presult() throw() {} + + InvalidOperationException o1; + MetaException o2; + + struct __isset { + __isset() : o1(false), o2(false) {} + bool o1; + bool o2; + } __isset; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + +}; + class ThriftHiveMetastore_drop_index_by_name_args { public: @@ -4477,6 +4595,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public f void add_index(Index& _return, const Index& new_index, const Table& index_table); void send_add_index(const Index& new_index, const Table& index_table); void recv_add_index(Index& _return); + void alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx); + void send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx); + void recv_alter_index(); bool drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData); void send_drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData); bool recv_drop_index_by_name(); @@ -4531,6 +4652,7 @@ class ThriftHiveMetastoreProcessor : virtual public apache::thrift::TProcessor, void process_partition_name_to_vals(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_partition_name_to_spec(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_add_index(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); + void process_alter_index(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_drop_index_by_name(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_get_index_by_name(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_get_indexes(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); @@ -4573,6 +4695,7 @@ class ThriftHiveMetastoreProcessor : virtual public apache::thrift::TProcessor, 
processMap_["partition_name_to_vals"] = &ThriftHiveMetastoreProcessor::process_partition_name_to_vals; processMap_["partition_name_to_spec"] = &ThriftHiveMetastoreProcessor::process_partition_name_to_spec; processMap_["add_index"] = &ThriftHiveMetastoreProcessor::process_add_index; + processMap_["alter_index"] = &ThriftHiveMetastoreProcessor::process_alter_index; processMap_["drop_index_by_name"] = &ThriftHiveMetastoreProcessor::process_drop_index_by_name; processMap_["get_index_by_name"] = &ThriftHiveMetastoreProcessor::process_get_index_by_name; processMap_["get_indexes"] = &ThriftHiveMetastoreProcessor::process_get_indexes; @@ -4974,6 +5097,13 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi } } + void alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { + uint32_t sz = ifaces_.size(); + for (uint32_t i = 0; i < sz; ++i) { + ifaces_[i]->alter_index(dbname, base_tbl_name, idx_name, new_idx); + } + } + bool drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) { uint32_t sz = ifaces_.size(); for (uint32_t i = 0; i < sz; ++i) { diff --git metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 66b2fb3..3ef03b5 100644 --- metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -192,6 +192,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("add_index\n"); } + void alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { + // Your implementation goes here + printf("alter_index\n"); + } + bool drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) { // Your implementation goes here printf("drop_index_by_name\n"); diff --git metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 743fcbf..545b64e 100644 --- metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -93,6 +93,8 @@ public class ThriftHiveMetastore { public Index add_index(Index new_index, Table index_table) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + public void alter_index(String dbname, String base_tbl_name, String idx_name, Index new_idx) throws InvalidOperationException, MetaException, TException; + public boolean drop_index_by_name(String db_name, String tbl_name, String index_name, boolean deleteData) throws NoSuchObjectException, MetaException, TException; public Index get_index_by_name(String db_name, String tbl_name, String index_name) throws MetaException, NoSuchObjectException, TException; @@ -1461,6 +1463,45 @@ public class ThriftHiveMetastore { throw new TApplicationException(TApplicationException.MISSING_RESULT, "add_index failed: unknown result"); } + public void alter_index(String dbname, String base_tbl_name, String idx_name, Index new_idx) throws InvalidOperationException, MetaException, TException + { + send_alter_index(dbname, base_tbl_name, idx_name, new_idx); + recv_alter_index(); + } + + public void 
send_alter_index(String dbname, String base_tbl_name, String idx_name, Index new_idx) throws TException + { + oprot_.writeMessageBegin(new TMessage("alter_index", TMessageType.CALL, seqid_)); + alter_index_args args = new alter_index_args(); + args.dbname = dbname; + args.base_tbl_name = base_tbl_name; + args.idx_name = idx_name; + args.new_idx = new_idx; + args.write(oprot_); + oprot_.writeMessageEnd(); + oprot_.getTransport().flush(); + } + + public void recv_alter_index() throws InvalidOperationException, MetaException, TException + { + TMessage msg = iprot_.readMessageBegin(); + if (msg.type == TMessageType.EXCEPTION) { + TApplicationException x = TApplicationException.read(iprot_); + iprot_.readMessageEnd(); + throw x; + } + alter_index_result result = new alter_index_result(); + result.read(iprot_); + iprot_.readMessageEnd(); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + return; + } + public boolean drop_index_by_name(String db_name, String tbl_name, String index_name, boolean deleteData) throws NoSuchObjectException, MetaException, TException { send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); @@ -1664,6 +1705,7 @@ public class ThriftHiveMetastore { processMap_.put("partition_name_to_vals", new partition_name_to_vals()); processMap_.put("partition_name_to_spec", new partition_name_to_spec()); processMap_.put("add_index", new add_index()); + processMap_.put("alter_index", new alter_index()); processMap_.put("drop_index_by_name", new drop_index_by_name()); processMap_.put("get_index_by_name", new get_index_by_name()); processMap_.put("get_indexes", new get_indexes()); @@ -2714,6 +2756,36 @@ public class ThriftHiveMetastore { } + private class alter_index implements ProcessFunction { + public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException + { + alter_index_args args = new alter_index_args(); + args.read(iprot); + iprot.readMessageEnd(); + alter_index_result result = new alter_index_result(); + try { + iface_.alter_index(args.dbname, args.base_tbl_name, args.idx_name, args.new_idx); + } catch (InvalidOperationException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } catch (Throwable th) { + LOGGER.error("Internal error processing alter_index", th); + TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing alter_index"); + oprot.writeMessageBegin(new TMessage("alter_index", TMessageType.EXCEPTION, seqid)); + x.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + return; + } + oprot.writeMessageBegin(new TMessage("alter_index", TMessageType.REPLY, seqid)); + result.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + } + + } + private class drop_index_by_name implements ProcessFunction { public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException { @@ -23457,6 +23529,665 @@ public class ThriftHiveMetastore { } + public static class alter_index_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("alter_index_args"); + private static final TField DBNAME_FIELD_DESC = new TField("dbname", TType.STRING, (short)1); + private static final TField BASE_TBL_NAME_FIELD_DESC = new TField("base_tbl_name", TType.STRING, (short)2); + private static final TField IDX_NAME_FIELD_DESC = new TField("idx_name", TType.STRING, (short)3); + private static final TField NEW_IDX_FIELD_DESC = new 
TField("new_idx", TType.STRUCT, (short)4); + + private String dbname; + public static final int DBNAME = 1; + private String base_tbl_name; + public static final int BASE_TBL_NAME = 2; + private String idx_name; + public static final int IDX_NAME = 3; + private Index new_idx; + public static final int NEW_IDX = 4; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + } + + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(DBNAME, new FieldMetaData("dbname", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(BASE_TBL_NAME, new FieldMetaData("base_tbl_name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(IDX_NAME, new FieldMetaData("idx_name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(NEW_IDX, new FieldMetaData("new_idx", TFieldRequirementType.DEFAULT, + new StructMetaData(TType.STRUCT, Index.class))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(alter_index_args.class, metaDataMap); + } + + public alter_index_args() { + } + + public alter_index_args( + String dbname, + String base_tbl_name, + String idx_name, + Index new_idx) + { + this(); + this.dbname = dbname; + this.base_tbl_name = base_tbl_name; + this.idx_name = idx_name; + this.new_idx = new_idx; + } + + /** + * Performs a deep copy on other. + */ + public alter_index_args(alter_index_args other) { + if (other.isSetDbname()) { + this.dbname = other.dbname; + } + if (other.isSetBase_tbl_name()) { + this.base_tbl_name = other.base_tbl_name; + } + if (other.isSetIdx_name()) { + this.idx_name = other.idx_name; + } + if (other.isSetNew_idx()) { + this.new_idx = new Index(other.new_idx); + } + } + + @Override + public alter_index_args clone() { + return new alter_index_args(this); + } + + public String getDbname() { + return this.dbname; + } + + public void setDbname(String dbname) { + this.dbname = dbname; + } + + public void unsetDbname() { + this.dbname = null; + } + + // Returns true if field dbname is set (has been asigned a value) and false otherwise + public boolean isSetDbname() { + return this.dbname != null; + } + + public String getBase_tbl_name() { + return this.base_tbl_name; + } + + public void setBase_tbl_name(String base_tbl_name) { + this.base_tbl_name = base_tbl_name; + } + + public void unsetBase_tbl_name() { + this.base_tbl_name = null; + } + + // Returns true if field base_tbl_name is set (has been asigned a value) and false otherwise + public boolean isSetBase_tbl_name() { + return this.base_tbl_name != null; + } + + public String getIdx_name() { + return this.idx_name; + } + + public void setIdx_name(String idx_name) { + this.idx_name = idx_name; + } + + public void unsetIdx_name() { + this.idx_name = null; + } + + // Returns true if field idx_name is set (has been asigned a value) and false otherwise + public boolean isSetIdx_name() { + return this.idx_name != null; + } + + public Index getNew_idx() { + return this.new_idx; + } + + public void setNew_idx(Index new_idx) { + this.new_idx = new_idx; + } + + public void unsetNew_idx() { + this.new_idx = null; + } + + // Returns true if field new_idx is set (has been asigned a value) and false otherwise + public boolean isSetNew_idx() { + return this.new_idx != null; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case DBNAME: + if (value == null) { + unsetDbname(); + } else { + setDbname((String)value); + } + break; + + case 
BASE_TBL_NAME: + if (value == null) { + unsetBase_tbl_name(); + } else { + setBase_tbl_name((String)value); + } + break; + + case IDX_NAME: + if (value == null) { + unsetIdx_name(); + } else { + setIdx_name((String)value); + } + break; + + case NEW_IDX: + if (value == null) { + unsetNew_idx(); + } else { + setNew_idx((Index)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case DBNAME: + return getDbname(); + + case BASE_TBL_NAME: + return getBase_tbl_name(); + + case IDX_NAME: + return getIdx_name(); + + case NEW_IDX: + return getNew_idx(); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case DBNAME: + return isSetDbname(); + case BASE_TBL_NAME: + return isSetBase_tbl_name(); + case IDX_NAME: + return isSetIdx_name(); + case NEW_IDX: + return isSetNew_idx(); + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_index_args) + return this.equals((alter_index_args)that); + return false; + } + + public boolean equals(alter_index_args that) { + if (that == null) + return false; + + boolean this_present_dbname = true && this.isSetDbname(); + boolean that_present_dbname = true && that.isSetDbname(); + if (this_present_dbname || that_present_dbname) { + if (!(this_present_dbname && that_present_dbname)) + return false; + if (!this.dbname.equals(that.dbname)) + return false; + } + + boolean this_present_base_tbl_name = true && this.isSetBase_tbl_name(); + boolean that_present_base_tbl_name = true && that.isSetBase_tbl_name(); + if (this_present_base_tbl_name || that_present_base_tbl_name) { + if (!(this_present_base_tbl_name && that_present_base_tbl_name)) + return false; + if (!this.base_tbl_name.equals(that.base_tbl_name)) + return false; + } + + boolean this_present_idx_name = true && this.isSetIdx_name(); + boolean that_present_idx_name = true && that.isSetIdx_name(); + if (this_present_idx_name || that_present_idx_name) { + if (!(this_present_idx_name && that_present_idx_name)) + return false; + if (!this.idx_name.equals(that.idx_name)) + return false; + } + + boolean this_present_new_idx = true && this.isSetNew_idx(); + boolean that_present_new_idx = true && that.isSetNew_idx(); + if (this_present_new_idx || that_present_new_idx) { + if (!(this_present_new_idx && that_present_new_idx)) + return false; + if (!this.new_idx.equals(that.new_idx)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case DBNAME: + if (field.type == TType.STRING) { + this.dbname = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case BASE_TBL_NAME: + if (field.type == TType.STRING) { + this.base_tbl_name = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case IDX_NAME: + if (field.type == TType.STRING) { + this.idx_name = iprot.readString(); + } else { + 
TProtocolUtil.skip(iprot, field.type); + } + break; + case NEW_IDX: + if (field.type == TType.STRUCT) { + this.new_idx = new Index(); + this.new_idx.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); + } + + public void write(TProtocol oprot) throws TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.dbname != null) { + oprot.writeFieldBegin(DBNAME_FIELD_DESC); + oprot.writeString(this.dbname); + oprot.writeFieldEnd(); + } + if (this.base_tbl_name != null) { + oprot.writeFieldBegin(BASE_TBL_NAME_FIELD_DESC); + oprot.writeString(this.base_tbl_name); + oprot.writeFieldEnd(); + } + if (this.idx_name != null) { + oprot.writeFieldBegin(IDX_NAME_FIELD_DESC); + oprot.writeString(this.idx_name); + oprot.writeFieldEnd(); + } + if (this.new_idx != null) { + oprot.writeFieldBegin(NEW_IDX_FIELD_DESC); + this.new_idx.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_index_args("); + boolean first = true; + + sb.append("dbname:"); + if (this.dbname == null) { + sb.append("null"); + } else { + sb.append(this.dbname); + } + first = false; + if (!first) sb.append(", "); + sb.append("base_tbl_name:"); + if (this.base_tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.base_tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("idx_name:"); + if (this.idx_name == null) { + sb.append("null"); + } else { + sb.append(this.idx_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("new_idx:"); + if (this.new_idx == null) { + sb.append("null"); + } else { + sb.append(this.new_idx); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values + } + + } + + public static class alter_index_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("alter_index_result"); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2); + + private InvalidOperationException o1; + public static final int O1 = 1; + private MetaException o2; + public static final int O2 = 2; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + } + + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(alter_index_result.class, metaDataMap); + } + + public alter_index_result() { + } + + public alter_index_result( + InvalidOperationException o1, + MetaException o2) + { + this(); + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public alter_index_result(alter_index_result other) { + if (other.isSetO1()) { + this.o1 = new InvalidOperationException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + @Override + public alter_index_result clone() { + return new alter_index_result(this); + } + + public InvalidOperationException getO1() { + return this.o1; + } + + public void setO1(InvalidOperationException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + // Returns true if field o2 is set (has been asigned a value) and false otherwise + public boolean isSetO2() { + return this.o2 != null; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((InvalidOperationException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case O1: + return getO1(); + + case O2: + return getO2(); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_index_result) + return this.equals((alter_index_result)that); + return false; + } + + public boolean equals(alter_index_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new InvalidOperationException(); + this.o1.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case O2: + if (field.type == TType.STRUCT) { + this.o2 = new MetaException(); + this.o2.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); + } + + public void write(TProtocol oprot) 
throws TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO2()) { + oprot.writeFieldBegin(O2_FIELD_DESC); + this.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_index_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values + } + + } + public static class drop_index_by_name_args implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("drop_index_by_name_args"); private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); diff --git metastore/src/gen-php/ThriftHiveMetastore.php metastore/src/gen-php/ThriftHiveMetastore.php index ef972ef..894379c 100644 --- metastore/src/gen-php/ThriftHiveMetastore.php +++ metastore/src/gen-php/ThriftHiveMetastore.php @@ -44,6 +44,7 @@ interface ThriftHiveMetastoreIf extends FacebookServiceIf { public function partition_name_to_vals($part_name); public function partition_name_to_spec($part_name); public function add_index($new_index, $index_table); + public function alter_index($dbname, $base_tbl_name, $idx_name, $new_idx); public function drop_index_by_name($db_name, $tbl_name, $index_name, $deleteData); public function get_index_by_name($db_name, $tbl_name, $index_name); public function get_indexes($db_name, $tbl_name, $max_indexes); @@ -2014,6 +2015,63 @@ class ThriftHiveMetastoreClient extends FacebookServiceClient implements ThriftH throw new Exception("add_index failed: unknown result"); } + public function alter_index($dbname, $base_tbl_name, $idx_name, $new_idx) + { + $this->send_alter_index($dbname, $base_tbl_name, $idx_name, $new_idx); + $this->recv_alter_index(); + } + + public function send_alter_index($dbname, $base_tbl_name, $idx_name, $new_idx) + { + $args = new metastore_ThriftHiveMetastore_alter_index_args(); + $args->dbname = $dbname; + $args->base_tbl_name = $base_tbl_name; + $args->idx_name = $idx_name; + $args->new_idx = $new_idx; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'alter_index', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('alter_index', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_alter_index() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'metastore_ThriftHiveMetastore_alter_index_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + 
$this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new metastore_ThriftHiveMetastore_alter_index_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + return; + } + public function drop_index_by_name($db_name, $tbl_name, $index_name, $deleteData) { $this->send_drop_index_by_name($db_name, $tbl_name, $index_name, $deleteData); @@ -9909,6 +9967,239 @@ class metastore_ThriftHiveMetastore_add_index_result { } +class metastore_ThriftHiveMetastore_alter_index_args { + static $_TSPEC; + + public $dbname = null; + public $base_tbl_name = null; + public $idx_name = null; + public $new_idx = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbname', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'base_tbl_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'idx_name', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'new_idx', + 'type' => TType::STRUCT, + 'class' => 'metastore_Index', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbname'])) { + $this->dbname = $vals['dbname']; + } + if (isset($vals['base_tbl_name'])) { + $this->base_tbl_name = $vals['base_tbl_name']; + } + if (isset($vals['idx_name'])) { + $this->idx_name = $vals['idx_name']; + } + if (isset($vals['new_idx'])) { + $this->new_idx = $vals['new_idx']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_index_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbname); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->base_tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->idx_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->new_idx = new metastore_Index(); + $xfer += $this->new_idx->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_index_args'); + if ($this->dbname !== null) { + $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1); + $xfer += $output->writeString($this->dbname); + $xfer += $output->writeFieldEnd(); + } + if ($this->base_tbl_name !== null) { + $xfer += $output->writeFieldBegin('base_tbl_name', TType::STRING, 2); + $xfer += $output->writeString($this->base_tbl_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->idx_name !== null) { + $xfer += $output->writeFieldBegin('idx_name', TType::STRING, 3); + $xfer += $output->writeString($this->idx_name); + $xfer += $output->writeFieldEnd(); + } + if 
($this->new_idx !== null) { + if (!is_object($this->new_idx)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('new_idx', TType::STRUCT, 4); + $xfer += $this->new_idx->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class metastore_ThriftHiveMetastore_alter_index_result { + static $_TSPEC; + + public $o1 = null; + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'metastore_InvalidOperationException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'metastore_MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_index_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new metastore_InvalidOperationException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new metastore_MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_index_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class metastore_ThriftHiveMetastore_drop_index_by_name_args { static $_TSPEC; diff --git metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote old mode 100644 new mode 100755 index 80a1765..ef9b192 --- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -55,6 +55,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print ' partition_name_to_vals(string part_name)' print ' partition_name_to_spec(string part_name)' print ' Index add_index(Index new_index, Table index_table)' + print ' void alter_index(string dbname, string base_tbl_name, string idx_name, Index new_idx)' print ' bool drop_index_by_name(string db_name, string tbl_name, string index_name, bool deleteData)' print ' Index get_index_by_name(string db_name, string tbl_name, string index_name)' print ' get_indexes(string db_name, string tbl_name, i16 max_indexes)' @@ -311,6 +312,12 @@ elif cmd == 'add_index': sys.exit(1) 
pp.pprint(client.add_index(eval(args[0]),eval(args[1]),)) +elif cmd == 'alter_index': + if len(args) != 4: + print 'alter_index requires 4 args' + sys.exit(1) + pp.pprint(client.alter_index(args[0],args[1],args[2],eval(args[3]),)) + elif cmd == 'drop_index_by_name': if len(args) != 4: print 'drop_index_by_name requires 4 args' diff --git metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py index 78318f0..02ea96a 100644 --- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py +++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -294,6 +294,16 @@ class Iface(fb303.FacebookService.Iface): """ pass + def alter_index(self, dbname, base_tbl_name, idx_name, new_idx): + """ + Parameters: + - dbname + - base_tbl_name + - idx_name + - new_idx + """ + pass + def drop_index_by_name(self, db_name, tbl_name, index_name, deleteData): """ Parameters: @@ -1558,6 +1568,44 @@ class Client(fb303.FacebookService.Client, Iface): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "add_index failed: unknown result"); + def alter_index(self, dbname, base_tbl_name, idx_name, new_idx): + """ + Parameters: + - dbname + - base_tbl_name + - idx_name + - new_idx + """ + self.send_alter_index(dbname, base_tbl_name, idx_name, new_idx) + self.recv_alter_index() + + def send_alter_index(self, dbname, base_tbl_name, idx_name, new_idx): + self._oprot.writeMessageBegin('alter_index', TMessageType.CALL, self._seqid) + args = alter_index_args() + args.dbname = dbname + args.base_tbl_name = base_tbl_name + args.idx_name = idx_name + args.new_idx = new_idx + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_index(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = alter_index_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.o1 != None: + raise result.o1 + if result.o2 != None: + raise result.o2 + return + def drop_index_by_name(self, db_name, tbl_name, index_name, deleteData): """ Parameters: @@ -1748,6 +1796,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): self._processMap["partition_name_to_vals"] = Processor.process_partition_name_to_vals self._processMap["partition_name_to_spec"] = Processor.process_partition_name_to_spec self._processMap["add_index"] = Processor.process_add_index + self._processMap["alter_index"] = Processor.process_alter_index self._processMap["drop_index_by_name"] = Processor.process_drop_index_by_name self._processMap["get_index_by_name"] = Processor.process_get_index_by_name self._processMap["get_indexes"] = Processor.process_get_indexes @@ -2312,6 +2361,22 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor): oprot.writeMessageEnd() oprot.trans.flush() + def process_alter_index(self, seqid, iprot, oprot): + args = alter_index_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_index_result() + try: + self._handler.alter_index(args.dbname, args.base_tbl_name, args.idx_name, args.new_idx) + except InvalidOperationException, o1: + result.o1 = o1 + except MetaException, o2: + result.o2 = o2 + oprot.writeMessageBegin("alter_index", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_drop_index_by_name(self, seqid, iprot, oprot): args = 
drop_index_by_name_args() args.read(iprot) @@ -7610,6 +7675,169 @@ class add_index_result: def __ne__(self, other): return not (self == other) +class alter_index_args: + """ + Attributes: + - dbname + - base_tbl_name + - idx_name + - new_idx + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbname', None, None, ), # 1 + (2, TType.STRING, 'base_tbl_name', None, None, ), # 2 + (3, TType.STRING, 'idx_name', None, None, ), # 3 + (4, TType.STRUCT, 'new_idx', (Index, Index.thrift_spec), None, ), # 4 + ) + + def __init__(self, dbname=None, base_tbl_name=None, idx_name=None, new_idx=None,): + self.dbname = dbname + self.base_tbl_name = base_tbl_name + self.idx_name = idx_name + self.new_idx = new_idx + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.base_tbl_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.idx_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.new_idx = Index() + self.new_idx.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('alter_index_args') + if self.dbname != None: + oprot.writeFieldBegin('dbname', TType.STRING, 1) + oprot.writeString(self.dbname) + oprot.writeFieldEnd() + if self.base_tbl_name != None: + oprot.writeFieldBegin('base_tbl_name', TType.STRING, 2) + oprot.writeString(self.base_tbl_name) + oprot.writeFieldEnd() + if self.idx_name != None: + oprot.writeFieldBegin('idx_name', TType.STRING, 3) + oprot.writeString(self.idx_name) + oprot.writeFieldEnd() + if self.new_idx != None: + oprot.writeFieldBegin('new_idx', TType.STRUCT, 4) + self.new_idx.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class alter_index_result: + """ + Attributes: + - o1 + - o2 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + ) + + def __init__(self, o1=None, o2=None,): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, 
self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('alter_index_result') + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 != None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class drop_index_by_name_args: """ Attributes: diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index b04e6a9..f7bb8fc 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -1403,6 +1403,33 @@ public class HiveMetaStore extends ThriftHiveMetastore { throw new MetaException("Not yet implemented"); } + public void alter_index(final String dbname, final String base_table_name, final String index_name, final Index newIndex) + throws InvalidOperationException, MetaException { + incrementCounter("alter_index"); + logStartFunction("alter_index: db=" + dbname + " base_tbl=" + base_table_name + + " idx=" + index_name + " newidx=" + newIndex.getIndexName()); + newIndex.putToParameters(Constants.DDL_TIME, Long.toString(System + .currentTimeMillis() / 1000)); + + try { + executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + ms.alterIndex(dbname, base_table_name, index_name, newIndex); + return Boolean.TRUE; + } + }); + } catch (MetaException e) { + throw e; + } catch (InvalidOperationException e) { + throw e; + } catch (Exception e) { + assert(e instanceof RuntimeException); + throw (RuntimeException)e; + } + return; + } + public String getVersion() throws TException { incrementCounter("getVersion"); logStartFunction("getVersion"); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index c13f45c..f83158f 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -684,6 +684,22 @@ public class HiveMetaStoreClient implements IMetaStoreClient { } /** + * @param dbname + * @param base_tbl_name + * @param idx_name + * @param new_idx + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_index(java.lang.String, + * java.lang.String, java.lang.String, org.apache.hadoop.hive.metastore.api.Index) + */ + public void alter_index(String dbname, String base_tbl_name, String idx_name, Index new_idx) + throws InvalidOperationException, MetaException, TException { + client.alter_index(dbname, base_tbl_name, idx_name, new_idx); + } + + /** * @param dbName * @param tblName * @param indexName @@ -696,7 +712,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient { public Index getIndex(String dbName, String tblName, String indexName) throws MetaException, UnknownTableException, NoSuchObjectException, TException { - return client.get_index_by_name(dbName, tblName, indexName); + return deepCopy(client.get_index_by_name(dbName, tblName, indexName)); } /** @@ -811,6 +827,14 @@ public class HiveMetaStoreClient implements IMetaStoreClient { return copy; } + private Index deepCopy(Index index) { + Index copy = null; + if (index != null) { + copy = new Index(index); + } + return copy; + } + private Type deepCopy(Type type) { Type copy = null; if (type != null) { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 1bf4293..ee3eed0 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -420,6 +420,9 @@ public interface IMetaStoreClient { public void createIndex(Index index, Table indexTable) throws InvalidObjectException, MetaException, NoSuchObjectException, TException, AlreadyExistsException; + public void alter_index(String dbName, String tblName, String indexName, + Index index) throws InvalidOperationException, MetaException, TException; + /** * * @param dbName diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 4f5623c..4c977b8 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -1122,6 +1122,7 @@ public class ObjectStore implements RawStore, Configurable { } return partNames; } + public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, MetaException { boolean success = false; @@ -1158,6 +1159,36 @@ public class ObjectStore implements RawStore, Configurable { } } + public void alterIndex(String dbname, String baseTblName, String name, Index newIndex) + throws InvalidObjectException, MetaException { + boolean success = false; + try { + openTransaction(); + name = name.toLowerCase(); + baseTblName = baseTblName.toLowerCase(); + dbname = dbname.toLowerCase(); + MIndex newi = convertToMIndex(newIndex); + if (newi == null) { + throw new InvalidObjectException("new index is invalid"); + } + + MIndex oldi = getMIndex(dbname, baseTblName, name); + if (oldi == null) { + throw new MetaException("index " + name + " doesn't exist"); + } + + // For now only alter paramters are allowed + oldi.setParameters(newi.getParameters()); + + // commit the changes + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + public void alterPartition(String dbname, String name, Partition newPart) throws InvalidObjectException, MetaException { boolean success = false; diff --git 
metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index 9f11f9f..55754f0 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -125,6 +125,9 @@ public interface RawStore extends Configurable { public abstract List listIndexNames(String dbName, String origTableName, short max) throws MetaException; + public abstract void alterIndex(String dbname, String baseTblName, String name, Index newIndex) + throws InvalidObjectException, MetaException; + public abstract List getPartitionsByFilter( String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 204b4d0..c64563a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryPlan; @@ -84,6 +85,8 @@ import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.AlterIndexDesc; +import org.apache.hadoop.hive.ql.plan.AlterIndexDesc.AlterIndexTypes; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; @@ -185,6 +188,11 @@ public class DDLTask extends Task implements Serializable { return createIndex(db, crtIndex); } + AlterIndexDesc alterIndex = work.getAlterIndexDesc(); + if (alterIndex != null) { + return alterIndex(db, alterIndex); + } + DropIndexDesc dropIdx = work.getDropIdxDesc(); if (dropIdx != null) { return dropIndex(db, dropIdx); @@ -323,6 +331,45 @@ return 0; } + private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException { + Index idx = db.getIndex(alterIndex.getBaseTableName(), alterIndex.getIndexName()); + + if (alterIndex.getOp() == AlterIndexDesc.AlterIndexTypes.ADDPROPS) { + idx.getParameters().putAll(alterIndex.getProps()); + } else { + console.printError("Unsupported ALTER command"); + return 1; + } + + // set last modified by properties + String user = null; + try { + user = conf.getUser(); + } catch (IOException e) { + console.printError("Unable to get current user: " + e.getMessage(), + stringifyException(e)); + return 1; + } + + idx.getParameters().put("last_modified_by", user); + idx.getParameters().put("last_modified_time", Long.toString(System + .currentTimeMillis() / 1000)); + + try { + System.out.println("Trying to alter index"); + db.alterIndex(alterIndex.getBaseTableName(), alterIndex.getIndexName(), idx); + } catch (InvalidOperationException e) { + console.printError("Invalid alter operation: " + e.getMessage()); + LOG.info("alter index: " + stringifyException(e));
+ return 1; + } catch (HiveException e) { + console.printError("Invalid alter operation: " + e.getMessage()); + System.out.println("alter index: " + stringifyException(e)); + return 1; + } + return 0; + } + /** * Add a partition to a table. * diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 5be41d8..a4ef476 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -345,6 +345,28 @@ public class Hive { } /** + * Updates the existing index metadata with the new metadata. + * + * @param baseTblName + * name of the table the index is defined on + * @param idxName + * name of the existing index + * @param newIdx + * new index metadata; its name may be the same as the old one + * @throws InvalidOperationException + * if the changes in metadata are not acceptable + * @throws HiveException + */ + public void alterIndex(String baseTblName, String idxName, Index newIdx) + throws InvalidOperationException, HiveException { + try { + getMSC().alter_index(getCurrentDatabase(), baseTblName, idxName, newIdx); + } catch (MetaException e) { + throw new HiveException("Unable to alter index.", e); + } catch (TException e) { + throw new HiveException("Unable to alter index.", e); + } + } + + /** * Updates the existing table metadata with the new metadata. * * @param tblName @@ -585,6 +607,10 @@ public class Hive { } } + public Index getIndex(String baseTableName, String indexName) throws HiveException { + return this.getIndex(getCurrentDatabase(), baseTableName, indexName); + } + public Index getIndex(String dbName, String baseTableName, String indexName) throws HiveException { try { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 68e73cb..a0d87e5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.AlterIndexDesc; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; @@ -87,6 +88,7 @@ import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.AlterIndexDesc.AlterIndexTypes; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.mapred.TextInputFormat; @@ -228,7 +230,9 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) { analyzeAlterTableClusterSort(ast); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERINDEX_REBUILD) { - analyzeUpdateIndex(ast); + analyzeAlterIndexRebuild(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERINDEX_PROPERTIES) { + analyzeAlterIndexProps(ast); } else if (ast.getToken().getType() == HiveParser.TOK_SHOWPARTITIONS) { ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); analyzeShowPartitions(ast); @@ -399,7 +403,7 @@ public class
DDLSemanticAnalyzer extends BaseSemanticAnalyzer { dropIdxDesc), conf)); } - private void analyzeUpdateIndex(ASTNode ast) throws SemanticException { + private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException { String baseTableName = unescapeIdentifier(ast.getChild(0).getText()); String indexName = unescapeIdentifier(ast.getChild(1).getText()); HashMap partSpec = null; @@ -411,6 +415,23 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { rootTasks.addAll(indexBuilder); } + private void analyzeAlterIndexProps(ASTNode ast) + throws SemanticException { + + String baseTableName = unescapeIdentifier(ast.getChild(0).getText()); + String indexName = unescapeIdentifier(ast.getChild(1).getText()); + HashMap mapProp = getProps((ASTNode) (ast.getChild(2)) + .getChild(0)); + + AlterIndexDesc alterIdxDesc = + new AlterIndexDesc(AlterIndexTypes.ADDPROPS); + alterIdxDesc.setProps(mapProp); + alterIdxDesc.setIndexName(indexName); + alterIdxDesc.setBaseTableName(baseTableName); + + rootTasks.add(TaskFactory.get(new DDLWork(alterIdxDesc), conf)); + } + private List> getIndexBuilderMapRed(String baseTableName, String indexName, HashMap partSpec) throws SemanticException { try { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g index 5a4101d..fcea3c0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g @@ -115,6 +115,7 @@ TOK_ALTERTABLE_LOCATION; TOK_ALTERTABLE_PROPERTIES; TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION; TOK_ALTERINDEX_REBUILD; +TOK_ALTERINDEX_PROPERTIES; TOK_MSCK; TOK_SHOWDATABASES; TOK_SHOWTABLES; @@ -253,7 +254,6 @@ ddlStatement | createFunctionStatement | createIndexStatement | dropIndexStatement - | alterIndexRebuild | dropFunctionStatement | analyzeStatement | lockStatement @@ -415,6 +415,8 @@ alterStatement KW_TABLE! alterTableStatementSuffix | KW_VIEW! alterViewStatementSuffix + | + KW_INDEX! alterIndexStatementSuffix ) ; @@ -441,6 +443,22 @@ alterViewStatementSuffix : alterViewSuffixProperties ; +alterIndexStatementSuffix +@init { msgs.push("alter index statement"); } +@after { msgs.pop(); } + : indexName=Identifier + (KW_ON tableName=Identifier) + partitionSpec? + ( + KW_REBUILD + ->^(TOK_ALTERINDEX_REBUILD $tableName $indexName partitionSpec?) + | + KW_SET KW_IDXPROPERTIES + indexProperties + ->^(TOK_ALTERINDEX_PROPERTIES $tableName $indexName indexProperties) + ) + ; + alterStatementSuffixRename @init { msgs.push("rename statement"); } @after { msgs.pop(); } @@ -603,16 +621,6 @@ alterStatementSuffixClusterbySortby ->^(TOK_ALTERTABLE_CLUSTER_SORT $name) ; -alterIndexRebuild -@init { msgs.push("update index statement");} -@after {msgs.pop();} - : KW_ALTER KW_INDEX indexName=Identifier - KW_ON base_table_name=Identifier - partitionSpec? - KW_REBUILD - ->^(TOK_ALTERINDEX_REBUILD $base_table_name $indexName partitionSpec?) 
- ; - fileFormat @init { msgs.push("file format specification"); } @after { msgs.pop(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 3b78d25..52c6ce8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -55,6 +55,8 @@ public final class SemanticAnalyzerFactory { commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, "ALTERTABLE_PROPERTIES"); commandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, "ALTERTABLE_SERIALIZER"); commandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, "ALTERTABLE_SERDEPROPERTIES"); + commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, "ALTERINDEX_REBUILD"); + commandType.put(HiveParser.TOK_ALTERINDEX_PROPERTIES, "ALTERINDEX_PROPS"); commandType.put(HiveParser.TOK_SHOWDATABASES, "SHOWDATABASES"); commandType.put(HiveParser.TOK_SHOWTABLES, "SHOWTABLES"); commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, "SHOW_TABLESTATUS"); @@ -67,7 +69,6 @@ public final class SemanticAnalyzerFactory { commandType.put(HiveParser.TOK_DROPVIEW, "DROPVIEW"); commandType.put(HiveParser.TOK_CREATEINDEX, "CREATEINDEX"); commandType.put(HiveParser.TOK_DROPINDEX, "DROPINDEX"); - commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, "ALTERINDEX_REBUILD"); commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, "ALTERVIEW_PROPERTIES"); commandType.put(HiveParser.TOK_QUERY, "QUERY"); commandType.put(HiveParser.TOK_LOCKTABLE, "LOCKTABLE"); @@ -114,6 +115,7 @@ public final class SemanticAnalyzerFactory { case HiveParser.TOK_ALTERTABLE_SERIALIZER: case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: case HiveParser.TOK_ALTERINDEX_REBUILD: + case HiveParser.TOK_ALTERINDEX_PROPERTIES: case HiveParser.TOK_ALTERVIEW_PROPERTIES: case HiveParser.TOK_SHOWDATABASES: case HiveParser.TOK_SHOWTABLES: diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java new file mode 100644 index 0000000..3cde3c2 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.ql.exec.Utilities; + +/** + * AlterIndexDesc. 
+ * + */ +@Explain(displayName = "Alter Index") +public class AlterIndexDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + + /** + * alterIndexTypes. + * + */ + public static enum AlterIndexTypes { + ADDPROPS}; + + AlterIndexTypes op; + String indexName; + String baseTable; + HashMap props; + + public AlterIndexDesc() { + } + + public AlterIndexDesc(AlterIndexTypes type) { + this.op = type; + } + + /** + * @return the name of the index + */ + @Explain(displayName = "name") + public String getIndexName() { + return indexName; + } + + /** + * @param indexName + * the indexName to set + */ + public void setIndexName(String indexName) { + this.indexName = indexName; + } + + /** + * @return the baseTable + */ + @Explain(displayName = "new name") + public String getBaseTableName() { + return baseTable; + } + + /** + * @param baseTable + * the baseTable to set + */ + public void setBaseTableName(String baseTable) { + this.baseTable = baseTable; + } + + /** + * @return the op + */ + public AlterIndexTypes getOp() { + return op; + } + + /** + * @param op + * the op to set + */ + public void setOp(AlterIndexTypes op) { + this.op = op; + } + + /** + * @return the props + */ + @Explain(displayName = "properties") + public HashMap getProps() { + return props; + } + + /** + * @param props + * the props to set + */ + public void setProps(HashMap props) { + this.props = props; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index d445be1..7573086 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity; public class DDLWork implements Serializable { private static final long serialVersionUID = 1L; private CreateIndexDesc createIndexDesc; + private AlterIndexDesc alterIndexDesc; private DropIndexDesc dropIdxDesc; private CreateDatabaseDesc createDatabaseDesc; private SwitchDatabaseDesc switchDatabaseDesc; @@ -40,6 +41,7 @@ public class DDLWork implements Serializable { private CreateViewDesc createVwDesc; private DropTableDesc dropTblDesc; private AlterTableDesc alterTblDesc; + private AlterIndexDesc alterIdxDesc; private ShowDatabasesDesc showDatabasesDesc; private ShowTablesDesc showTblsDesc; private LockTableDesc lockTblDesc; @@ -74,6 +76,10 @@ public class DDLWork implements Serializable { public DDLWork(CreateIndexDesc createIndex) { this.createIndexDesc = createIndex; } + + public DDLWork(AlterIndexDesc alterIndex) { + this.alterIndexDesc = alterIndex; + } /** * @param createDatabaseDesc @@ -116,6 +122,16 @@ public class DDLWork implements Serializable { } /** + * @param alterIdxDesc + * alter index descriptor + */ + public DDLWork(HashSet inputs, HashSet outputs, + AlterIndexDesc alterIdxDesc) { + this(inputs, outputs); + this.alterIdxDesc = alterIdxDesc; + } + + /** * @param createTblDesc * create table descriptor */ @@ -356,15 +372,37 @@ public class DDLWork implements Serializable { this.createTblDesc = createTblDesc; } + /** + * @return the createIndexDesc + */ public CreateIndexDesc getCreateIndexDesc() { return createIndexDesc; } + /** + * @param createIndexDesc + * the createIndexDesc to set + */ public void setCreateIndexDesc(CreateIndexDesc createIndexDesc) { this.createIndexDesc = createIndexDesc; } /** + * @return the alterIndexDesc + */ + public AlterIndexDesc getAlterIndexDesc() { + return alterIndexDesc; + } + + /** + * 
@param alterTblDesc + * the alterTblDesc to set + */ + public void setAlterIndexDesc(AlterIndexDesc alterIndexDesc) { + this.alterIndexDesc = alterIndexDesc; + } + + /** * @return the createTblDesc */ @Explain(displayName = "Create Table Operator") diff --git ql/src/test/queries/clientpositive/alter_index.q ql/src/test/queries/clientpositive/alter_index.q new file mode 100644 index 0000000..2aa13da --- /dev/null +++ ql/src/test/queries/clientpositive/alter_index.q @@ -0,0 +1,11 @@ +drop index src_index_8 on src; + +create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2"); +desc extended default__src_src_index_8__; + +alter index src_index_8 on src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3"); +desc extended default__src_src_index_8__; + +drop index src_index_8 on src; + +show tables; diff --git ql/src/test/results/clientpositive/alter_index.q.out ql/src/test/results/clientpositive/alter_index.q.out new file mode 100644 index 0000000..8df6627 --- /dev/null +++ ql/src/test/results/clientpositive/alter_index.q.out @@ -0,0 +1,46 @@ +PREHOOK: query: drop index src_index_8 on src +PREHOOK: type: DROPINDEX +POSTHOOK: query: drop index src_index_8 on src +POSTHOOK: type: DROPINDEX +PREHOOK: query: create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2") +PREHOOK: type: CREATEINDEX +POSTHOOK: query: create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2") +POSTHOOK: type: CREATEINDEX +PREHOOK: query: desc extended default__src_src_index_8__ +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended default__src_src_index_8__ +POSTHOOK: type: DESCTABLE +key string default +_bucketname string +_offsets array + +Detailed Table Information Table(tableName:default__src_src_index_8__, dbName:default, owner:null, createTime:1288855262, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:_bucketname, type:string, comment:), FieldSchema(name:_offsets, type:array, comment:)], location:pfile:/home/mwang/Projects/hive/build/ql/test/data/warehouse/default__src_src_index_8__, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1288855262}, viewOriginalText:null, viewExpandedText:null, tableType:INDEX_TABLE) +PREHOOK: query: alter index src_index_8 on src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3") +PREHOOK: type: ALTERINDEX_PROPS +POSTHOOK: query: alter index src_index_8 on src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3") +POSTHOOK: type: ALTERINDEX_PROPS +PREHOOK: query: desc extended default__src_src_index_8__ +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended default__src_src_index_8__ +POSTHOOK: type: DESCTABLE +key string default +_bucketname string +_offsets array + +Detailed Table Information Table(tableName:default__src_src_index_8__, dbName:default, owner:null, createTime:1288855262, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:_bucketname, type:string, 
comment:), FieldSchema(name:_offsets, type:array, comment:)], location:pfile:/home/mwang/Projects/hive/build/ql/test/data/warehouse/default__src_src_index_8__, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1288855262}, viewOriginalText:null, viewExpandedText:null, tableType:INDEX_TABLE) +PREHOOK: query: drop index src_index_8 on src +PREHOOK: type: DROPINDEX +POSTHOOK: query: drop index src_index_8 on src +POSTHOOK: type: DROPINDEX +PREHOOK: query: show tables +PREHOOK: type: SHOWTABLES +POSTHOOK: query: show tables +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart
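
For reference, a minimal client-side sketch of how the new alter_index call is intended to be used. This snippet is illustrative only and is not part of the patch; it assumes a metastore reachable through the local Hive configuration and the src_index_8 index on table src created by the alter_index.q test above.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Index;

public class AlterIndexExample {
  public static void main(String[] args) throws Exception {
    // Connect to the metastore described by the local Hive configuration.
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf(HiveConf.class));

    // getIndex now returns a deep copy, so mutating it does not affect any shared object.
    Index idx = client.getIndex("default", "src", "src_index_8");

    // ObjectStore.alterIndex only persists parameter changes for now, which mirrors
    // what ALTER INDEX ... SET IDXPROPERTIES does through DDLTask.alterIndex.
    idx.putToParameters("prop1", "val1_new");
    idx.putToParameters("prop3", "val3");

    // Issue the new Thrift call introduced by this patch.
    client.alter_index("default", "src", "src_index_8", idx);

    client.close();
  }
}

The equivalent HiveQL path is the ALTER INDEX ... SET IDXPROPERTIES statement exercised by the new alter_index.q test.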