Index: metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py =================================================================== --- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py (revision 5373) +++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py (working copy) @@ -211,6 +211,26 @@ """ pass + def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + """ + pass + + def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + """ + pass + def alter_partition(self, db_name, tbl_name, new_part): """ Parameters: @@ -1095,6 +1115,82 @@ raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names failed: unknown result"); + def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + """ + self.send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) + return self.recv_get_partitions_ps() + + def send_get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + self._oprot.writeMessageBegin('get_partitions_ps', TMessageType.CALL, self._seqid) + args = get_partitions_ps_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.max_parts = max_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_ps(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = get_partitions_ps_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success != None: + return result.success + if result.o1 != None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_ps failed: unknown result"); + + def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + """ + self.send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) + return self.recv_get_partition_names_ps() + + def send_get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + self._oprot.writeMessageBegin('get_partition_names_ps', TMessageType.CALL, self._seqid) + args = get_partition_names_ps_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.max_parts = max_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partition_names_ps(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = get_partition_names_ps_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success != None: + return result.success + if result.o1 != None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result"); + def alter_partition(self, db_name, tbl_name, new_part): """ Parameters: @@ -1193,6 +1289,8 @@ self._processMap["get_partition_by_name"] = Processor.process_get_partition_by_name self._processMap["get_partitions"] = Processor.process_get_partitions 
self._processMap["get_partition_names"] = Processor.process_get_partition_names + self._processMap["get_partitions_ps"] = Processor.process_get_partitions_ps + self._processMap["get_partition_names_ps"] = Processor.process_get_partition_names_ps self._processMap["alter_partition"] = Processor.process_alter_partition self._processMap["get_config_value"] = Processor.process_get_config_value @@ -1595,6 +1693,34 @@ oprot.writeMessageEnd() oprot.trans.flush() + def process_get_partitions_ps(self, seqid, iprot, oprot): + args = get_partitions_ps_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_ps_result() + try: + result.success = self._handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + except MetaException, o1: + result.o1 = o1 + oprot.writeMessageBegin("get_partitions_ps", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partition_names_ps(self, seqid, iprot, oprot): + args = get_partition_names_ps_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partition_names_ps_result() + try: + result.success = self._handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + except MetaException, o1: + result.o1 = o1 + oprot.writeMessageBegin("get_partition_names_ps", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_alter_partition(self, seqid, iprot, oprot): args = alter_partition_args() args.read(iprot) @@ -5288,6 +5414,359 @@ def __ne__(self, other): return not (self == other) +class get_partitions_ps_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - max_parts + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'db_name', None, None, ), # 1 + (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 + (4, TType.I16, 'max_parts', None, -1, ), # 4 + ) + + def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4],): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.max_parts = max_parts + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype176, _size173) = iprot.readListBegin() + for _i177 in xrange(_size173): + _elem178 = iprot.readString(); + self.part_vals.append(_elem178) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I16: + self.max_parts = iprot.readI16(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('get_partitions_ps_args') + if self.db_name != None: + oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + if self.tbl_name != None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals != None: + oprot.writeFieldBegin('part_vals', TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter179 in self.part_vals: + oprot.writeString(iter179) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.max_parts != None: + oprot.writeFieldBegin('max_parts', TType.I16, 4) + oprot.writeI16(self.max_parts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_partitions_ps_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype183, _size180) = iprot.readListBegin() + for _i184 in xrange(_size180): + _elem185 = Partition() + _elem185.read(iprot) + self.success.append(_elem185) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_partitions_ps_result') + if self.success != None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter186 in self.success: + iter186.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_partition_names_ps_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - max_parts + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 
'db_name', None, None, ), # 1 + (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 + (4, TType.I16, 'max_parts', None, -1, ), # 4 + ) + + def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4],): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.max_parts = max_parts + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype190, _size187) = iprot.readListBegin() + for _i191 in xrange(_size187): + _elem192 = iprot.readString(); + self.part_vals.append(_elem192) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I16: + self.max_parts = iprot.readI16(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_partition_names_ps_args') + if self.db_name != None: + oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + if self.tbl_name != None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals != None: + oprot.writeFieldBegin('part_vals', TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter193 in self.part_vals: + oprot.writeString(iter193) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.max_parts != None: + oprot.writeFieldBegin('max_parts', TType.I16, 4) + oprot.writeI16(self.max_parts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_partition_names_ps_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = 
iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype197, _size194) = iprot.readListBegin() + for _i198 in xrange(_size194): + _elem199 = iprot.readString(); + self.success.append(_elem199) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_partition_names_ps_result') + if self.success != None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter200 in self.success: + oprot.writeString(iter200) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class alter_partition_args: """ Attributes: Index: metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote =================================================================== --- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote (revision 5373) +++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote (working copy) @@ -45,6 +45,8 @@ print ' Partition get_partition_by_name(string db_name, string tbl_name, string part_name)' print ' get_partitions(string db_name, string tbl_name, i16 max_parts)' print ' get_partition_names(string db_name, string tbl_name, i16 max_parts)' + print ' get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts)' + print ' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)' print ' void alter_partition(string db_name, string tbl_name, Partition new_part)' print ' string get_config_value(string name, string defaultValue)' print '' @@ -239,6 +241,18 @@ sys.exit(1) pp.pprint(client.get_partition_names(args[0],args[1],eval(args[2]),)) +elif cmd == 'get_partitions_ps': + if len(args) != 4: + print 'get_partitions_ps requires 4 args' + sys.exit(1) + pp.pprint(client.get_partitions_ps(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'get_partition_names_ps': + if len(args) != 4: + print 'get_partition_names_ps requires 4 args' + sys.exit(1) + pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),)) + elif cmd == 'alter_partition': if len(args) != 3: print 'alter_partition requires 3 args' Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java =================================================================== --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (revision 5373) +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (working copy) @@ -20,7 +20,9 @@ import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import 
java.util.List;
+import java.util.Set;
 
 import junit.framework.TestCase;
 
@@ -47,6 +49,7 @@
   private HiveMetaStoreClient client;
   private HiveConf hiveConf;
 
+  @Override
   protected void setUp() throws Exception {
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
@@ -66,6 +69,7 @@
     }
   }
 
+  @Override
   protected void tearDown() throws Exception {
     try {
       super.tearDown();
@@ -80,7 +84,7 @@
   /**
    * tests create table and partition and tries to drop the table without
    * dropping the partition
-   * 
+   *
    * @throws Exception
    */
   public void testPartition() throws Exception {
@@ -91,6 +95,12 @@
     List<String> vals = new ArrayList<String>(2);
     vals.add("2008-07-01 14:13:12");
     vals.add("14");
+    List<String> vals2 = new ArrayList<String>(2);
+    vals2.add("2008-07-01 14:13:12");
+    vals2.add("15");
+    List<String> vals3 = new ArrayList<String>(2);
+    vals3.add("2008-07-02 14:13:12");
+    vals3.add("15");
 
     client.dropTable(dbName, tblName);
     client.dropDatabase(dbName);
@@ -144,25 +155,93 @@
     part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
     part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
 
+    Partition part2 = new Partition();
+    part2.setDbName(dbName);
+    part2.setTableName(tblName);
+    part2.setValues(vals2);
+    part2.setParameters(new HashMap<String, String>());
+    part2.setSd(tbl.getSd());
+    part2.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+    part2.getSd().setLocation(tbl.getSd().getLocation() + "/part2");
+
+    Partition part3 = new Partition();
+    part3.setDbName(dbName);
+    part3.setTableName(tblName);
+    part3.setValues(vals3);
+    part3.setParameters(new HashMap<String, String>());
+    part3.setSd(tbl.getSd());
+    part3.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+    part3.getSd().setLocation(tbl.getSd().getLocation() + "/part3");
+
     Partition retp = client.add_partition(part);
     assertNotNull("Unable to create partition " + part, retp);
+    Partition retp2 = client.add_partition(part2);
+    assertNotNull("Unable to create partition " + part2, retp2);
+    Partition retp3 = client.add_partition(part3);
+    assertNotNull("Unable to create partition " + part3, retp3);
 
-    Partition part2 = client.getPartition(dbName, tblName, part.getValues());
-    assertTrue("Partitions are not same", part.equals(part2));
+    Partition part_get = client.getPartition(dbName, tblName, part.getValues());
+    assertTrue("Partitions are not same", part.equals(part_get));
 
     String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
-    Partition part3 = client.getPartitionByName(dbName, tblName, partName);
-    assertTrue("Partitions are not the same", part.equals(part2));
-
+    String part2Name = "ds=2008-07-01 14%3A13%3A12/hr=15";
+    String part3Name = "ds=2008-07-02 14%3A13%3A12/hr=15";
+
+    part_get = client.getPartition(dbName, tblName, partName);
+    assertTrue("Partitions are not the same", part.equals(part_get));
+
+    // Test partition listing with a partial spec - ds is specified but hr is not
+    List<String> partialVals = new ArrayList<String>();
+    partialVals.add(vals.get(0));
+    partialVals.add("");
+    Set<Partition> parts = new HashSet<Partition>();
+    parts.add(part);
+    parts.add(part2);
+
+    List<Partition> partial = client.listPartitions(dbName, tblName, partialVals,
+        (short) -1);
+    assertTrue("Should have returned 2 partitions", partial.size() == 2);
+    assertTrue("Not all parts returned", partial.containsAll(parts));
+
+    Set<String> partNames = new HashSet<String>();
+    partNames.add(partName);
+    partNames.add(part2Name);
+    List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+        (short) -1);
+    assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
+    assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+
+    // Test partition listing with a partial spec - hr is specified but ds is not
+    parts.clear();
+    parts.add(part2);
+    parts.add(part3);
+
+    partialVals.clear();
+    partialVals.add("");
+    partialVals.add(vals2.get(1));
+
+    partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
+    assertTrue("Should have returned 2 partitions", partial.size() == 2);
+    assertTrue("Not all parts returned", partial.containsAll(parts));
+
+    partNames.clear();
+    partNames.add(part2Name);
+    partNames.add(part3Name);
+    partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+        (short) -1);
+    assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
+    assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+
+    // Verify escaped partition names don't return partitions
     boolean exceptionThrown = false;
     try {
       String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
-      client.getPartitionByName(dbName, tblName, badPartName);
+      client.getPartition(dbName, tblName, badPartName);
     } catch(NoSuchObjectException e) {
       exceptionThrown = true;
     }
     assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
-    
+
     FileSystem fs = FileSystem.get(hiveConf);
     Path partPath = new Path(part2.getSd().getLocation());
@@ -172,15 +251,15 @@
     assertFalse(fs.exists(partPath));
 
     // Test append_partition_by_name
-    client.appendPartitionByName(dbName, tblName, partName);
+    client.appendPartition(dbName, tblName, partName);
     Partition part4 = client.getPartition(dbName, tblName, part.getValues());
     assertTrue("Append partition by name failed", part4.getValues().equals(vals));
     Path part4Path = new Path(part4.getSd().getLocation());
     assertTrue(fs.exists(part4Path));
-    
+
     // Test drop_partition_by_name
-    assertTrue("Drop partition by name failed", 
-        client.dropPartitionByName(dbName, tblName, partName, true));
+    assertTrue("Drop partition by name failed",
+        client.dropPartition(dbName, tblName, partName, true));
     assertFalse(fs.exists(part4Path));
 
     // add the partition again so that drop table with a partition can be
@@ -202,10 +281,10 @@
     assertTrue(fs.exists(partPath));
     client.dropPartition(dbName, tblName, part.getValues(), true);
     assertTrue(fs.exists(partPath));
-    
+
     ret = client.dropDatabase(dbName);
     assertTrue("Unable to drop the database " + dbName, ret);
-    
+
   } catch (Exception e) {
     System.err.println(StringUtils.stringifyException(e));
     System.err.println("testPartition() failed.");

Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 5373)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -152,7 +152,7 @@
 
   /**
    * create default database if it doesn't exist
-   * 
+   *
   * @throws MetaException
   */
   private void createDefaultDB() throws MetaException {
@@ -193,6 +193,7 @@
       return fb_status.ALIVE;
     }
 
+    @Override
     public void shutdown() {
       logStartFunction("Shutting down the object store...");
       try {
@@ -412,7 +413,7 @@
 
     /**
      * Is this an external table?
-     * 
+     *
      * @param table
      *          Check if this table is external.
      * @return True if the table is external, otherwise false.
@@ -499,7 +500,7 @@
 
       return part;
     }
-    
+
     public Partition append_partition(String dbName, String tableName,
         List<String> part_vals) throws InvalidObjectException,
         AlreadyExistsException, MetaException {
@@ -510,7 +511,7 @@
           LOG.debug(part);
         }
       }
-      return append_partition_common(dbName, tableName, part_vals); 
+      return append_partition_common(dbName, tableName, part_vals);
     }
 
     public int add_partitions(List<Partition> parts) throws MetaException,
@@ -570,7 +571,7 @@
         part.getSd().setLocation(partLocation.toString());
 
         // Check to see if the directory already exists before calling mkdirs()
-        // because if the file system is read-only, mkdirs will throw an 
+        // because if the file system is read-only, mkdirs will throw an
         // exception even if the directory already exists.
         if (!wh.isDir(partLocation)) {
           if (!wh.mkdirs(partLocation)) {
@@ -637,7 +638,7 @@
       incrementCounter("drop_partition");
       logStartFunction("drop_partition", db_name, tbl_name);
       LOG.info("Partition values:" + part_vals);
-      
+
       return drop_partition_common(db_name, tbl_name, part_vals, deleteData);
     }
 
@@ -739,7 +740,7 @@
     /**
      * Return the schema of the table. This function includes partition columns
      * in addition to the regular columns.
-     * 
+     *
      * @param db
      *          Name of the database
      * @param tableName
@@ -812,11 +813,11 @@
       return toReturn;
     }
 
-    private List<String> getPartValsFromName(String dbName, String tblName, 
+    private List<String> getPartValsFromName(String dbName, String tblName,
         String partName) throws MetaException, InvalidObjectException {
       // Unescape the partition name
       LinkedHashMap<String, String> hm = Warehouse.makeSpecFromName(partName);
-      
+
       // getPartition expects partition values in a list. use info from the
       // table to put the partition column values in order
       Table t = getMS().getTable(dbName, tblName);
       if(t == null) {
         throw new InvalidObjectException(dbName + "."
             + tblName + " table not found");
       }
-      
+
       List<String> partVals = new ArrayList<String>();
       for(FieldSchema field : t.getPartitionKeys()) {
         String key = field.getName();
@@ -836,13 +837,13 @@
       }
       return partVals;
     }
-    
+
     public Partition get_partition_by_name(String db_name, String tbl_name,
         String part_name) throws MetaException, NoSuchObjectException, TException {
       incrementCounter("get_partition_by_name");
       logStartFunction("get_partition_by_name: db=" + db_name + " tbl="
           + tbl_name + " part=" + part_name);
-      
+
       List<String> partVals = null;
       try {
         partVals = getPartValsFromName(db_name, tbl_name, part_name);
       } catch (InvalidObjectException e) {
         throw new NoSuchObjectException(e.getMessage());
       }
       Partition p = getMS().getPartition(db_name, tbl_name, partVals);
-      
+
       if(p == null) {
         throw new NoSuchObjectException(db_name + "."
             + tbl_name + " partition (" + part_name + ") not found");
@@ -859,13 +860,13 @@
     }
 
     public Partition append_partition_by_name(String db_name, String tbl_name,
-        String part_name) throws InvalidObjectException, 
+        String part_name) throws InvalidObjectException,
         AlreadyExistsException, MetaException, TException {
       incrementCounter("append_partition_by_name");
       logStartFunction("append_partition_by_name: db=" + db_name + " tbl="
           + tbl_name + " part=" + part_name);
       List<String> partVals = getPartValsFromName(db_name, tbl_name, part_name);
-      
+
       return append_partition_common(db_name, tbl_name, partVals);
     }
 
@@ -876,16 +877,77 @@
       incrementCounter("drop_partition_by_name");
       logStartFunction("drop_partition_by_name: db=" + db_name + " tbl="
          + tbl_name + " part=" + part_name);
-      
+
       List<String> partVals = null;
       try {
         partVals = getPartValsFromName(db_name, tbl_name, part_name);
       } catch (InvalidObjectException e) {
         throw new NoSuchObjectException(e.getMessage());
       }
-      
+
       return drop_partition_common(db_name, tbl_name, partVals, deleteData);
     }
+
+    @Override
+    public List<Partition> get_partitions_ps(String db_name, String tbl_name,
+        List<String> part_vals, short max_parts) throws MetaException,
+        TException {
+      incrementCounter("get_partitions_ps");
+      logStartFunction("get_partitions_ps", db_name, tbl_name);
+      List<Partition> parts = null;
+      List<Partition> matchingParts = new ArrayList<Partition>();
+
+      // This gets all the partitions and then filters based on the specified
+      // criteria. An alternative approach would be to get all the partition
+      // names, do the filtering on the names, and get the partition for each
+      // of the names that match.
+
+      try {
+        parts = get_partitions(db_name, tbl_name, (short) -1);
+      } catch (NoSuchObjectException e) {
+        throw new MetaException(e.getMessage());
+      }
+
+      for (Partition p : parts) {
+        if (MetaStoreUtils.pvalMatches(part_vals, p.getValues())) {
+          matchingParts.add(p);
+        }
+      }
+
+      return matchingParts;
+    }
+
+    @Override
+    public List<String> get_partition_names_ps(String db_name, String tbl_name,
+        List<String> part_vals, short max_parts) throws MetaException, TException {
+      incrementCounter("get_partition_names_ps");
+      logStartFunction("get_partition_names_ps", db_name, tbl_name);
+      Table t;
+      try {
+        t = get_table(db_name, tbl_name);
+      } catch (NoSuchObjectException e) {
+        throw new MetaException(e.getMessage());
+      }
+
+      List<String> partNames = get_partition_names(db_name, tbl_name, max_parts);
+      List<String> filteredPartNames = new ArrayList<String>();
+
+      for(String name : partNames) {
+        LinkedHashMap<String, String> spec = Warehouse.makeSpecFromName(name);
+        List<String> vals = new ArrayList<String>();
+        // Since we are iterating through a LinkedHashMap, iteration should
+        // return the partition values in the correct order for comparison.
+ for (String val : spec.values()) { + vals.add(val); + } + if (MetaStoreUtils.pvalMatches(part_vals, vals)) { + filteredPartNames.add(name); + } + } + + return filteredPartNames; + } + } /** Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (revision 5373) +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (working copy) @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Properties; @@ -34,7 +35,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.Constants; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -53,7 +53,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; public class MetaStoreUtils { @@ -64,12 +63,12 @@ /** * printStackTrace - * + * * Helper function to print an exception stack trace to the log and not stderr - * + * * @param e * the exception - * + * */ static public void printStackTrace(Exception e) { for (StackTraceElement s : e.getStackTrace()) { @@ -118,15 +117,15 @@ /** * recursiveDelete - * + * * just recursively deletes a dir - you'd think Java would have something to * do this?? - * + * * @param f * - the file/dir to delete * @exception IOException * propogate f.delete() exceptions - * + * */ static public void recursiveDelete(File f) throws IOException { if (f.isDirectory()) { @@ -142,9 +141,9 @@ /** * getDeserializer - * + * * Get the Deserializer for a table given its name and properties. - * + * * @param conf * hadoop config * @param schema @@ -152,9 +151,9 @@ * @return the Deserializer * @exception MetaException * if any problems instantiating the Deserializer - * + * * todo - this should move somewhere into serde.jar - * + * */ static public Deserializer getDeserializer(Configuration conf, Properties schema) throws MetaException { @@ -174,9 +173,9 @@ /** * getDeserializer - * + * * Get the Deserializer for a table. - * + * * @param conf * - hadoop config * @param table @@ -184,9 +183,9 @@ * @return the Deserializer * @exception MetaException * if any problems instantiating the Deserializer - * + * * todo - this should move somewhere into serde.jar - * + * */ static public Deserializer getDeserializer(Configuration conf, org.apache.hadoop.hive.metastore.api.Table table) throws MetaException { @@ -210,9 +209,9 @@ /** * getDeserializer - * + * * Get the Deserializer for a partition. - * + * * @param conf * - hadoop config * @param partition @@ -220,7 +219,7 @@ * @return the Deserializer * @exception MetaException * if any problems instantiating the Deserializer - * + * */ static public Deserializer getDeserializer(Configuration conf, org.apache.hadoop.hive.metastore.api.Partition part, @@ -289,10 +288,10 @@ /** * validateName - * + * * Checks the name conforms to our standars which are: "[a-zA-z_0-9]+". 
checks
   * this is just characters and numbers and _
-   * 
+   *
   * @param name
   *          the name to validate
   * @return true or false depending on conformance
@@ -521,7 +520,7 @@
 
   /**
    * Convert FieldSchemas to Thrift DDL + column names and column types
-   * 
+   *
   * @param structName
   *          The name of the table
   * @param fieldSchemas
@@ -665,7 +664,7 @@
           org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL,
           getDDLFromFieldSchema(tableName, sd.getCols()));
     }
-    
+
     String partString = "";
     String partStringSep = "";
     for (FieldSchema partKey : partitionKeys) {
@@ -736,7 +735,7 @@
 
   /**
    * Catches exceptions that can't be handled and bundles them to MetaException
-   * 
+   *
   * @param e
   * @throws MetaException
   */
@@ -841,4 +840,27 @@
     }
     return (table.getParameters().get(Constants.META_TABLE_STORAGE) != null);
   }
+
+  /**
+   * Returns true if partial has the same values as full for all values that
+   * aren't empty in partial.
+   */
+
+  public static boolean pvalMatches(List<String> partial, List<String> full) {
+    if(partial.size() != full.size()) {
+      return false;
+    }
+    Iterator<String> p = partial.iterator();
+    Iterator<String> f = full.iterator();
+
+    while(p.hasNext()) {
+      String pval = p.next();
+      String fval = f.next();
+
+      if (pval.length() != 0 && !pval.equals(fval)) {
+        return false;
+      }
+    }
+    return true;
+  }
 }

Index: metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java	(revision 5373)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java	(working copy)
@@ -47,7 +47,7 @@
 
   /**
    * Drop the table.
-   * 
+   *
   * @param tableName
   *          The table to drop
   * @param deleteData
@@ -67,7 +67,7 @@
 
   /**
    * Drop the table.
-   * 
+   *
   * @param dbname
   *          The database for this table
   * @param tableName
@@ -93,7 +93,7 @@
 
   /**
    * Get a table object.
-   * 
+   *
   * @param tableName
   *          Name of the table to fetch.
   * @return An object representing the table.
@@ -109,7 +109,7 @@
 
   /**
    * Get a table object.
-   * 
+   *
   * @param dbName
   *          The database the table is located in.
   * @param tableName
@@ -141,9 +141,11 @@
       List<String> partVals) throws InvalidObjectException,
       AlreadyExistsException, MetaException, TException;
 
+  public Partition appendPartition(String tableName, String dbName, String name)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
   /**
    * Add a partition to the table.
-   * 
+   *
   * @param partition
   *          The partition to add
   * @return The partition added
@@ -183,9 +185,9 @@
   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
   *      java.lang.String, java.util.List)
   */
-  public Partition getPartitionByName(String dbName, String tblName,
+  public Partition getPartition(String dbName, String tblName,
      String name) throws MetaException, UnknownTableException, NoSuchObjectException, TException;
-  
+
   /**
   * @param tbl_name
   * @param db_name
@@ -198,9 +200,15 @@
   public List<Partition> listPartitions(String db_name, String tbl_name,
       short max_parts) throws NoSuchObjectException, MetaException, TException;
 
+  public List<Partition> listPartitions(String db_name, String tbl_name,
+      List<String> part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException;
+
   public List<String> listPartitionNames(String db_name, String tbl_name,
       short max_parts) throws MetaException, TException;
 
+  public List<String> listPartitionNames(String db_name, String tbl_name,
+      List<String> part_vals, short max_parts) throws MetaException, TException;
+
   /**
   * @param tbl
   * @throws AlreadyExistsException
@@ -238,9 +246,12 @@
       List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
       MetaException, TException;
 
+  public boolean dropPartition(String db_name, String tbl_name,
+      String name, boolean deleteData) throws NoSuchObjectException,
+      MetaException, TException;
   /**
    * updates a partition to new partition
-   * 
+   *
   * @param dbName
   *          database of the old partition
   * @param tblName

Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(revision 5373)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(working copy)
@@ -242,6 +242,10 @@
     return client.append_partition(db_name, table_name, part_vals);
   }
 
+  public Partition appendPartition(String dbName, String tableName, String partName)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+    return client.append_partition_by_name(dbName, tableName, partName);
+  }
   /**
   * @param name
   * @param location_uri
@@ -326,6 +330,10 @@
     return dropPartition(db_name, tbl_name, part_vals, true);
   }
 
+  public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+      throws NoSuchObjectException, MetaException, TException {
+    return client.drop_partition_by_name(dbName, tableName, partName, deleteData);
+  }
   /**
   * @param db_name
   * @param tbl_name
@@ -453,6 +461,12 @@
     return client.get_partitions(db_name, tbl_name, max_parts);
   }
 
+  @Override
+  public List<Partition> listPartitions(String db_name, String tbl_name, List<String> part_vals,
+      short max_parts) throws NoSuchObjectException, MetaException, TException {
+    return client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts);
+  }
+
   /**
   * @param name
   * @return the database
@@ -543,6 +557,12 @@
     return client.get_partition_names(dbName, tblName, max);
   }
 
+  @Override
+  public List<String> listPartitionNames(String db_name, String tbl_name,
+      List<String> part_vals, short max_parts) throws MetaException, TException {
+    return client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts);
+  }
+
   public void alter_partition(String dbName, String tblName, Partition newPart)
       throws InvalidOperationException, MetaException, TException {
     client.alter_partition(dbName, tblName, newPart);
@@ -585,20 +605,11 @@
     return client.get_config_value(name, defaultValue);
   }
 
-
public Partition getPartitionByName(String db, String tableName, String partName) + public Partition getPartition(String db, String tableName, String partName) throws MetaException, TException, UnknownTableException, NoSuchObjectException { return client.get_partition_by_name(db, tableName, partName); } - public Partition appendPartitionByName(String dbName, String tableName, String partName) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException { - return client.append_partition_by_name(dbName, tableName, partName); - } - - public boolean dropPartitionByName(String dbName, String tableName, String partName, boolean deleteData) - throws NoSuchObjectException, MetaException, TException { - return client.drop_partition_by_name(dbName, tableName, partName, deleteData); - } private HiveMetaHook getHook(Table tbl) throws MetaException { if (hookLoader == null) { Index: metastore/src/gen-cpp/ThriftHiveMetastore.cpp =================================================================== --- metastore/src/gen-cpp/ThriftHiveMetastore.cpp (revision 5373) +++ metastore/src/gen-cpp/ThriftHiveMetastore.cpp (working copy) @@ -5378,6 +5378,566 @@ return xfer; } +uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == apache::thrift::protocol::T_LIST) { + { + this->part_vals.clear(); + uint32_t _size190; + apache::thrift::protocol::TType _etype193; + iprot->readListBegin(_etype193, _size190); + this->part_vals.resize(_size190); + uint32_t _i194; + for (_i194 = 0; _i194 < _size190; ++_i194) + { + xfer += iprot->readString(this->part_vals[_i194]); + } + iprot->readListEnd(); + } + this->__isset.part_vals = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == apache::thrift::protocol::T_I16) { + xfer += iprot->readI16(this->max_parts); + this->__isset.max_parts = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_args"); + xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3); + { + xfer += 
oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->part_vals.size()); + std::vector ::const_iterator _iter195; + for (_iter195 = this->part_vals.begin(); _iter195 != this->part_vals.end(); ++_iter195) + { + xfer += oprot->writeString((*_iter195)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("max_parts", apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16(this->max_parts); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_pargs"); + xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, (*(this->part_vals)).size()); + std::vector ::const_iterator _iter196; + for (_iter196 = (*(this->part_vals)).begin(); _iter196 != (*(this->part_vals)).end(); ++_iter196) + { + xfer += oprot->writeString((*_iter196)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("max_parts", apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16((*(this->max_parts))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size197; + apache::thrift::protocol::TType _etype200; + iprot->readListBegin(_etype200, _size197); + this->success.resize(_size197); + uint32_t _i201; + for (_i201 = 0; _i201 < _size197; ++_i201) + { + xfer += this->success[_i201].read(iprot); + } + iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size()); + std::vector 
::const_iterator _iter202; + for (_iter202 = this->success.begin(); _iter202 != this->success.end(); ++_iter202) + { + xfer += (*_iter202).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size203; + apache::thrift::protocol::TType _etype206; + iprot->readListBegin(_etype206, _size203); + (*(this->success)).resize(_size203); + uint32_t _i207; + for (_i207 = 0; _i207 < _size203; ++_i207) + { + xfer += (*(this->success))[_i207].read(iprot); + } + iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == apache::thrift::protocol::T_LIST) { + { + this->part_vals.clear(); + uint32_t _size208; + apache::thrift::protocol::TType _etype211; + iprot->readListBegin(_etype211, _size208); + this->part_vals.resize(_size208); + uint32_t _i212; + for (_i212 = 0; _i212 < _size208; ++_i212) + { + xfer += iprot->readString(this->part_vals[_i212]); + } + iprot->readListEnd(); + } + this->__isset.part_vals = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == apache::thrift::protocol::T_I16) { + xfer += iprot->readI16(this->max_parts); + this->__isset.max_parts = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(apache::thrift::protocol::TProtocol* oprot) 
const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_args"); + xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->part_vals.size()); + std::vector ::const_iterator _iter213; + for (_iter213 = this->part_vals.begin(); _iter213 != this->part_vals.end(); ++_iter213) + { + xfer += oprot->writeString((*_iter213)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("max_parts", apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16(this->max_parts); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_pargs"); + xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("part_vals", apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, (*(this->part_vals)).size()); + std::vector ::const_iterator _iter214; + for (_iter214 = (*(this->part_vals)).begin(); _iter214 != (*(this->part_vals)).end(); ++_iter214) + { + xfer += oprot->writeString((*_iter214)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("max_parts", apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16((*(this->max_parts))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size215; + apache::thrift::protocol::TType _etype218; + iprot->readListBegin(_etype218, _size215); + this->success.resize(_size215); + uint32_t _i219; + for (_i219 = 0; _i219 < _size215; ++_i219) + { + xfer += iprot->readString(this->success[_i219]); + } + iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); 
+ break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size()); + std::vector ::const_iterator _iter220; + for (_iter220 = this->success.begin(); _iter220 != this->success.end(); ++_iter220) + { + xfer += oprot->writeString((*_iter220)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size221; + apache::thrift::protocol::TType _etype224; + iprot->readListBegin(_etype224, _size221); + (*(this->success)).resize(_size221); + uint32_t _i225; + for (_i225 = 0; _i225 < _size221; ++_i225) + { + xfer += iprot->readString((*(this->success))[_i225]); + } + iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + uint32_t ThriftHiveMetastore_alter_partition_args::read(apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; @@ -7371,6 +7931,138 @@ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names failed: unknown result"); } +void ThriftHiveMetastoreClient::get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +{ + send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + recv_get_partitions_ps(_return); +} + +void ThriftHiveMetastoreClient::send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_partitions_ps", apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_partitions_ps_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.max_parts = &max_parts; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->flush(); + oprot_->getTransport()->writeEnd(); +} + +void 
ThriftHiveMetastoreClient::recv_get_partitions_ps(std::vector<Partition> & _return)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == apache::thrift::protocol::T_EXCEPTION) {
+    apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE);
+  }
+  if (fname.compare("get_partitions_ps") != 0) {
+    iprot_->skip(apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME);
+  }
+  ThriftHiveMetastore_get_partitions_ps_presult result;
+  result.success = &_return;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.success) {
+    // _return pointer has now been filled
+    return;
+  }
+  if (result.__isset.o1) {
+    throw result.o1;
+  }
+  throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps failed: unknown result");
+}
+
+void ThriftHiveMetastoreClient::get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
+{
+  send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts);
+  recv_get_partition_names_ps(_return);
+}
+
+void ThriftHiveMetastoreClient::send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("get_partition_names_ps", apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_get_partition_names_ps_pargs args;
+  args.db_name = &db_name;
+  args.tbl_name = &tbl_name;
+  args.part_vals = &part_vals;
+  args.max_parts = &max_parts;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->flush();
+  oprot_->getTransport()->writeEnd();
+}
+
+void ThriftHiveMetastoreClient::recv_get_partition_names_ps(std::vector<std::string> & _return)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == apache::thrift::protocol::T_EXCEPTION) {
+    apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE);
+  }
+  if (fname.compare("get_partition_names_ps") != 0) {
+    iprot_->skip(apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME);
+  }
+  ThriftHiveMetastore_get_partition_names_ps_presult result;
+  result.success = &_return;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.success) {
+ // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result"); +} + void ThriftHiveMetastoreClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { send_alter_partition(db_name, tbl_name, new_part); @@ -8347,6 +9039,68 @@ oprot->getTransport()->writeEnd(); } +void ThriftHiveMetastoreProcessor::process_get_partitions_ps(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot) +{ + ThriftHiveMetastore_get_partitions_ps_args args; + args.read(iprot); + iprot->readMessageEnd(); + iprot->getTransport()->readEnd(); + + ThriftHiveMetastore_get_partitions_ps_result result; + try { + iface_->get_partitions_ps(result.success, args.db_name, args.tbl_name, args.part_vals, args.max_parts); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_partitions_ps", apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); + return; + } + + oprot->writeMessageBegin("get_partitions_ps", apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); +} + +void ThriftHiveMetastoreProcessor::process_get_partition_names_ps(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot) +{ + ThriftHiveMetastore_get_partition_names_ps_args args; + args.read(iprot); + iprot->readMessageEnd(); + iprot->getTransport()->readEnd(); + + ThriftHiveMetastore_get_partition_names_ps_result result; + try { + iface_->get_partition_names_ps(result.success, args.db_name, args.tbl_name, args.part_vals, args.max_parts); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_partition_names_ps", apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); + return; + } + + oprot->writeMessageBegin("get_partition_names_ps", apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->flush(); + oprot->getTransport()->writeEnd(); +} + void ThriftHiveMetastoreProcessor::process_alter_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot) { ThriftHiveMetastore_alter_partition_args args; Index: metastore/src/gen-cpp/ThriftHiveMetastore.h =================================================================== --- metastore/src/gen-cpp/ThriftHiveMetastore.h (revision 5373) +++ metastore/src/gen-cpp/ThriftHiveMetastore.h (working copy) @@ -39,6 +39,8 @@ virtual void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) = 0; virtual void get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0; virtual void 
get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0; + virtual void get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) = 0; + virtual void get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) = 0; virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0; virtual void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) = 0; }; @@ -124,6 +126,12 @@ void get_partition_names(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const int16_t /* max_parts */) { return; } + void get_partitions_ps(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* part_vals */, const int16_t /* max_parts */) { + return; + } + void get_partition_names_ps(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* part_vals */, const int16_t /* max_parts */) { + return; + } void alter_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */) { return; } @@ -2760,6 +2768,234 @@ }; +class ThriftHiveMetastore_get_partitions_ps_args { + public: + + ThriftHiveMetastore_get_partitions_ps_args() : db_name(""), tbl_name(""), max_parts(-1) { + } + + virtual ~ThriftHiveMetastore_get_partitions_ps_args() throw() {} + + std::string db_name; + std::string tbl_name; + std::vector part_vals; + int16_t max_parts; + + struct __isset { + __isset() : db_name(false), tbl_name(false), part_vals(false), max_parts(false) {} + bool db_name; + bool tbl_name; + bool part_vals; + bool max_parts; + } __isset; + + bool operator == (const ThriftHiveMetastore_get_partitions_ps_args & rhs) const + { + if (!(db_name == rhs.db_name)) + return false; + if (!(tbl_name == rhs.tbl_name)) + return false; + if (!(part_vals == rhs.part_vals)) + return false; + if (!(max_parts == rhs.max_parts)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_partitions_ps_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_partitions_ps_args & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_partitions_ps_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_partitions_ps_pargs() throw() {} + + const std::string* db_name; + const std::string* tbl_name; + const std::vector * part_vals; + const int16_t* max_parts; + + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_partitions_ps_result { + public: + + ThriftHiveMetastore_get_partitions_ps_result() { + } + + virtual ~ThriftHiveMetastore_get_partitions_ps_result() throw() {} + + std::vector success; + MetaException o1; + + struct __isset { + __isset() : success(false), o1(false) {} + bool success; + bool o1; + } __isset; + + bool operator == (const ThriftHiveMetastore_get_partitions_ps_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + return true; + } + bool operator != (const 
ThriftHiveMetastore_get_partitions_ps_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_partitions_ps_result & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_partitions_ps_presult { + public: + + + virtual ~ThriftHiveMetastore_get_partitions_ps_presult() throw() {} + + std::vector * success; + MetaException o1; + + struct __isset { + __isset() : success(false), o1(false) {} + bool success; + bool o1; + } __isset; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + +}; + +class ThriftHiveMetastore_get_partition_names_ps_args { + public: + + ThriftHiveMetastore_get_partition_names_ps_args() : db_name(""), tbl_name(""), max_parts(-1) { + } + + virtual ~ThriftHiveMetastore_get_partition_names_ps_args() throw() {} + + std::string db_name; + std::string tbl_name; + std::vector part_vals; + int16_t max_parts; + + struct __isset { + __isset() : db_name(false), tbl_name(false), part_vals(false), max_parts(false) {} + bool db_name; + bool tbl_name; + bool part_vals; + bool max_parts; + } __isset; + + bool operator == (const ThriftHiveMetastore_get_partition_names_ps_args & rhs) const + { + if (!(db_name == rhs.db_name)) + return false; + if (!(tbl_name == rhs.tbl_name)) + return false; + if (!(part_vals == rhs.part_vals)) + return false; + if (!(max_parts == rhs.max_parts)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_partition_names_ps_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_partition_names_ps_args & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_partition_names_ps_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_partition_names_ps_pargs() throw() {} + + const std::string* db_name; + const std::string* tbl_name; + const std::vector * part_vals; + const int16_t* max_parts; + + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_partition_names_ps_result { + public: + + ThriftHiveMetastore_get_partition_names_ps_result() { + } + + virtual ~ThriftHiveMetastore_get_partition_names_ps_result() throw() {} + + std::vector success; + MetaException o1; + + struct __isset { + __isset() : success(false), o1(false) {} + bool success; + bool o1; + } __isset; + + bool operator == (const ThriftHiveMetastore_get_partition_names_ps_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_partition_names_ps_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_partition_names_ps_result & ) const; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + uint32_t write(apache::thrift::protocol::TProtocol* oprot) const; + +}; + +class ThriftHiveMetastore_get_partition_names_ps_presult { + public: + + + virtual ~ThriftHiveMetastore_get_partition_names_ps_presult() throw() {} + + std::vector * success; + MetaException o1; + + struct __isset { + __isset() : success(false), o1(false) {} + bool success; + bool o1; + } __isset; + + uint32_t read(apache::thrift::protocol::TProtocol* iprot); + +}; + class ThriftHiveMetastore_alter_partition_args { public: @@ -3057,6 
+3293,12 @@ void get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); void send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); void recv_get_partition_names(std::vector & _return); + void get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void recv_get_partitions_ps(std::vector & _return); + void get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void recv_get_partition_names_ps(std::vector & _return); void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); void recv_alter_partition(); @@ -3095,6 +3337,8 @@ void process_get_partition_by_name(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_get_partitions(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_get_partition_names(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); + void process_get_partitions_ps(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); + void process_get_partition_names_ps(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_alter_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); void process_get_config_value(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot); public: @@ -3125,6 +3369,8 @@ processMap_["get_partition_by_name"] = &ThriftHiveMetastoreProcessor::process_get_partition_by_name; processMap_["get_partitions"] = &ThriftHiveMetastoreProcessor::process_get_partitions; processMap_["get_partition_names"] = &ThriftHiveMetastoreProcessor::process_get_partition_names; + processMap_["get_partitions_ps"] = &ThriftHiveMetastoreProcessor::process_get_partitions_ps; + processMap_["get_partition_names_ps"] = &ThriftHiveMetastoreProcessor::process_get_partition_names_ps; processMap_["alter_partition"] = &ThriftHiveMetastoreProcessor::process_alter_partition; processMap_["get_config_value"] = &ThriftHiveMetastoreProcessor::process_get_config_value; } @@ -3417,6 +3663,30 @@ } } + void get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { + uint32_t sz = ifaces_.size(); + for (uint32_t i = 0; i < sz; ++i) { + if (i == sz - 1) { + ifaces_[i]->get_partitions_ps(_return, db_name, tbl_name, part_vals, max_parts); + return; + } else { + ifaces_[i]->get_partitions_ps(_return, db_name, tbl_name, part_vals, max_parts); + } + } + } + + void get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& 
tbl_name, const std::vector & part_vals, const int16_t max_parts) { + uint32_t sz = ifaces_.size(); + for (uint32_t i = 0; i < sz; ++i) { + if (i == sz - 1) { + ifaces_[i]->get_partition_names_ps(_return, db_name, tbl_name, part_vals, max_parts); + return; + } else { + ifaces_[i]->get_partition_names_ps(_return, db_name, tbl_name, part_vals, max_parts); + } + } + } + void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { uint32_t sz = ifaces_.size(); for (uint32_t i = 0; i < sz; ++i) { Index: metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp =================================================================== --- metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (revision 5373) +++ metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (working copy) @@ -142,6 +142,16 @@ printf("get_partition_names\n"); } + void get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { + // Your implementation goes here + printf("get_partitions_ps\n"); + } + + void get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { + // Your implementation goes here + printf("get_partition_names_ps\n"); + } + void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { // Your implementation goes here printf("alter_partition\n"); Index: metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java =================================================================== --- metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (revision 5373) +++ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (working copy) @@ -73,6 +73,10 @@ public List get_partition_names(String db_name, String tbl_name, short max_parts) throws MetaException, TException; + public List get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, TException; + + public List get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, TException; + public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException; public String get_config_value(String name, String defaultValue) throws ConfigValSecurityException, TException; @@ -1043,6 +1047,84 @@ throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names failed: unknown result"); } + public List get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, TException + { + send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + return recv_get_partitions_ps(); + } + + public void send_get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws TException + { + oprot_.writeMessageBegin(new TMessage("get_partitions_ps", TMessageType.CALL, seqid_)); + get_partitions_ps_args args = new get_partitions_ps_args(); + args.db_name = db_name; + args.tbl_name = tbl_name; + args.part_vals = part_vals; + args.max_parts = max_parts; + args.write(oprot_); + oprot_.writeMessageEnd(); + oprot_.getTransport().flush(); + } + + public List recv_get_partitions_ps() throws MetaException, TException + { + TMessage msg = 
iprot_.readMessageBegin(); + if (msg.type == TMessageType.EXCEPTION) { + TApplicationException x = TApplicationException.read(iprot_); + iprot_.readMessageEnd(); + throw x; + } + get_partitions_ps_result result = new get_partitions_ps_result(); + result.read(iprot_); + iprot_.readMessageEnd(); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_ps failed: unknown result"); + } + + public List get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, TException + { + send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts); + return recv_get_partition_names_ps(); + } + + public void send_get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws TException + { + oprot_.writeMessageBegin(new TMessage("get_partition_names_ps", TMessageType.CALL, seqid_)); + get_partition_names_ps_args args = new get_partition_names_ps_args(); + args.db_name = db_name; + args.tbl_name = tbl_name; + args.part_vals = part_vals; + args.max_parts = max_parts; + args.write(oprot_); + oprot_.writeMessageEnd(); + oprot_.getTransport().flush(); + } + + public List recv_get_partition_names_ps() throws MetaException, TException + { + TMessage msg = iprot_.readMessageBegin(); + if (msg.type == TMessageType.EXCEPTION) { + TApplicationException x = TApplicationException.read(iprot_); + iprot_.readMessageEnd(); + throw x; + } + get_partition_names_ps_result result = new get_partition_names_ps_result(); + result.read(iprot_); + iprot_.readMessageEnd(); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result"); + } + public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException { send_alter_partition(db_name, tbl_name, new_part); @@ -1149,6 +1231,8 @@ processMap_.put("get_partition_by_name", new get_partition_by_name()); processMap_.put("get_partitions", new get_partitions()); processMap_.put("get_partition_names", new get_partition_names()); + processMap_.put("get_partitions_ps", new get_partitions_ps()); + processMap_.put("get_partition_names_ps", new get_partition_names_ps()); processMap_.put("alter_partition", new alter_partition()); processMap_.put("get_config_value", new get_config_value()); } @@ -1899,6 +1983,62 @@ } + private class get_partitions_ps implements ProcessFunction { + public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException + { + get_partitions_ps_args args = new get_partitions_ps_args(); + args.read(iprot); + iprot.readMessageEnd(); + get_partitions_ps_result result = new get_partitions_ps_result(); + try { + result.success = iface_.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts); + } catch (MetaException o1) { + result.o1 = o1; + } catch (Throwable th) { + LOGGER.error("Internal error processing get_partitions_ps", th); + TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing get_partitions_ps"); + oprot.writeMessageBegin(new TMessage("get_partitions_ps", TMessageType.EXCEPTION, seqid)); + x.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + return; + } + 
oprot.writeMessageBegin(new TMessage("get_partitions_ps", TMessageType.REPLY, seqid)); + result.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + } + + } + + private class get_partition_names_ps implements ProcessFunction { + public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException + { + get_partition_names_ps_args args = new get_partition_names_ps_args(); + args.read(iprot); + iprot.readMessageEnd(); + get_partition_names_ps_result result = new get_partition_names_ps_result(); + try { + result.success = iface_.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts); + } catch (MetaException o1) { + result.o1 = o1; + } catch (Throwable th) { + LOGGER.error("Internal error processing get_partition_names_ps", th); + TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing get_partition_names_ps"); + oprot.writeMessageBegin(new TMessage("get_partition_names_ps", TMessageType.EXCEPTION, seqid)); + x.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + return; + } + oprot.writeMessageBegin(new TMessage("get_partition_names_ps", TMessageType.REPLY, seqid)); + result.write(oprot); + oprot.writeMessageEnd(); + oprot.getTransport().flush(); + } + + } + private class alter_partition implements ProcessFunction { public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException { @@ -16363,6 +16503,1463 @@ } + public static class get_partitions_ps_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_partitions_ps_args"); + private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); + private static final TField TBL_NAME_FIELD_DESC = new TField("tbl_name", TType.STRING, (short)2); + private static final TField PART_VALS_FIELD_DESC = new TField("part_vals", TType.LIST, (short)3); + private static final TField MAX_PARTS_FIELD_DESC = new TField("max_parts", TType.I16, (short)4); + + private String db_name; + public static final int DB_NAME = 1; + private String tbl_name; + public static final int TBL_NAME = 2; + private List part_vals; + public static final int PART_VALS = 3; + private short max_parts; + public static final int MAX_PARTS = 4; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + public boolean max_parts = false; + } + + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(TBL_NAME, new FieldMetaData("tbl_name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(PART_VALS, new FieldMetaData("part_vals", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, + new FieldValueMetaData(TType.STRING)))); + put(MAX_PARTS, new FieldMetaData("max_parts", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.I16))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(get_partitions_ps_args.class, metaDataMap); + } + + public get_partitions_ps_args() { + this.max_parts = (short)-1; + + } + + public get_partitions_ps_args( + String db_name, + String tbl_name, + List part_vals, + short max_parts) + { + this(); + this.db_name = db_name; + this.tbl_name = tbl_name; + this.part_vals = part_vals; + this.max_parts = max_parts; + this.__isset.max_parts = true; + } + + /** 
+ * Performs a deep copy on other. + */ + public get_partitions_ps_args(get_partitions_ps_args other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetPart_vals()) { + List __this__part_vals = new ArrayList(); + for (String other_element : other.part_vals) { + __this__part_vals.add(other_element); + } + this.part_vals = __this__part_vals; + } + __isset.max_parts = other.__isset.max_parts; + this.max_parts = other.max_parts; + } + + @Override + public get_partitions_ps_args clone() { + return new get_partitions_ps_args(this); + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + // Returns true if field db_name is set (has been asigned a value) and false otherwise + public boolean isSetDb_name() { + return this.db_name != null; + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + // Returns true if field tbl_name is set (has been asigned a value) and false otherwise + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public int getPart_valsSize() { + return (this.part_vals == null) ? 0 : this.part_vals.size(); + } + + public java.util.Iterator getPart_valsIterator() { + return (this.part_vals == null) ? null : this.part_vals.iterator(); + } + + public void addToPart_vals(String elem) { + if (this.part_vals == null) { + this.part_vals = new ArrayList(); + } + this.part_vals.add(elem); + } + + public List getPart_vals() { + return this.part_vals; + } + + public void setPart_vals(List part_vals) { + this.part_vals = part_vals; + } + + public void unsetPart_vals() { + this.part_vals = null; + } + + // Returns true if field part_vals is set (has been asigned a value) and false otherwise + public boolean isSetPart_vals() { + return this.part_vals != null; + } + + public short getMax_parts() { + return this.max_parts; + } + + public void setMax_parts(short max_parts) { + this.max_parts = max_parts; + this.__isset.max_parts = true; + } + + public void unsetMax_parts() { + this.__isset.max_parts = false; + } + + // Returns true if field max_parts is set (has been asigned a value) and false otherwise + public boolean isSetMax_parts() { + return this.__isset.max_parts; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } else { + setTbl_name((String)value); + } + break; + + case PART_VALS: + if (value == null) { + unsetPart_vals(); + } else { + setPart_vals((List)value); + } + break; + + case MAX_PARTS: + if (value == null) { + unsetMax_parts(); + } else { + setMax_parts((Short)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case PART_VALS: + return getPart_vals(); + + case MAX_PARTS: + return new Short(getMax_parts()); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field 
corresponding to fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case PART_VALS: + return isSetPart_vals(); + case MAX_PARTS: + return isSetMax_parts(); + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_partitions_ps_args) + return this.equals((get_partitions_ps_args)that); + return false; + } + + public boolean equals(get_partitions_ps_args that) { + if (that == null) + return false; + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return false; + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_part_vals = true && this.isSetPart_vals(); + boolean that_present_part_vals = true && that.isSetPart_vals(); + if (this_present_part_vals || that_present_part_vals) { + if (!(this_present_part_vals && that_present_part_vals)) + return false; + if (!this.part_vals.equals(that.part_vals)) + return false; + } + + boolean this_present_max_parts = true; + boolean that_present_max_parts = true; + if (this_present_max_parts || that_present_max_parts) { + if (!(this_present_max_parts && that_present_max_parts)) + return false; + if (this.max_parts != that.max_parts) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case DB_NAME: + if (field.type == TType.STRING) { + this.db_name = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case TBL_NAME: + if (field.type == TType.STRING) { + this.tbl_name = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case PART_VALS: + if (field.type == TType.LIST) { + { + TList _list98 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list98.size); + for (int _i99 = 0; _i99 < _list98.size; ++_i99) + { + String _elem100; + _elem100 = iprot.readString(); + this.part_vals.add(_elem100); + } + iprot.readListEnd(); + } + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case MAX_PARTS: + if (field.type == TType.I16) { + this.max_parts = iprot.readI16(); + this.__isset.max_parts = true; + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); + } + + public void write(TProtocol oprot) throws TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(this.db_name); + oprot.writeFieldEnd(); + } + if (this.tbl_name != null) { + 
oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(this.tbl_name); + oprot.writeFieldEnd(); + } + if (this.part_vals != null) { + oprot.writeFieldBegin(PART_VALS_FIELD_DESC); + { + oprot.writeListBegin(new TList(TType.STRING, this.part_vals.size())); + for (String _iter101 : this.part_vals) { + oprot.writeString(_iter101); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); + oprot.writeI16(this.max_parts); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_partitions_ps_args("); + boolean first = true; + + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("part_vals:"); + if (this.part_vals == null) { + sb.append("null"); + } else { + sb.append(this.part_vals); + } + first = false; + if (!first) sb.append(", "); + sb.append("max_parts:"); + sb.append(this.max_parts); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values + } + + } + + public static class get_partitions_ps_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_partitions_ps_result"); + private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + + private List success; + public static final int SUCCESS = 0; + private MetaException o1; + public static final int O1 = 1; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + } + + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, + new StructMetaData(TType.STRUCT, Partition.class)))); + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(get_partitions_ps_result.class, metaDataMap); + } + + public get_partitions_ps_result() { + } + + public get_partitions_ps_result( + List success, + MetaException o1) + { + this(); + this.success = success; + this.o1 = o1; + } + + /** + * Performs a deep copy on other. + */ + public get_partitions_ps_result(get_partitions_ps_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(); + for (Partition other_element : other.success) { + __this__success.add(new Partition(other_element)); + } + this.success = __this__success; + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + @Override + public get_partitions_ps_result clone() { + return new get_partitions_ps_result(this); + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(Partition elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + // Returns true if field success is set (has been asigned a value) and false otherwise + public boolean isSetSuccess() { + return this.success != null; + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_partitions_ps_result) + return this.equals((get_partitions_ps_result)that); + return false; + } + + public boolean equals(get_partitions_ps_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case SUCCESS: + if (field.type == TType.LIST) { + { + TList _list102 = iprot.readListBegin(); + this.success = new ArrayList(_list102.size); + for (int _i103 = 0; _i103 < _list102.size; ++_i103) + { + Partition _elem104; + _elem104 = new Partition(); + _elem104.read(iprot); + this.success.add(_elem104); + } + iprot.readListEnd(); + } + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new MetaException(); + this.o1.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + 
} + break; + default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); + } + + public void write(TProtocol oprot) throws TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new TList(TType.STRUCT, this.success.size())); + for (Partition _iter105 : this.success) { + _iter105.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_partitions_ps_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values + } + + } + + public static class get_partition_names_ps_args implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_partition_names_ps_args"); + private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); + private static final TField TBL_NAME_FIELD_DESC = new TField("tbl_name", TType.STRING, (short)2); + private static final TField PART_VALS_FIELD_DESC = new TField("part_vals", TType.LIST, (short)3); + private static final TField MAX_PARTS_FIELD_DESC = new TField("max_parts", TType.I16, (short)4); + + private String db_name; + public static final int DB_NAME = 1; + private String tbl_name; + public static final int TBL_NAME = 2; + private List part_vals; + public static final int PART_VALS = 3; + private short max_parts; + public static final int MAX_PARTS = 4; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + public boolean max_parts = false; + } + + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(TBL_NAME, new FieldMetaData("tbl_name", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRING))); + put(PART_VALS, new FieldMetaData("part_vals", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, + new FieldValueMetaData(TType.STRING)))); + put(MAX_PARTS, new FieldMetaData("max_parts", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.I16))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(get_partition_names_ps_args.class, metaDataMap); + } + + public get_partition_names_ps_args() { + this.max_parts = (short)-1; + + } + + public get_partition_names_ps_args( + String db_name, + String tbl_name, + List part_vals, + short max_parts) + { + this(); + this.db_name = db_name; + this.tbl_name = tbl_name; + this.part_vals = part_vals; + this.max_parts = max_parts; + this.__isset.max_parts = true; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_partition_names_ps_args(get_partition_names_ps_args other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetPart_vals()) { + List __this__part_vals = new ArrayList(); + for (String other_element : other.part_vals) { + __this__part_vals.add(other_element); + } + this.part_vals = __this__part_vals; + } + __isset.max_parts = other.__isset.max_parts; + this.max_parts = other.max_parts; + } + + @Override + public get_partition_names_ps_args clone() { + return new get_partition_names_ps_args(this); + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + // Returns true if field db_name is set (has been asigned a value) and false otherwise + public boolean isSetDb_name() { + return this.db_name != null; + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + // Returns true if field tbl_name is set (has been asigned a value) and false otherwise + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public int getPart_valsSize() { + return (this.part_vals == null) ? 0 : this.part_vals.size(); + } + + public java.util.Iterator getPart_valsIterator() { + return (this.part_vals == null) ? null : this.part_vals.iterator(); + } + + public void addToPart_vals(String elem) { + if (this.part_vals == null) { + this.part_vals = new ArrayList(); + } + this.part_vals.add(elem); + } + + public List getPart_vals() { + return this.part_vals; + } + + public void setPart_vals(List part_vals) { + this.part_vals = part_vals; + } + + public void unsetPart_vals() { + this.part_vals = null; + } + + // Returns true if field part_vals is set (has been asigned a value) and false otherwise + public boolean isSetPart_vals() { + return this.part_vals != null; + } + + public short getMax_parts() { + return this.max_parts; + } + + public void setMax_parts(short max_parts) { + this.max_parts = max_parts; + this.__isset.max_parts = true; + } + + public void unsetMax_parts() { + this.__isset.max_parts = false; + } + + // Returns true if field max_parts is set (has been asigned a value) and false otherwise + public boolean isSetMax_parts() { + return this.__isset.max_parts; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } else { + setTbl_name((String)value); + } + break; + + case PART_VALS: + if (value == null) { + unsetPart_vals(); + } else { + setPart_vals((List)value); + } + break; + + case MAX_PARTS: + if (value == null) { + unsetMax_parts(); + } else { + setMax_parts((Short)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case PART_VALS: + return getPart_vals(); + + case MAX_PARTS: + return new Short(getMax_parts()); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field corresponding to 
fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case PART_VALS: + return isSetPart_vals(); + case MAX_PARTS: + return isSetMax_parts(); + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_partition_names_ps_args) + return this.equals((get_partition_names_ps_args)that); + return false; + } + + public boolean equals(get_partition_names_ps_args that) { + if (that == null) + return false; + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return false; + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_part_vals = true && this.isSetPart_vals(); + boolean that_present_part_vals = true && that.isSetPart_vals(); + if (this_present_part_vals || that_present_part_vals) { + if (!(this_present_part_vals && that_present_part_vals)) + return false; + if (!this.part_vals.equals(that.part_vals)) + return false; + } + + boolean this_present_max_parts = true; + boolean that_present_max_parts = true; + if (this_present_max_parts || that_present_max_parts) { + if (!(this_present_max_parts && that_present_max_parts)) + return false; + if (this.max_parts != that.max_parts) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case DB_NAME: + if (field.type == TType.STRING) { + this.db_name = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case TBL_NAME: + if (field.type == TType.STRING) { + this.tbl_name = iprot.readString(); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case PART_VALS: + if (field.type == TType.LIST) { + { + TList _list106 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list106.size); + for (int _i107 = 0; _i107 < _list106.size; ++_i107) + { + String _elem108; + _elem108 = iprot.readString(); + this.part_vals.add(_elem108); + } + iprot.readListEnd(); + } + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case MAX_PARTS: + if (field.type == TType.I16) { + this.max_parts = iprot.readI16(); + this.__isset.max_parts = true; + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); + } + + public void write(TProtocol oprot) throws TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(this.db_name); + oprot.writeFieldEnd(); + } + if (this.tbl_name != null) 
{ + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(this.tbl_name); + oprot.writeFieldEnd(); + } + if (this.part_vals != null) { + oprot.writeFieldBegin(PART_VALS_FIELD_DESC); + { + oprot.writeListBegin(new TList(TType.STRING, this.part_vals.size())); + for (String _iter109 : this.part_vals) { + oprot.writeString(_iter109); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); + oprot.writeI16(this.max_parts); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_partition_names_ps_args("); + boolean first = true; + + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("part_vals:"); + if (this.part_vals == null) { + sb.append("null"); + } else { + sb.append(this.part_vals); + } + first = false; + if (!first) sb.append(", "); + sb.append("max_parts:"); + sb.append(this.max_parts); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values + } + + } + + public static class get_partition_names_ps_result implements TBase, java.io.Serializable, Cloneable { + private static final TStruct STRUCT_DESC = new TStruct("get_partition_names_ps_result"); + private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0); + private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1); + + private List success; + public static final int SUCCESS = 0; + private MetaException o1; + public static final int O1 = 1; + + private final Isset __isset = new Isset(); + private static final class Isset implements java.io.Serializable { + } + + public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, + new FieldValueMetaData(TType.STRING)))); + put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT, + new FieldValueMetaData(TType.STRUCT))); + }}); + + static { + FieldMetaData.addStructMetaDataMap(get_partition_names_ps_result.class, metaDataMap); + } + + public get_partition_names_ps_result() { + } + + public get_partition_names_ps_result( + List success, + MetaException o1) + { + this(); + this.success = success; + this.o1 = o1; + } + + /** + * Performs a deep copy on other. + */ + public get_partition_names_ps_result(get_partition_names_ps_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(); + for (String other_element : other.success) { + __this__success.add(other_element); + } + this.success = __this__success; + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + @Override + public get_partition_names_ps_result clone() { + return new get_partition_names_ps_result(this); + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + // Returns true if field success is set (has been asigned a value) and false otherwise + public boolean isSetSuccess() { + return this.success != null; + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + // Returns true if field o1 is set (has been asigned a value) and false otherwise + public boolean isSetO1() { + return this.o1 != null; + } + + public void setFieldValue(int fieldID, Object value) { + switch (fieldID) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + public Object getFieldValue(int fieldID) { + switch (fieldID) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise + public boolean isSet(int fieldID) { + switch (fieldID) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + default: + throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!"); + } + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_partition_names_ps_result) + return this.equals((get_partition_names_ps_result)that); + return false; + } + + public boolean equals(get_partition_names_ps_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public void read(TProtocol iprot) throws TException { + TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == TType.STOP) { + break; + } + switch (field.id) + { + case SUCCESS: + if (field.type == TType.LIST) { + { + TList _list110 = iprot.readListBegin(); + this.success = new ArrayList(_list110.size); + for (int _i111 = 0; _i111 < _list110.size; ++_i111) + { + String _elem112; + _elem112 = iprot.readString(); + this.success.add(_elem112); + } + iprot.readListEnd(); + } + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case O1: + if (field.type == TType.STRUCT) { + this.o1 = new MetaException(); + this.o1.read(iprot); + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + 
default: + TProtocolUtil.skip(iprot, field.type); + break; + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + validate(); + } + + public void write(TProtocol oprot) throws TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new TList(TType.STRING, this.success.size())); + for (String _iter113 : this.success) { + oprot.writeString(_iter113); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_partition_names_ps_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws TException { + // check for required fields + // check that fields of type enum have valid values + } + + } + public static class alter_partition_args implements TBase, java.io.Serializable, Cloneable { private static final TStruct STRUCT_DESC = new TStruct("alter_partition_args"); private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); Index: metastore/src/gen-php/ThriftHiveMetastore.php =================================================================== --- metastore/src/gen-php/ThriftHiveMetastore.php (revision 5373) +++ metastore/src/gen-php/ThriftHiveMetastore.php (working copy) @@ -34,6 +34,8 @@ public function get_partition_by_name($db_name, $tbl_name, $part_name); public function get_partitions($db_name, $tbl_name, $max_parts); public function get_partition_names($db_name, $tbl_name, $max_parts); + public function get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts); + public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts); public function alter_partition($db_name, $tbl_name, $new_part); public function get_config_value($name, $defaultValue); } @@ -1428,6 +1430,120 @@ throw new Exception("get_partition_names failed: unknown result"); } + public function get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts) + { + $this->send_get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts); + return $this->recv_get_partitions_ps(); + } + + public function send_get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts) + { + $args = new metastore_ThriftHiveMetastore_get_partitions_ps_args(); + $args->db_name = $db_name; + $args->tbl_name = $tbl_name; + $args->part_vals = $part_vals; + $args->max_parts = $max_parts; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_partitions_ps', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_partitions_ps', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_partitions_ps() + { + $bin_accel = ($this->input_ 
instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'metastore_ThriftHiveMetastore_get_partitions_ps_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new metastore_ThriftHiveMetastore_get_partitions_ps_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + throw new Exception("get_partitions_ps failed: unknown result"); + } + + public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts) + { + $this->send_get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts); + return $this->recv_get_partition_names_ps(); + } + + public function send_get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts) + { + $args = new metastore_ThriftHiveMetastore_get_partition_names_ps_args(); + $args->db_name = $db_name; + $args->tbl_name = $tbl_name; + $args->part_vals = $part_vals; + $args->max_parts = $max_parts; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_partition_names_ps', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_partition_names_ps', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_partition_names_ps() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'metastore_ThriftHiveMetastore_get_partition_names_ps_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new metastore_ThriftHiveMetastore_get_partition_names_ps_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + throw new Exception("get_partition_names_ps failed: unknown result"); + } + public function alter_partition($db_name, $tbl_name, $new_part) { $this->send_alter_partition($db_name, $tbl_name, $new_part); @@ -6849,6 +6965,564 @@ } +class metastore_ThriftHiveMetastore_get_partitions_ps_args { + static $_TSPEC; + + public $db_name = null; + public $tbl_name = null; + public $part_vals = null; + public $max_parts = -1; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'part_vals', + 'type' => 
TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 4 => array( + 'var' => 'max_parts', + 'type' => TType::I16, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; + } + if (isset($vals['tbl_name'])) { + $this->tbl_name = $vals['tbl_name']; + } + if (isset($vals['part_vals'])) { + $this->part_vals = $vals['part_vals']; + } + if (isset($vals['max_parts'])) { + $this->max_parts = $vals['max_parts']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_partitions_ps_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::LST) { + $this->part_vals = array(); + $_size173 = 0; + $_etype176 = 0; + $xfer += $input->readListBegin($_etype176, $_size173); + for ($_i177 = 0; $_i177 < $_size173; ++$_i177) + { + $elem178 = null; + $xfer += $input->readString($elem178); + $this->part_vals []= $elem178; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::I16) { + $xfer += $input->readI16($this->max_parts); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_ps_args'); + if ($this->db_name !== null) { + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeString($this->db_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->tbl_name !== null) { + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeString($this->tbl_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->part_vals !== null) { + if (!is_array($this->part_vals)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); + { + $output->writeListBegin(TType::STRING, count($this->part_vals)); + { + foreach ($this->part_vals as $iter179) + { + $xfer += $output->writeString($iter179); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->max_parts !== null) { + $xfer += $output->writeFieldBegin('max_parts', TType::I16, 4); + $xfer += $output->writeI16($this->max_parts); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class metastore_ThriftHiveMetastore_get_partitions_ps_result { + static $_TSPEC; + + public $success = null; + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => 
'metastore_Partition', + ), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'metastore_MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_partitions_ps_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size180 = 0; + $_etype183 = 0; + $xfer += $input->readListBegin($_etype183, $_size180); + for ($_i184 = 0; $_i184 < $_size180; ++$_i184) + { + $elem185 = null; + $elem185 = new metastore_Partition(); + $xfer += $elem185->read($input); + $this->success []= $elem185; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new metastore_MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_ps_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRUCT, count($this->success)); + { + foreach ($this->success as $iter186) + { + $xfer += $iter186->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class metastore_ThriftHiveMetastore_get_partition_names_ps_args { + static $_TSPEC; + + public $db_name = null; + public $tbl_name = null; + public $part_vals = null; + public $max_parts = -1; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'part_vals', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 4 => array( + 'var' => 'max_parts', + 'type' => TType::I16, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; + } + if (isset($vals['tbl_name'])) { + $this->tbl_name = $vals['tbl_name']; + } + if (isset($vals['part_vals'])) { + $this->part_vals = $vals['part_vals']; + } + if (isset($vals['max_parts'])) { + $this->max_parts = $vals['max_parts']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_partition_names_ps_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while 
(true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::LST) { + $this->part_vals = array(); + $_size187 = 0; + $_etype190 = 0; + $xfer += $input->readListBegin($_etype190, $_size187); + for ($_i191 = 0; $_i191 < $_size187; ++$_i191) + { + $elem192 = null; + $xfer += $input->readString($elem192); + $this->part_vals []= $elem192; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::I16) { + $xfer += $input->readI16($this->max_parts); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partition_names_ps_args'); + if ($this->db_name !== null) { + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeString($this->db_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->tbl_name !== null) { + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeString($this->tbl_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->part_vals !== null) { + if (!is_array($this->part_vals)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); + { + $output->writeListBegin(TType::STRING, count($this->part_vals)); + { + foreach ($this->part_vals as $iter193) + { + $xfer += $output->writeString($iter193); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->max_parts !== null) { + $xfer += $output->writeFieldBegin('max_parts', TType::I16, 4); + $xfer += $output->writeI16($this->max_parts); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class metastore_ThriftHiveMetastore_get_partition_names_ps_result { + static $_TSPEC; + + public $success = null; + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'metastore_MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_partition_names_ps_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size194 = 0; + $_etype197 = 0; + 
$xfer += $input->readListBegin($_etype197, $_size194);
+        for ($_i198 = 0; $_i198 < $_size194; ++$_i198)
+        {
+          $elem199 = null;
+          $xfer += $input->readString($elem199);
+          $this->success []= $elem199;
+        }
+        $xfer += $input->readListEnd();
+      } else {
+        $xfer += $input->skip($ftype);
+      }
+      break;
+    case 1:
+      if ($ftype == TType::STRUCT) {
+        $this->o1 = new metastore_MetaException();
+        $xfer += $this->o1->read($input);
+      } else {
+        $xfer += $input->skip($ftype);
+      }
+      break;
+    default:
+      $xfer += $input->skip($ftype);
+      break;
+    }
+    $xfer += $input->readFieldEnd();
+  }
+  $xfer += $input->readStructEnd();
+  return $xfer;
+}
+
+public function write($output) {
+  $xfer = 0;
+  $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partition_names_ps_result');
+  if ($this->success !== null) {
+    if (!is_array($this->success)) {
+      throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+    }
+    $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+    {
+      $output->writeListBegin(TType::STRING, count($this->success));
+      {
+        foreach ($this->success as $iter200)
+        {
+          $xfer += $output->writeString($iter200);
+        }
+      }
+      $output->writeListEnd();
+    }
+    $xfer += $output->writeFieldEnd();
+  }
+  if ($this->o1 !== null) {
+    $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+    $xfer += $this->o1->write($output);
+    $xfer += $output->writeFieldEnd();
+  }
+  $xfer += $output->writeFieldStop();
+  $xfer += $output->writeStructEnd();
+  return $xfer;
+}
+
+}
+
 class metastore_ThriftHiveMetastore_alter_partition_args {
   static $_TSPEC;

Index: metastore/if/hive_metastore.thrift
===================================================================
--- metastore/if/hive_metastore.thrift	(revision 5373)
+++ metastore/if/hive_metastore.thrift	(working copy)
@@ -206,11 +206,22 @@
                        throws(1:MetaException o1, 2:NoSuchObjectException o2)

  // returns all the partitions for this table in reverse chronological order.
-  // if max parts is given then it will return only that many
+  // If max parts is given then it will return only that many.
  list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
                       throws(1:NoSuchObjectException o1, 2:MetaException o2)
  list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
                       throws(1:MetaException o2)
+
+  // The get_partitions_ps and get_partition_names_ps methods allow filtering
+  // by a partial partition specification, as needed for dynamic partitions.
+  // The values that are not restricted should be empty strings. Nulls were
+  // considered (instead of "") but caused errors in generated Python code.
+  list<Partition> get_partitions_ps(1:string db_name, 2:string tbl_name,
+      3:list<string> part_vals, 4:i16 max_parts=-1)
+                       throws(1:MetaException o1)
+  list<string> get_partition_names_ps(1:string db_name,
+      2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
+                       throws(1:MetaException o1)
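To make the new IDL surface concrete, here is a minimal client sketch (hypothetical, not part of this patch). It assumes a metastore Thrift service on localhost:9083, the generated Java client, and a Thrift runtime on the classpath; the runtime package names below follow Apache Thrift and may differ in this tree (older trees use com.facebook.thrift). Per the comment above, unrestricted partition columns are passed as empty strings.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
// Thrift runtime classes; the package prefix here is an assumption.
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class PartialSpecClientSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical metastore endpoint.
    TSocket transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // srcpart is partitioned on (ds, hr). Restrict ds and leave hr
    // unrestricted by passing "" in its position; -1 means no limit.
    List<String> names = client.get_partition_names_ps(
        "default", "srcpart", Arrays.asList("2008-04-08", ""), (short) -1);

    for (String name : names) {
      System.out.println(name); // e.g. ds=2008-04-08/hr=11
    }
    transport.close();
  }
}

  // changes the partition to the new partition object.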
partition is identified from the part values // in the new_part Index: ql/src/test/results/clientpositive/show_partitions.q.out =================================================================== --- ql/src/test/results/clientpositive/show_partitions.q.out (revision 0) +++ ql/src/test/results/clientpositive/show_partitions.q.out (revision 0) @@ -0,0 +1,25 @@ +PREHOOK: query: SHOW PARTITIONS srcpart +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS srcpart +POSTHOOK: type: SHOWPARTITIONS +ds=2008-04-08/hr=11 +ds=2008-04-08/hr=12 +ds=2008-04-09/hr=11 +ds=2008-04-09/hr=12 +PREHOOK: query: SHOW PARTITIONS srcpart PARTITION(hr='11') +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS srcpart PARTITION(hr='11') +POSTHOOK: type: SHOWPARTITIONS +ds=2008-04-08/hr=11 +ds=2008-04-09/hr=11 +PREHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08') +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08') +POSTHOOK: type: SHOWPARTITIONS +ds=2008-04-08/hr=11 +ds=2008-04-08/hr=12 +PREHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08', hr='12') +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08', hr='12') +POSTHOOK: type: SHOWPARTITIONS +ds=2008-04-08/hr=12 Index: ql/src/test/queries/clientpositive/show_partitions.q =================================================================== --- ql/src/test/queries/clientpositive/show_partitions.q (revision 0) +++ ql/src/test/queries/clientpositive/show_partitions.q (revision 0) @@ -0,0 +1,4 @@ +SHOW PARTITIONS srcpart; +SHOW PARTITIONS srcpart PARTITION(hr='11'); +SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08'); +SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08', hr='12'); \ No newline at end of file Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (revision 5373) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (working copy) @@ -19,12 +19,10 @@ package org.apache.hadoop.hive.ql.metadata; import java.io.IOException; -import java.net.URI; import java.util.AbstractMap; import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.Properties; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -48,13 +46,9 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; -import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; @@ -64,7 +58,7 @@ * The Hive class contains information about this instance of Hive. An instance * of Hive represents a set of data in a file system (usually HDFS) organized * for easy query processing - * + * */ public class Hive { @@ -93,12 +87,12 @@ * Gets hive object for the current thread. 
If one is not initialized then a * new one is created If the new configuration is different in metadata conf * vars then a new one is created. - * + * * @param c * new Hive Configuration * @return Hive object for current thread * @throws HiveException - * + * */ public static Hive get(HiveConf c) throws HiveException { boolean needsRefresh = false; @@ -118,7 +112,7 @@ /** * get a connection to metastore. see get(HiveConf) function for comments - * + * * @param c * new conf * @param needsRefresh @@ -152,10 +146,10 @@ /** * Hive - * + * * @param argFsRoot * @param c - * + * */ private Hive(HiveConf c) throws HiveException { conf = c; @@ -171,7 +165,7 @@ /** * Creates a table metdata and the directory for the table data - * + * * @param tableName * name of the table * @param columns @@ -195,7 +189,7 @@ /** * Creates a table metdata and the directory for the table data - * + * * @param tableName * name of the table * @param columns @@ -249,7 +243,7 @@ /** * Updates the existing table metadata with the new metadata. - * + * * @param tblName * name of the existing table * @param newTbl @@ -272,7 +266,7 @@ /** * Updates the existing table metadata with the new metadata. - * + * * @param tblName * name of the existing table * @param newTbl @@ -296,7 +290,7 @@ /** * Creates the table with the give objects - * + * * @param tbl * a table object * @throws HiveException @@ -307,7 +301,7 @@ /** * Creates the table with the give objects - * + * * @param tbl * a table object * @param ifNotExists @@ -334,7 +328,7 @@ /** * Drops table along with the data in it. If the table doesn't exist then it * is a no-op - * + * * @param dbName * database where the table lives * @param tableName @@ -348,7 +342,7 @@ /** * Drops the table. - * + * * @param tableName * @param deleteData * deletes the underlying data along with metadata @@ -376,7 +370,7 @@ /** * Returns metadata of the table. - * + * * @param dbName * the name of the database * @param tableName @@ -393,7 +387,7 @@ /** * Returns metadata of the table - * + * * @param dbName * the name of the database * @param tableName @@ -409,7 +403,7 @@ if (tableName == null || tableName.equals("")) { throw new HiveException("empty table creation??"); } - + // Get the table from metastore org.apache.hadoop.hive.metastore.api.Table tTable = null; try { @@ -423,7 +417,7 @@ } catch (Exception e) { throw new HiveException("Unable to fetch table " + tableName, e); } - + // For non-views, we need to do some extra fixes if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) { // Fix the non-printable chars @@ -436,7 +430,7 @@ Integer.toString(b[0])); } } - + // Use LazySimpleSerDe for MetadataTypedColumnsetSerDe. // NOTE: LazySimpleSerDe does not support tables with a single column of // col @@ -452,9 +446,9 @@ org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); } } - + Table table = new Table(tTable); - + table.checkValidity(); return table; } @@ -466,7 +460,7 @@ /** * returns all existing tables from default database which match the given * pattern. The matching occurs as per Java regular expressions - * + * * @param tablePattern * java re pattern * @return list of table names @@ -480,7 +474,7 @@ /** * returns all existing tables from the given database which match the given * pattern. The matching occurs as per Java regular expressions - * + * * @param database * the database name * @param tablePattern @@ -528,7 +522,7 @@ * the partition with the contents of loadPath. 
- If the partition does not
 * exist - one is created - files in loadPath are moved into Hive. But the
 * directory itself is not removed.
-   *
+   *
   * @param loadPath
   *          Directory containing files to load into Table
   * @param tableName
@@ -597,7 +591,7 @@
   * the contents of loadPath. - If table does not exist - an exception is
   * thrown - files in loadPath are moved into Hive. But the directory itself is
   * not removed.
-   *
+   *
   * @param loadPath
   *          Directory containing files to load into Table
   * @param tableName
@@ -620,7 +614,7 @@
  /**
   * Creates a partition.
-   *
+   *
   * @param tbl
   *          table for which partition needs to be created
   * @param partSpec
@@ -636,7 +630,7 @@
  /**
   * Creates a partition
-   *
+   *
   * @param tbl
   *          table for which partition needs to be created
   * @param partSpec
@@ -673,7 +667,7 @@
  /**
   * Returns partition metadata
-   *
+   *
   * @param tbl
   *          the partition's table
   * @param partSpec
@@ -730,7 +724,7 @@
  public List<String> getPartitionNames(String dbName, String tblName, short max)
      throws HiveException {
-    List<String> names = null;
+    List<String> names = null;
    try {
      names = getMSC().listPartitionNames(dbName, tblName, max);
    } catch (Exception e) {
@@ -740,9 +734,25 @@
    return names;
  }

+  public List<String> getPartitionNames(String dbName, String tblName,
+      Map<String, String> partSpec, short max) throws HiveException {
+    List<String> names = null;
+    Table t = getTable(dbName, tblName);
+
+    List<String> pvals = getPvals(t.getPartCols(), partSpec);
+
+    try {
+      names = getMSC().listPartitionNames(dbName, tblName, pvals, max);
+    } catch (Exception e) {
+      LOG.error(StringUtils.stringifyException(e));
+      throw new HiveException(e);
+    }
+    return names;
+  }
+
  /**
   * get all the partitions that the table has
-   *
+   *
   * @param tbl
   *          object for which partition is needed
   * @return list of partition objects
@@ -771,6 +781,54 @@
    }
  }

+  private static List<String> getPvals(List<FieldSchema> partCols,
+      Map<String, String> partSpec) {
+    List<String> pvals = new ArrayList<String>();
+    for (FieldSchema field : partCols) {
+      String val = partSpec.get(field.getName());
+      if (val == null) {
+        val = "";
+      }
+      pvals.add(val);
+    }
+    return pvals;
+  }
+
+  /**
+   * Get all the partitions of the table that match the given partial
+   * specification. Partition columns whose value can be anything should be
+   * given as empty strings.
+   *
+   * @param tbl
+   *          object for which partitions are needed. Must be partitioned.
+   * @param partialPartSpec
+   *          partial partition specification (a subset of the partition
+   *          columns mapped to values)
+   * @return list of partition objects
+   * @throws HiveException
+   */
+  public List<Partition> getPartitions(Table tbl, Map<String, String> partialPartSpec)
+      throws HiveException {
+    if (!tbl.isPartitioned()) {
+      throw new HiveException("Partition spec should only be supplied for a "
+          + "partitioned table");
+    }
+
+    List<String> partialPvals = getPvals(tbl.getPartCols(), partialPartSpec);
+
+    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = null;
+    try {
+      partitions = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(),
+          partialPvals, (short) -1);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+
+    List<Partition> qlPartitions = new ArrayList<Partition>();
+    for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
+      qlPartitions.add(new Partition(tbl, p));
+    }
+
+    return qlPartitions;
+  }
+
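The interplay between the partial-spec map and the positional pvals list is the crux of the API above, so a short usage sketch may help (hypothetical driver code, not part of this patch; it assumes a configured HiveConf, the srcpart test table with partition columns ds and hr, and that HiveConf's Class-based constructor applies here).

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PartialSpecSketch {
  public static void main(String[] args) throws Exception {
    // HiveConf construction is sketched; the exact constructor may differ.
    Hive db = Hive.get(new HiveConf(Hive.class));
    Table tbl = db.getTable("default", "srcpart");

    // Restrict hr only; ds is left out of the map on purpose.
    Map<String, String> spec = new HashMap<String, String>();
    spec.put("hr", "11");

    // getPvals walks the table's partition columns in order and fills the
    // gaps with "", so the metastore receives pvals = ["", "11"].
    List<Partition> parts = db.getPartitions(tbl, spec);
    System.out.println(parts.size() + " partitions match hr=11");
  }
}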
  static private void checkPaths(FileSystem fs, FileStatus[] srcs, Path destf,
      boolean replace) throws HiveException {
    try {
@@ -849,7 +907,7 @@
  /**
   * Replaces files in the partition with new data set specified by srcf. Works
   * by moving files
-   *
+   *
   * @param srcf
   *          Files to be moved. Leaf Directories or Globbed File Paths
   * @param destf
@@ -916,7 +974,7 @@
  /**
   * Creates a metastore client. Currently it creates only JDBC based client as
   * File based store support is removed
-   *
+   *
   * @returns a Meta Store Client
   * @throws HiveMetaException
   *           if a working client can't be created
@@ -951,7 +1009,7 @@
  }

  /**
-   *
+   *
   * @return the metastore client for the current thread
   * @throws MetaException
   */
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(revision 5373)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(working copy)
@@ -52,7 +52,6 @@
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
@@ -92,7 +91,7 @@
 /**
  * DDLTask implementation.
- *
+ *
 **/
 public class DDLTask extends Task<DDLWork> implements Serializable {
  private static final long serialVersionUID = 1L;
@@ -112,6 +111,7 @@
    this.conf = conf;
  }

+  @Override
  public int execute() {

    // Create the db
@@ -204,7 +204,7 @@
  /**
   * Add a partition to a table.
-   *
+   *
   * @param db
   *          Database to add the partition to.
   * @param addPartitionDesc
@@ -250,12 +250,12 @@
      throw new HiveException("Cannot use ALTER TABLE on a non-native table");
    }
  }
-
+
  /**
   * MetastoreCheck, see if the data in the metastore matches what is on the
   * dfs. Current version checks for tables and partitions that are either
   * missing on disk on in the metastore.
-   *
+   *
   * @param db
   *          The database in question.
   * @param msckDesc
@@ -334,7 +334,7 @@
  /**
   * Write the result of msck to a writer.
-   *
+   *
   * @param result
   *          The result we're going to write
   * @param msg
@@ -368,7 +368,7 @@
  /**
   * Write a list of partitions to a file.
-   *
+   *
   * @param db
   *          The database in question.
   * @param showParts
@@ -389,15 +389,19 @@
      console.printError("Table " + tabName + " is not a partitioned table");
      return 1;
    }
+    if (showParts.getPartSpec() != null) {
+      parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+          tbl.getTableName(), showParts.getPartSpec(), (short) -1);
+    } else {
+      parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl
+          .getTableName(), (short) -1);
+    }

-    parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl
-        .getTableName(), Short.MAX_VALUE);
-
    // write the results in the file
    try {
      Path resFile = new Path(showParts.getResFile());
      FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
      Iterator<String> iterParts = parts.iterator();

      while (iterParts.hasNext()) {
@@ -421,7 +425,7 @@
  /**
   * Write a list of the tables in the database to a file.
-   *
+   *
   * @param db
   *          The database in question.
   * @param showTbls
@@ -445,7 +449,7 @@
    try {
      Path resFile = new Path(showTbls.getResFile());
      FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
      SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
      Iterator<String> iterTbls = sortedTbls.iterator();

@@ -469,7 +473,7 @@
  /**
   * Write a list of the user defined functions to a file.
-   *
+   *
   * @param showFuncs
   *          are the functions we're interested in.
   * @return Returns 0 when execution succeeds and above 0 if it fails.
@@ -491,7 +495,7 @@
    try {
      Path resFile = new Path(showFuncs.getResFile());
      FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
      SortedSet<String> sortedFuncs = new TreeSet<String>(funcs);
      Iterator<String> iterFuncs = sortedFuncs.iterator();

@@ -515,7 +519,7 @@
  /**
   * Shows a description of a function.
-   *
+   *
   * @param descFunc
   *          is the function we are describing
   * @throws HiveException
@@ -527,7 +531,7 @@
    try {
      Path resFile = new Path(descFunc.getResFile());
      FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);

      // get the function documentation
      Description desc = null;
@@ -577,7 +581,7 @@
  /**
   * Write the status of tables to a file.
-   *
+   *
   * @param db
   *          The database in question.
   * @param showTblStatus
@@ -617,7 +621,7 @@
    try {
      Path resFile = new Path(showTblStatus.getResFile());
      FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
      Iterator<Table> iterTables = tbls.iterator();

      while (iterTables.hasNext()) {
@@ -699,7 +703,7 @@
  /**
   * Write the description of a table to a file.
-   *
+   *
   * @param db
   *          The database in question.
   * @param descTbl
@@ -763,7 +767,7 @@
      }
      Path resFile = new Path(descTbl.getResFile());
      FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
      Iterator<FieldSchema> iterCols = cols.iterator();
      while (iterCols.hasNext()) {
        // create a row per column
@@ -955,7 +959,7 @@
  /**
   * Alter a given table.
-   *
+   *
   * @param db
   *          The database in question.
   * @param alterTbl
@@ -1162,7 +1166,7 @@
  /**
   * Drop a given table.
-   *
+   *
   * @param db
   *          The database in question.
   * @param dropTbl
@@ -1267,7 +1271,7 @@
  /**
   * Create a new table.
-   *
+   *
   * @param db
   *          The database in question.
   * @param crtTbl
@@ -1322,14 +1326,14 @@
      Iterator<Entry<String, String>> iter = crtTbl.getMapProp().entrySet()
          .iterator();
      while (iter.hasNext()) {
-        Entry<String, String> m = (Entry<String, String>) iter.next();
+        Entry<String, String> m = iter.next();
        tbl.setSerdeParam(m.getKey(), m.getValue());
      }
    }

    /*
     * We use LazySimpleSerDe by default.
-     *
+     *
     * If the user didn't specify a SerDe, and any of the columns are not simple
     * types, we will have to use DynamicSerDe instead.
     */
@@ -1423,7 +1427,7 @@
  /**
   * Create a new table like an existing table.
-   *
+   *
   * @param db
   *          The database in question.
   * @param crtTbl
@@ -1459,7 +1463,7 @@
  /**
   * Create a new view.
-   *
+   *
   * @param db
   *          The database in question.
   * @param crtView
@@ -1503,6 +1507,7 @@
    return 0;
  }

+  @Override
  public int getType() {
    return StageType.DDL;
  }
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java	(revision 5373)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java	(working copy)
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.plan;

 import java.io.Serializable;
+import java.util.Map;

 import org.apache.hadoop.fs.Path;

@@ -31,6 +32,9 @@
  private static final long serialVersionUID = 1L;
  String tabName;
  String resFile;
+  // Filter the partitions to show based on the supplied spec
+  Map<String, String> partSpec;
+
  /**
   * table name for the result of show tables.
   */
@@ -57,9 +61,11 @@
   * @param resFile
   *          File to store the results in
   */
-  public ShowPartitionsDesc(String tabName, Path resFile) {
+  public ShowPartitionsDesc(String tabName, Path resFile,
+      Map<String, String> partSpec) {
    this.tabName = tabName;
    this.resFile = resFile.toString();
+    this.partSpec = partSpec;
  }

  /**
@@ -79,6 +85,22 @@
  }

  /**
+   * @return the partition spec used to filter the partitions shown.
+   */
+  @Explain(displayName = "partSpec")
+  public Map<String, String> getPartSpec() {
+    return partSpec;
+  }
+
+  /**
+   * @param partSpec
+   *          the partial partition spec to filter the partitions by
+   */
+  public void setPartSpec(Map<String, String> partSpec) {
+    this.partSpec = partSpec;
+  }
+
+  /**
   * @return the results file
   */
  @Explain(displayName = "result file", normalExplain = false)
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g	(revision 5373)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g	(working copy)
@@ -391,7 +391,7 @@
 @after { msgs.pop(); }
    : KW_SHOW KW_TABLES showStmtIdentifier?  -> ^(TOK_SHOWTABLES showStmtIdentifier?)
    | KW_SHOW KW_FUNCTIONS showStmtIdentifier?  -> ^(TOK_SHOWFUNCTIONS showStmtIdentifier?)
-    | KW_SHOW KW_PARTITIONS Identifier -> ^(TOK_SHOWPARTITIONS Identifier)
+    | KW_SHOW KW_PARTITIONS Identifier partitionSpec? -> ^(TOK_SHOWPARTITIONS Identifier partitionSpec?)
    | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=Identifier)? KW_LIKE showStmtIdentifier partitionSpec?
    -> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?)
    ;
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java	(revision 5373)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java	(working copy)
@@ -273,7 +273,7 @@
  /**
   * Get the fully qualified name in the ast. e.g. the ast of the form ^(DOT
   * ^(DOT a b) c) will generate a name of the form a.b.c
-   *
+   *
   * @param ast
   *          The AST from which the qualified name has to be extracted
   * @return String
@@ -289,7 +289,7 @@
  /**
   * Create a FetchTask for a given table and thrift ddl schema.
-   *
+   *
   * @param tablename
   *          tablename
   * @param schema
@@ -346,7 +346,14 @@
  private void analyzeShowPartitions(ASTNode ast) throws SemanticException {
    ShowPartitionsDesc showPartsDesc;
    String tableName = unescapeIdentifier(ast.getChild(0).getText());
-    showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile());
+    List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
+    // We can have at most one partition spec
+    assert(partSpecs.size() <= 1);
+    Map<String, String> partSpec = null;
+    if (partSpecs.size() > 0) {
+      partSpec = partSpecs.get(0);
+    }
+    showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
        showPartsDesc), conf));
    setFetchTask(createFetchTask(showPartsDesc.getSchema()));
@@ -396,7 +403,7 @@
  /**
   * Add the task according to the parsed command tree. This is used for the CLI
   * command "SHOW FUNCTIONS;".
-   *
+   *
   * @param ast
   *          The parsed command tree.
   * @throws SemanticException
@@ -418,7 +425,7 @@
  /**
   * Add the task according to the parsed command tree. This is used for the CLI
   * command "DESCRIBE FUNCTION;".
-   *
+   *
   * @param ast
   *          The parsed command tree.
   * @throws SemanticException
@@ -508,7 +515,7 @@
  /**
   * Add one or more partitions to a table. Useful when the data has been copied
   * to the right location by some other process.
- * + * * @param ast * The parsed command tree. * @throws SemanticException @@ -568,7 +575,7 @@ /** * Verify that the information in the metastore matches up with the data on * the fs. - * + * * @param ast * Query tree. * @throws SemanticException @@ -593,7 +600,7 @@ /** * Get the partition specs from the tree. - * + * * @param ast * Tree to extract partitions from. * @return A list of partition name to value mappings.
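Taken together, the pieces above give SHOW PARTITIONS an optional filter: the grammar attaches a partitionSpec to TOK_SHOWPARTITIONS, DDLSemanticAnalyzer carries at most one spec map into ShowPartitionsDesc, and DDLTask hands it to Hive.getPartitionNames, which expands it positionally before calling the metastore. The standalone demo below re-derives that map-to-positional expansion (the body of getPvals is reimplemented here since the method is private; the column names ds/hr and the sample values come from the show_partitions.q test fixtures).

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SpecExpansionDemo {
  public static void main(String[] args) {
    // srcpart's partition columns, in declaration order.
    List<String> partCols = Arrays.asList("ds", "hr");

    // SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08')
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("ds", "2008-04-08");

    // Mirror of Hive.getPvals: unspecified columns become "".
    List<String> pvals = new ArrayList<String>();
    for (String col : partCols) {
      String val = partSpec.get(col);
      pvals.add(val == null ? "" : val);
    }

    // pvals is now ["2008-04-08", ""], exactly what get_partition_names_ps
    // expects; per show_partitions.q.out the server then returns
    //   ds=2008-04-08/hr=11
    //   ds=2008-04-08/hr=12
    System.out.println(pvals);
  }
}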