Index: metastore/ivy.xml
===================================================================
--- metastore/ivy.xml (revision 991274)
+++ metastore/ivy.xml (working copy)
@@ -9,7 +9,7 @@
-
+
@@ -17,8 +17,9 @@
+
-
+
@@ -27,13 +28,13 @@
-
+
-
+
@@ -41,7 +42,7 @@
-
+
Index: metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py
===================================================================
--- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py (revision 991274)
+++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py (working copy)
@@ -245,6 +245,16 @@
"""
pass
+ def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
+ """
+ Parameters:
+ - db_name
+ - tbl_name
+ - filter
+ - max_parts
+ """
+ pass
+
def alter_partition(self, db_name, tbl_name, new_part):
"""
Parameters:
@@ -1336,6 +1346,46 @@
raise result.o1
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result");
+ def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
+ """
+ Parameters:
+ - db_name
+ - tbl_name
+ - filter
+ - max_parts
+ """
+ self.send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts)
+ return self.recv_get_partitions_by_filter()
+
+ def send_get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
+ self._oprot.writeMessageBegin('get_partitions_by_filter', TMessageType.CALL, self._seqid)
+ args = get_partitions_by_filter_args()
+ args.db_name = db_name
+ args.tbl_name = tbl_name
+ args.filter = filter
+ args.max_parts = max_parts
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get_partitions_by_filter(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_partitions_by_filter_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.o1 != None:
+ raise result.o1
+ if result.o2 != None:
+ raise result.o2
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
+
def alter_partition(self, db_name, tbl_name, new_part):
"""
Parameters:
@@ -1692,6 +1742,7 @@
self._processMap["get_partition_names"] = Processor.process_get_partition_names
self._processMap["get_partitions_ps"] = Processor.process_get_partitions_ps
self._processMap["get_partition_names_ps"] = Processor.process_get_partition_names_ps
+ self._processMap["get_partitions_by_filter"] = Processor.process_get_partitions_by_filter
self._processMap["alter_partition"] = Processor.process_alter_partition
self._processMap["get_config_value"] = Processor.process_get_config_value
self._processMap["partition_name_to_vals"] = Processor.process_partition_name_to_vals
@@ -2169,6 +2220,22 @@
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_get_partitions_by_filter(self, seqid, iprot, oprot):
+ args = get_partitions_by_filter_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_partitions_by_filter_result()
+ try:
+ result.success = self._handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts)
+ except MetaException, o1:
+ result.o1 = o1
+ except NoSuchObjectException, o2:
+ result.o2 = o2
+ oprot.writeMessageBegin("get_partitions_by_filter", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_alter_partition(self, seqid, iprot, oprot):
args = alter_partition_args()
args.read(iprot)
@@ -6643,6 +6710,188 @@
def __ne__(self, other):
return not (self == other)
+class get_partitions_by_filter_args:
+ """
+ Attributes:
+ - db_name
+ - tbl_name
+ - filter
+ - max_parts
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'db_name', None, None, ), # 1
+ (2, TType.STRING, 'tbl_name', None, None, ), # 2
+ (3, TType.STRING, 'filter', None, None, ), # 3
+ (4, TType.I16, 'max_parts', None, -1, ), # 4
+ )
+
+ def __init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4],):
+ self.db_name = db_name
+ self.tbl_name = tbl_name
+ self.filter = filter
+ self.max_parts = max_parts
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.db_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.tbl_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRING:
+ self.filter = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.I16:
+ self.max_parts = iprot.readI16();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_partitions_by_filter_args')
+ if self.db_name != None:
+ oprot.writeFieldBegin('db_name', TType.STRING, 1)
+ oprot.writeString(self.db_name)
+ oprot.writeFieldEnd()
+ if self.tbl_name != None:
+ oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
+ oprot.writeString(self.tbl_name)
+ oprot.writeFieldEnd()
+ if self.filter != None:
+ oprot.writeFieldBegin('filter', TType.STRING, 3)
+ oprot.writeString(self.filter)
+ oprot.writeFieldEnd()
+ if self.max_parts != None:
+ oprot.writeFieldBegin('max_parts', TType.I16, 4)
+ oprot.writeI16(self.max_parts)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class get_partitions_by_filter_result:
+ """
+ Attributes:
+ - success
+ - o1
+ - o2
+ """
+
+ thrift_spec = (
+ (0, TType.LIST, 'success', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
+ )
+
+ def __init__(self, success=None, o1=None, o2=None,):
+ self.success = success
+ self.o1 = o1
+ self.o2 = o2
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.LIST:
+ self.success = []
+ (_etype220, _size217) = iprot.readListBegin()
+ for _i221 in xrange(_size217):
+ _elem222 = Partition()
+ _elem222.read(iprot)
+ self.success.append(_elem222)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.o2 = NoSuchObjectException()
+ self.o2.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_partitions_by_filter_result')
+ if self.success != None:
+ oprot.writeFieldBegin('success', TType.LIST, 0)
+ oprot.writeListBegin(TType.STRUCT, len(self.success))
+ for iter223 in self.success:
+ iter223.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.o1 != None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o2 != None:
+ oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+ self.o2.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class alter_partition_args:
"""
Attributes:
@@ -7014,10 +7263,10 @@
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype220, _size217) = iprot.readListBegin()
- for _i221 in xrange(_size217):
- _elem222 = iprot.readString();
- self.success.append(_elem222)
+ (_etype227, _size224) = iprot.readListBegin()
+ for _i228 in xrange(_size224):
+ _elem229 = iprot.readString();
+ self.success.append(_elem229)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -7040,8 +7289,8 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter223 in self.success:
- oprot.writeString(iter223)
+ for iter230 in self.success:
+ oprot.writeString(iter230)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 != None:
@@ -7146,11 +7395,11 @@
if fid == 0:
if ftype == TType.MAP:
self.success = {}
- (_ktype225, _vtype226, _size224 ) = iprot.readMapBegin()
- for _i228 in xrange(_size224):
- _key229 = iprot.readString();
- _val230 = iprot.readString();
- self.success[_key229] = _val230
+ (_ktype232, _vtype233, _size231 ) = iprot.readMapBegin()
+ for _i235 in xrange(_size231):
+ _key236 = iprot.readString();
+ _val237 = iprot.readString();
+ self.success[_key236] = _val237
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -7173,9 +7422,9 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
- for kiter231,viter232 in self.success.items():
- oprot.writeString(kiter231)
- oprot.writeString(viter232)
+ for kiter238,viter239 in self.success.items():
+ oprot.writeString(kiter238)
+ oprot.writeString(viter239)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.o1 != None:
@@ -7807,11 +8056,11 @@
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype236, _size233) = iprot.readListBegin()
- for _i237 in xrange(_size233):
- _elem238 = Index()
- _elem238.read(iprot)
- self.success.append(_elem238)
+ (_etype243, _size240) = iprot.readListBegin()
+ for _i244 in xrange(_size240):
+ _elem245 = Index()
+ _elem245.read(iprot)
+ self.success.append(_elem245)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -7840,8 +8089,8 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter239 in self.success:
- iter239.write(oprot)
+ for iter246 in self.success:
+ iter246.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 != None:
@@ -7974,10 +8223,10 @@
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype243, _size240) = iprot.readListBegin()
- for _i244 in xrange(_size240):
- _elem245 = iprot.readString();
- self.success.append(_elem245)
+ (_etype250, _size247) = iprot.readListBegin()
+ for _i251 in xrange(_size247):
+ _elem252 = iprot.readString();
+ self.success.append(_elem252)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -8000,8 +8249,8 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter246 in self.success:
- oprot.writeString(iter246)
+ for iter253 in self.success:
+ oprot.writeString(iter253)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o2 != None:
Index: metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote
===================================================================
--- metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote (revision 991274)
+++ metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote (working copy)
@@ -49,6 +49,7 @@
print ' get_partition_names(string db_name, string tbl_name, i16 max_parts)'
print ' get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts)'
print ' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)'
+ print ' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)'
print ' void alter_partition(string db_name, string tbl_name, Partition new_part)'
print ' string get_config_value(string name, string defaultValue)'
print ' partition_name_to_vals(string part_name)'
@@ -274,6 +275,12 @@
sys.exit(1)
pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),))
+elif cmd == 'get_partitions_by_filter':
+ if len(args) != 4:
+ print 'get_partitions_by_filter requires 4 args'
+ sys.exit(1)
+ pp.pprint(client.get_partitions_by_filter(args[0],args[1],args[2],eval(args[3]),))
+
elif cmd == 'alter_partition':
if len(args) != 3:
print 'alter_partition requires 3 args'
Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (revision 991274)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (working copy)
@@ -31,6 +31,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -968,4 +969,186 @@
} catch (InvalidOperationException e) {
}
}
+
+ /**
+ * Tests the list-partitions-by-filter functionality.
+ * @throws Exception
+ */
+ public void testPartitionFilter() throws Exception {
+ String dbName = "filterdb";
+ String tblName = "filtertbl";
+
+ List<String> vals = new ArrayList<String>(3);
+ vals.add("p11");
+ vals.add("p21");
+ vals.add("p31");
+ List<String> vals2 = new ArrayList<String>(3);
+ vals2.add("p11");
+ vals2.add("p22");
+ vals2.add("p31");
+ List<String> vals3 = new ArrayList<String>(3);
+ vals3.add("p12");
+ vals3.add("p21");
+ vals3.add("p31");
+ List<String> vals4 = new ArrayList<String>(3);
+ vals4.add("p12");
+ vals4.add("p23");
+ vals4.add("p31");
+ List<String> vals5 = new ArrayList<String>(3);
+ vals5.add("p13");
+ vals5.add("p24");
+ vals5.add("p31");
+ List<String> vals6 = new ArrayList<String>(3);
+ vals6.add("p13");
+ vals6.add("p25");
+ vals6.add("p31");
+
+ silentDropDatabase(dbName);
+
+ Database db = new Database();
+ db.setName(dbName);
+ client.createDatabase(db);
+
+ ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+ cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
+ cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));
+
+ ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(3);
+ partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
+ partCols.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, ""));
+ partCols.add(new FieldSchema("p3", Constants.INT_TYPE_NAME, ""));
+
+ Table tbl = new Table();
+ tbl.setDbName(dbName);
+ tbl.setTableName(tblName);
+ StorageDescriptor sd = new StorageDescriptor();
+ tbl.setSd(sd);
+ sd.setCols(cols);
+ sd.setCompressed(false);
+ sd.setNumBuckets(1);
+ sd.setParameters(new HashMap<String, String>());
+ sd.setBucketCols(new ArrayList<String>());
+ sd.setSerdeInfo(new SerDeInfo());
+ sd.getSerdeInfo().setName(tbl.getTableName());
+ sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+ sd.getSerdeInfo().getParameters()
+ .put(Constants.SERIALIZATION_FORMAT, "1");
+ sd.setSortCols(new ArrayList<Order>());
+
+ tbl.setPartitionKeys(partCols);
+ client.createTable(tbl);
+
+ tbl = client.getTable(dbName, tblName);
+
+ add_partition(client, tbl, vals, "part1");
+ add_partition(client, tbl, vals2, "part2");
+ add_partition(client, tbl, vals3, "part3");
+ add_partition(client, tbl, vals4, "part4");
+ add_partition(client, tbl, vals5, "part5");
+ add_partition(client, tbl, vals6, "part6");
+
+ checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2);
+ checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2);
+ checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
+ checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1);
+ checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1);
+ checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3);
+ checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+
+ checkFilter(client, dbName, tblName,
+ "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3);
+ checkFilter(client, dbName, tblName,
+ "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " +
+ "(p1=\"p13\" aNd p2=\"p24\")", 4);
+ //test AND/OR precedence
+ checkFilter(client, dbName, tblName,
+ "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
+ checkFilter(client, dbName, tblName,
+ "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
+
+ checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
+ checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
+ checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2);
+ checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4);
+ checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 4);
+ checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
+ checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);
+
+ //Test for setting the maximum partition count
+ List<Partition> partitions = client.listPartitionsByFilter(dbName,
+ tblName, "p1 >= \"p12\"", (short) 2);
+ assertEquals("User specified row limit for partitions",
+ 2, partitions.size());
+
+ //Negative tests
+ Exception me = null;
+ try {
+ client.listPartitionsByFilter(dbName,
+ tblName, "p3 >= \"p12\"", (short) -1);
+ } catch(MetaException e) {
+ me = e;
+ }
+ assertNotNull(me);
+ assertTrue("Filter on int partition key", me.getMessage().contains(
+ "Filtering is supported only on partition keys of type string"));
+
+ me = null;
+ try {
+ client.listPartitionsByFilter(dbName,
+ tblName, "c1 >= \"p12\"", (short) -1);
+ } catch(MetaException e) {
+ me = e;
+ }
+ assertNotNull(me);
+ assertTrue("Filter on invalid key", me.getMessage().contains(
+ " is not a partitioning key for the table"));
+
+ me = null;
+ try {
+ client.listPartitionsByFilter(dbName,
+ tblName, "c1 >= ", (short) -1);
+ } catch(MetaException e) {
+ me = e;
+ }
+ assertNotNull(me);
+ assertTrue("Invalid filter string", me.getMessage().contains(
+ "Error parsing partition filter"));
+
+ me = null;
+ try {
+ client.listPartitionsByFilter("invDBName",
+ "invTableName", "p1 = \"p11\"", (short) -1);
+ } catch(NoSuchObjectException e) {
+ me = e;
+ }
+ assertNotNull(me);
+ assertTrue("NoSuchObject exception", me.getMessage().contains(
+ "database/table does not exist"));
+ }
+
+ private void checkFilter(HiveMetaStoreClient client, String dbName,
+ String tblName, String filter, int expectedCount)
+ throws MetaException, NoSuchObjectException, TException {
+ List<Partition> partitions = client.listPartitionsByFilter(dbName,
+ tblName, filter, (short) -1);
+
+ assertEquals("Partition count expected for filter " + filter,
+ expectedCount, partitions.size());
+ }
+
+ private void add_partition(HiveMetaStoreClient client, Table table,
+ List<String> vals, String location) throws InvalidObjectException,
+ AlreadyExistsException, MetaException, TException {
+
+ Partition part = new Partition();
+ part.setDbName(table.getDbName());
+ part.setTableName(table.getTableName());
+ part.setValues(vals);
+ part.setParameters(new HashMap<String, String>());
+ part.setSd(table.getSd());
+ part.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
+ part.getSd().setLocation(table.getSd().getLocation() + location);
+
+ client.add_partition(part);
+ }
}
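A note on the two AND/OR precedence checks above: the grammar (Filter.g, later in this patch) gives AND higher precedence than OR, so the unparenthesized form groups as (a AND b) OR c. An illustrative walk-through against the six partitions the test creates:

    // partition (p1,p2) values: (p11,p21) (p11,p22) (p12,p21) (p12,p23) (p13,p24) (p13,p25)
    // p1="p12" and (p2="p27" Or p2="p21")  ->  (p12,p21) only           : 1 match
    // p1="p12" and p2="p27" Or p2="p21"    ->  (p11,p21) and (p12,p21)  : 2 matches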
Index: metastore/src/model/package.jdo
===================================================================
--- metastore/src/model/package.jdo (revision 991274)
+++ metastore/src/model/package.jdo (working copy)
@@ -128,10 +128,10 @@
-
+
-
+
Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (revision 991274)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (working copy)
@@ -122,4 +122,8 @@
public abstract List<String> listIndexNames(String dbName,
String origTableName, short max) throws MetaException;
+ public abstract List<Partition> getPartitionsByFilter(
+ String dbName, String tblName, String filter, short maxParts)
+ throws MetaException, NoSuchObjectException;
+
}
Index: metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (revision 0)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (revision 0)
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.parser;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Stack;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde.Constants;
+
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+
+/**
+ * The Class representing the filter as a binary tree. The tree has TreeNodes
+ * at the intermediate levels and LeafNodes at the leaves.
+ */
+public class ExpressionTree {
+
+ /** The logical operations supported. */
+ public enum LogicalOperator {
+ AND,
+ OR
+ }
+
+ /** The operators supported. */
+ public enum Operator {
+ EQUALS ("=", "=="),
+ GREATERTHAN (">"),
+ LESSTHAN ("<"),
+ LESSTHANOREQUALTO ("<="),
+ GREATERTHANOREQUALTO (">="),
+ LIKE ("LIKE", "matches"),
+ NOTEQUALS ("<>", "!=");
+
+ private final String op;
+ private final String jdoOp;
+
+ // private constructor
+ private Operator(String op){
+ this.op = op;
+ this.jdoOp = op;
+ }
+
+ private Operator(String op, String jdoOp){
+ this.op = op;
+ this.jdoOp = jdoOp;
+ }
+
+ public String getOp() {
+ return op;
+ }
+
+ public String getJdoOp() {
+ return jdoOp;
+ }
+
+ public static Operator fromString(String inputOperator) {
+ for(Operator op : Operator.values()) {
+ if(op.getOp().equals(inputOperator)){
+ return op;
+ }
+ }
+
+ throw new Error("Invalid value " + inputOperator +
+ " for " + Operator.class.getSimpleName());
+ }
+ }
+
+
+ /**
+ * The Class representing a Node in the ExpressionTree.
+ */
+ public static class TreeNode {
+ private TreeNode lhs;
+ private LogicalOperator andOr;
+ private TreeNode rhs;
+
+ public TreeNode() {
+ }
+
+ public TreeNode(TreeNode lhs, LogicalOperator andOr, TreeNode rhs) {
+ this.lhs = lhs;
+ this.andOr = andOr;
+ this.rhs = rhs;
+ }
+
+ public String generateJDOFilter(Table table, Map<String, String> params)
+ throws MetaException {
+ StringBuilder filterBuffer = new StringBuilder();
+
+ if ( lhs != null) {
+ filterBuffer.append (" (");
+ filterBuffer.append(lhs.generateJDOFilter(table, params));
+
+ if (rhs != null) {
+ if( andOr == LogicalOperator.AND ) {
+ filterBuffer.append(" && ");
+ } else {
+ filterBuffer.append(" || ");
+ }
+
+ filterBuffer.append(rhs.generateJDOFilter(table, params));
+ }
+ filterBuffer.append (") ");
+ }
+
+ return filterBuffer.toString();
+ }
+
+ }
+
+ /**
+ * The Class representing the leaf level nodes in the ExpressionTree.
+ */
+ public static class LeafNode extends TreeNode {
+ public String keyName;
+ public Operator operator;
+ public String value;
+ public boolean isReverseOrder = false;
+ private static final String PARAM_PREFIX = "hive_filter_param_";
+
+ @Override
+ public String generateJDOFilter(Table table, Map<String, String> params)
+ throws MetaException {
+ int partitionIndex;
+ for(partitionIndex = 0;
+ partitionIndex < table.getPartitionKeys().size();
+ partitionIndex++ ) {
+ if( table.getPartitionKeys().get(partitionIndex).getName().
+ equalsIgnoreCase(keyName)) {
+ break;
+ }
+ }
+
+ if( partitionIndex == table.getPartitionKeys().size() ) {
+ throw new MetaException("Specified key <" + keyName +
+ "> is not a partitioning key for the table");
+ }
+
+ if( ! table.getPartitionKeys().get(partitionIndex).
+ getType().equals(Constants.STRING_TYPE_NAME) ) {
+ throw new MetaException
+ ("Filtering is supported only on partition keys of type string");
+ }
+
+ String paramName = PARAM_PREFIX + params.size();
+ params.put(paramName, value);
+ String filter;
+
+ //Handle "a > 10" and "10 > a" appropriately
+ if (isReverseOrder){
+ //For LIKE, the value should be on the RHS
+ if( operator == Operator.LIKE ) {
+ throw new MetaException(
+ "Value should be on the RHS for LIKE operator : " +
+ "Key <" + keyName + ">");
+ }
+
+ filter = paramName +
+ " " + operator.getJdoOp() + " " +
+ " this.values.get(" + partitionIndex + ")";
+ } else {
+ if( operator == Operator.LIKE ) {
+ //generate this.values.get(i).matches("abc%")
+ filter = " this.values.get(" + partitionIndex + ")."
+ + operator.getJdoOp() + "(" + paramName + ") ";
+ } else {
+ filter = " this.values.get(" + partitionIndex + ") "
+ + operator.getJdoOp() + " " + paramName;
+ }
+ }
+ return filter;
+ }
+ }
+
+ /**
+ * The root node for the tree.
+ */
+ private TreeNode root = null;
+
+ /**
+ * The node stack used to keep track of the tree nodes during parsing.
+ */
+ private final Stack<TreeNode> nodeStack = new Stack<TreeNode>();
+
+ /**
+ * Adds an intermediate node of either type (AND/OR). Pops the last two nodes
+ * from the stack, sets them as children of the new node, and pushes the new
+ * node onto the stack.
+ * @param andOr the operator type
+ */
+ public void addIntermediateNode(LogicalOperator andOr) {
+
+ TreeNode rhs = nodeStack.pop();
+ TreeNode lhs = nodeStack.pop();
+ TreeNode newNode = new TreeNode(lhs, andOr, rhs);
+ nodeStack.push(newNode);
+ root = newNode;
+ }
+
+ /**
+ * Adds a leaf node, pushes the new node onto the stack.
+ * @param newNode the new node
+ */
+ public void addLeafNode(LeafNode newNode) {
+ if( root == null ) {
+ root = newNode;
+ }
+ nodeStack.push(newNode);
+ }
+
+ /** Generate the JDOQL filter for the given expression tree
+ * @param table the table being queried
+ * @param params the input map which is updated with the
+ * parameterized values. Keys are the parameter names and values
+ * are the parameter values
+ * @return the string representation of the expression tree
+ * @throws MetaException
+ */
+ public String generateJDOFilter(Table table,
+ Map<String, String> params) throws MetaException {
+ if( root == null ) {
+ return "";
+ }
+
+ return root.generateJDOFilter(table, params);
+ }
+
+ /** Case insensitive ANTLR string stream */
+ public static class ANTLRNoCaseStringStream extends ANTLRStringStream {
+ public ANTLRNoCaseStringStream (String input) {
+ super(input);
+ }
+
+ public int LA (int i) {
+ int returnChar = super.LA (i);
+
+ if (returnChar == CharStream.EOF) {
+ return returnChar;
+ }
+ else if (returnChar == 0) {
+ return returnChar;
+ }
+
+ return Character.toUpperCase ((char) returnChar);
+ }
+ }
+}
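To make the node-stack discipline above concrete, here is a minimal illustrative sketch (not part of the patch) that builds the tree for p1 = "p11" AND p2 = "p22" the same way the grammar actions in Filter.g do; the Table instance is assumed to have string partition keys p1 and p2:

    ExpressionTree tree = new ExpressionTree();

    ExpressionTree.LeafNode lhs = new ExpressionTree.LeafNode();
    lhs.keyName = "p1";
    lhs.operator = ExpressionTree.Operator.EQUALS;
    lhs.value = "p11";
    tree.addLeafNode(lhs);    // pushed onto the node stack

    ExpressionTree.LeafNode rhs = new ExpressionTree.LeafNode();
    rhs.keyName = "p2";
    rhs.operator = ExpressionTree.Operator.EQUALS;
    rhs.value = "p22";
    tree.addLeafNode(rhs);    // pushed onto the node stack

    // Pops both leaves, pushes the combined node, and makes it the root.
    tree.addIntermediateNode(ExpressionTree.LogicalOperator.AND);

    Map<String, String> params = new HashMap<String, String>();
    String jdoFilter = tree.generateJDOFilter(table, params);
    // jdoFilter is roughly:
    //   ( this.values.get(0) == hive_filter_param_0 &&
    //     this.values.get(1) == hive_filter_param_1 )
    // params: hive_filter_param_0 -> "p11", hive_filter_param_1 -> "p22"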
Index: metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java (revision 0)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java (revision 0)
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package implements the parser for parsing the string filter
+ * for the listPartitionsByFilter API.
+ */
+package org.apache.hadoop.hive.metastore.parser;
Index: metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g (revision 0)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g (revision 0)
@@ -0,0 +1,130 @@
+grammar Filter;
+
+options
+{
+ k=3;
+}
+
+
+// Package headers
+@header {
+package org.apache.hadoop.hive.metastore.parser;
+
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator;
+}
+@lexer::header {package org.apache.hadoop.hive.metastore.parser;}
+
+@members {
+ public ExpressionTree tree = new ExpressionTree();
+
+ public static String TrimQuotes (String input) {
+ if (input.length () > 1) {
+ if ((input.charAt (0) == '"' && input.charAt (input.length () - 1) == '"')
+ || (input.charAt (0) == '\'' && input.charAt (input.length () - 1) == '\'')) {
+ return input.substring (1, input.length () - 1);
+ }
+ }
+ return input;
+ }
+}
+
+@rulecatch{
+ catch (RecognitionException e){
+ throw e;
+ }
+}
+
+//main rule
+filter
+ :
+ orExpression
+ ;
+
+orExpression
+ :
+ andExpression (KW_OR andExpression { tree.addIntermediateNode(LogicalOperator.OR); } )*
+ ;
+
+andExpression
+ :
+ expression (KW_AND expression { tree.addIntermediateNode(LogicalOperator.AND); } )*
+ ;
+
+expression
+ :
+ LPAREN orExpression RPAREN
+ |
+ operatorExpression
+ ;
+
+operatorExpression
+@init {
+ boolean isReverseOrder = false;
+}
+ :
+ (
+ (key = Identifier op = operator value = StringLiteral)
+ |
+ (value = StringLiteral op = operator key = Identifier) { isReverseOrder = true; }
+ )
+ {
+ LeafNode node = new LeafNode();
+ node.keyName = key.getText();
+ node.value = TrimQuotes(value.getText());
+ node.operator = op;
+ node.isReverseOrder = isReverseOrder;
+
+ tree.addLeafNode(node);
+ };
+
+operator returns [Operator op]
+ :
+ t = (LESSTHAN | LESSTHANOREQUALTO | GREATERTHAN | GREATERTHANOREQUALTO | KW_LIKE | EQUAL | NOTEQUAL)
+ {
+ $op = Operator.fromString(t.getText().toUpperCase());
+ };
+
+// Keywords
+KW_AND : 'AND';
+KW_OR : 'OR';
+KW_LIKE : 'LIKE';
+
+// Operators
+LPAREN : '(' ;
+RPAREN : ')' ;
+EQUAL : '=';
+NOTEQUAL : '<>';
+LESSTHANOREQUALTO : '<=';
+LESSTHAN : '<';
+GREATERTHANOREQUALTO : '>=';
+GREATERTHAN : '>';
+
+// LITERALS
+fragment
+Letter
+ : 'a'..'z' | 'A'..'Z'
+ ;
+
+fragment
+Digit
+ :
+ '0'..'9'
+ ;
+
+StringLiteral
+ :
+ ( '\'' ( ~('\''|'\\') | ('\\' .) )* '\''
+ | '\"' ( ~('\"'|'\\') | ('\\' .) )* '\"'
+ )
+ ;
+
+Identifier
+ :
+ (Letter | Digit) (Letter | Digit | '_')*
+ ;
+
+WS : (' '|'\r'|'\t'|'\n')+ { skip(); } ;
+
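For reference, the ObjectStore change later in this patch is the caller of the generated parser; condensed, the driving code looks like this (FilterLexer and FilterParser are the classes ANTLR generates from this grammar):

    CharStream cs = new ExpressionTree.ANTLRNoCaseStringStream("p1 = \"p11\"");
    FilterLexer lexer = new FilterLexer(cs);
    CommonTokenStream tokens = new CommonTokenStream();
    tokens.setTokenSource(lexer);
    FilterParser parser = new FilterParser(tokens);
    parser.filter();                    // throws RecognitionException on bad input
    ExpressionTree tree = parser.tree;  // ready for generateJDOFilter(table, params)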
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (revision 991274)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (working copy)
@@ -517,6 +517,27 @@
}
/**
+ * Get list of partitions matching specified filter
+ * @param db_name the database name
+ * @param tbl_name the table name
+ * @param filter the filter string,
+ * for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can
+ * be done only on string partition keys.
+ * @param max_parts the maximum number of partitions to return,
+ * all partitions are returned if -1 is passed
+ * @return list of partitions
+ * @throws MetaException
+ * @throws NoSuchObjectException
+ * @throws TException
+ */
+ public List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+ String filter, short max_parts) throws MetaException,
+ NoSuchObjectException, TException {
+ return deepCopyPartitions(
+ client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts));
+ }
+
+ /**
* @param name
* @return the database
* @throws NoSuchObjectException
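A usage sketch for the new client call (assumes an already-connected HiveMetaStoreClient named client; the values follow the test case earlier in this patch):

    List<Partition> parts = client.listPartitionsByFilter(
        "filterdb", "filtertbl",            // database and table names
        "p1 = \"p11\" and p2 <> \"p23\"",   // filter on string partition keys only
        (short) -1);                        // -1 returns all matching partitions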
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 991274)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -2074,6 +2074,32 @@
return ret;
}
+ @Override
+ public List<Partition> get_partitions_by_filter(final String dbName,
+ final String tblName, final String filter, final short maxParts)
+ throws MetaException, NoSuchObjectException, TException {
+ incrementCounter("get_partitions_by_filter");
+ logStartTableFunction("get_partitions_by_filter", dbName, tblName);
+
+ List<Partition> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<Partition>>() {
+ @Override
+ List<Partition> run(RawStore ms) throws Exception {
+ return ms.getPartitionsByFilter(dbName, tblName, filter, maxParts);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ }
+ return ret;
+ }
+
}
/**
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 991274)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy)
@@ -18,8 +18,10 @@
package org.apache.hadoop.hive.metastore;
+import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -36,6 +38,10 @@
import javax.jdo.Transaction;
import javax.jdo.datastore.DataStoreCache;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.CommonTokenStream;
+import org.antlr.runtime.RecognitionException;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
@@ -62,6 +68,10 @@
import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.model.MType;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
+import org.apache.hadoop.hive.metastore.parser.FilterLexer;
+import org.apache.hadoop.hive.metastore.parser.FilterParser;
import org.apache.hadoop.util.StringUtils;
/**
@@ -944,6 +954,102 @@
return mparts;
}
+ @Override
+ public List<Partition> getPartitionsByFilter(String dbName, String tblName,
+ String filter, short maxParts) throws MetaException, NoSuchObjectException {
+ openTransaction();
+ List<Partition> parts = convertToParts(listMPartitionsByFilter(dbName,
+ tblName, filter, maxParts));
+ commitTransaction();
+ return parts;
+ }
+
+ private List<MPartition> listMPartitionsByFilter(String dbName, String tableName,
+ String filter, short maxParts) throws MetaException, NoSuchObjectException {
+ boolean success = false;
+ List<MPartition> mparts = null;
+ try {
+ openTransaction();
+ LOG.debug("Executing listMPartitionsByFilter");
+ dbName = dbName.toLowerCase();
+ tableName = tableName.toLowerCase();
+
+ MTable mtable = getMTable(dbName, tableName);
+ if( mtable == null ) {
+ throw new NoSuchObjectException("Specified database/table does not exist : "
+ + dbName + "." + tableName);
+ }
+
+ StringBuilder queryBuilder = new StringBuilder(
+ "table.tableName == t1 && table.database.name == t2");
+
+ Map<String, String> params = new HashMap<String, String>();
+
+ if( filter != null ) {
+
+ Table table = convertToTable(mtable);
+
+ CharStream cs = new ANTLRNoCaseStringStream(filter);
+ FilterLexer lexer = new FilterLexer(cs);
+
+ CommonTokenStream tokens = new CommonTokenStream();
+ tokens.setTokenSource (lexer);
+
+ FilterParser parser = new FilterParser(tokens);
+
+ try {
+ parser.filter();
+ } catch(RecognitionException re) {
+ throw new MetaException("Error parsing partition filter : " + re);
+ }
+
+ String jdoFilter = parser.tree.generateJDOFilter(table, params);
+
+ if( jdoFilter.trim().length() > 0 ) {
+ queryBuilder.append(" && ( ");
+ queryBuilder.append(jdoFilter.trim());
+ queryBuilder.append(" )");
+ }
+ }
+
+ Query query = pm.newQuery(MPartition.class,
+ queryBuilder.toString());
+
+ if( maxParts >= 0 ) {
+ //User specified a row limit, set it on the Query
+ query.setRange(0, maxParts);
+ }
+
+ //Create the parameter declaration string
+ StringBuilder paramDecl = new StringBuilder(
+ "java.lang.String t1, java.lang.String t2");
+ for(String key : params.keySet() ) {
+ paramDecl.append(", java.lang.String " + key);
+ }
+
+ LOG.debug("Filter specified is " + filter + "," +
+ " JDOQL filter is " + queryBuilder.toString());
+
+ params.put("t1", tableName.trim());
+ params.put("t2", dbName.trim());
+
+ query.declareParameters(paramDecl.toString());
+ query.setOrdering("partitionName ascending");
+
+ mparts = (List<MPartition>) query.executeWithMap(params);
+
+ LOG.debug("Done executing query for listMPartitionsByFilter");
+ pm.retrieveAll(mparts);
+ success = commitTransaction();
+ LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+ } finally {
+ if (!success) {
+ rollbackTransaction();
+ }
+ }
+ return mparts;
+ }
+
public void alterTable(String dbname, String name, Table newTable)
throws InvalidObjectException, MetaException {
boolean success = false;
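An illustrative trace of listMPartitionsByFilter above: for the filter p1 = "p11" or p2 = "p23" on a table with partition keys (p1, p2, p3), the JDOQL filter handed to pm.newQuery would be roughly

    table.tableName == t1 && table.database.name == t2 &&
      ( ( this.values.get(0) == hive_filter_param_0 ||
          this.values.get(1) == hive_filter_param_1)  )

with params mapping t1 to the table name, t2 to the database name, and hive_filter_param_0/hive_filter_param_1 to "p11"/"p23", and the declared parameter list extended with one java.lang.String entry per filter value.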
Index: metastore/src/gen-cpp/ThriftHiveMetastore.cpp
===================================================================
--- metastore/src/gen-cpp/ThriftHiveMetastore.cpp (revision 991274)
+++ metastore/src/gen-cpp/ThriftHiveMetastore.cpp (working copy)
@@ -6438,6 +6438,278 @@
return xfer;
}
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->db_name);
+ this->__isset.db_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->tbl_name);
+ this->__isset.tbl_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->filter);
+ this->__isset.filter = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == apache::thrift::protocol::T_I16) {
+ xfer += iprot->readI16(this->max_parts);
+ this->__isset.max_parts = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_args");
+ xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->db_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->tbl_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("filter", apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->filter);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("max_parts", apache::thrift::protocol::T_I16, 4);
+ xfer += oprot->writeI16(this->max_parts);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_pargs");
+ xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->db_name)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString((*(this->tbl_name)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("filter", apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString((*(this->filter)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("max_parts", apache::thrift::protocol::T_I16, 4);
+ xfer += oprot->writeI16((*(this->max_parts)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size250;
+ apache::thrift::protocol::TType _etype253;
+ iprot->readListBegin(_etype253, _size250);
+ this->success.resize(_size250);
+ uint32_t _i254;
+ for (_i254 = 0; _i254 < _size250; ++_i254)
+ {
+ xfer += this->success[_i254].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size());
+ std::vector<Partition> ::const_iterator _iter255;
+ for (_iter255 = this->success.begin(); _iter255 != this->success.end(); ++_iter255)
+ {
+ xfer += (*_iter255).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o1) {
+ xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->o1.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o2) {
+ xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->o2.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size256;
+ apache::thrift::protocol::TType _etype259;
+ iprot->readListBegin(_etype259, _size256);
+ (*(this->success)).resize(_size256);
+ uint32_t _i260;
+ for (_i260 = 0; _i260 < _size256; ++_i260)
+ {
+ xfer += (*(this->success))[_i260].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
uint32_t ThriftHiveMetastore_alter_partition_args::read(apache::thrift::protocol::TProtocol* iprot) {
uint32_t xfer = 0;
@@ -6922,14 +7194,14 @@
if (ftype == apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size250;
- apache::thrift::protocol::TType _etype253;
- iprot->readListBegin(_etype253, _size250);
- this->success.resize(_size250);
- uint32_t _i254;
- for (_i254 = 0; _i254 < _size250; ++_i254)
+ uint32_t _size261;
+ apache::thrift::protocol::TType _etype264;
+ iprot->readListBegin(_etype264, _size261);
+ this->success.resize(_size261);
+ uint32_t _i265;
+ for (_i265 = 0; _i265 < _size261; ++_i265)
{
- xfer += iprot->readString(this->success[_i254]);
+ xfer += iprot->readString(this->success[_i265]);
}
iprot->readListEnd();
}
@@ -6968,10 +7240,10 @@
xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
- std::vector<std::string> ::const_iterator _iter255;
- for (_iter255 = this->success.begin(); _iter255 != this->success.end(); ++_iter255)
+ std::vector<std::string> ::const_iterator _iter266;
+ for (_iter266 = this->success.begin(); _iter266 != this->success.end(); ++_iter266)
{
- xfer += oprot->writeString((*_iter255));
+ xfer += oprot->writeString((*_iter266));
}
xfer += oprot->writeListEnd();
}
@@ -7010,14 +7282,14 @@
if (ftype == apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size256;
- apache::thrift::protocol::TType _etype259;
- iprot->readListBegin(_etype259, _size256);
- (*(this->success)).resize(_size256);
- uint32_t _i260;
- for (_i260 = 0; _i260 < _size256; ++_i260)
+ uint32_t _size267;
+ apache::thrift::protocol::TType _etype270;
+ iprot->readListBegin(_etype270, _size267);
+ (*(this->success)).resize(_size267);
+ uint32_t _i271;
+ for (_i271 = 0; _i271 < _size267; ++_i271)
{
- xfer += iprot->readString((*(this->success))[_i260]);
+ xfer += iprot->readString((*(this->success))[_i271]);
}
iprot->readListEnd();
}
@@ -7132,17 +7404,17 @@
if (ftype == apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size261;
- apache::thrift::protocol::TType _ktype262;
- apache::thrift::protocol::TType _vtype263;
- iprot->readMapBegin(_ktype262, _vtype263, _size261);
- uint32_t _i265;
- for (_i265 = 0; _i265 < _size261; ++_i265)
+ uint32_t _size272;
+ apache::thrift::protocol::TType _ktype273;
+ apache::thrift::protocol::TType _vtype274;
+ iprot->readMapBegin(_ktype273, _vtype274, _size272);
+ uint32_t _i276;
+ for (_i276 = 0; _i276 < _size272; ++_i276)
{
- std::string _key266;
- xfer += iprot->readString(_key266);
- std::string& _val267 = this->success[_key266];
- xfer += iprot->readString(_val267);
+ std::string _key277;
+ xfer += iprot->readString(_key277);
+ std::string& _val278 = this->success[_key277];
+ xfer += iprot->readString(_val278);
}
iprot->readMapEnd();
}
@@ -7181,11 +7453,11 @@
xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(apache::thrift::protocol::T_STRING, apache::thrift::protocol::T_STRING, this->success.size());
- std::map<std::string, std::string> ::const_iterator _iter268;
- for (_iter268 = this->success.begin(); _iter268 != this->success.end(); ++_iter268)
+ std::map<std::string, std::string> ::const_iterator _iter279;
+ for (_iter279 = this->success.begin(); _iter279 != this->success.end(); ++_iter279)
{
- xfer += oprot->writeString(_iter268->first);
- xfer += oprot->writeString(_iter268->second);
+ xfer += oprot->writeString(_iter279->first);
+ xfer += oprot->writeString(_iter279->second);
}
xfer += oprot->writeMapEnd();
}
@@ -7224,17 +7496,17 @@
if (ftype == apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size269;
- apache::thrift::protocol::TType _ktype270;
- apache::thrift::protocol::TType _vtype271;
- iprot->readMapBegin(_ktype270, _vtype271, _size269);
- uint32_t _i273;
- for (_i273 = 0; _i273 < _size269; ++_i273)
+ uint32_t _size280;
+ apache::thrift::protocol::TType _ktype281;
+ apache::thrift::protocol::TType _vtype282;
+ iprot->readMapBegin(_ktype281, _vtype282, _size280);
+ uint32_t _i284;
+ for (_i284 = 0; _i284 < _size280; ++_i284)
{
- std::string _key274;
- xfer += iprot->readString(_key274);
- std::string& _val275 = (*(this->success))[_key274];
- xfer += iprot->readString(_val275);
+ std::string _key285;
+ xfer += iprot->readString(_key285);
+ std::string& _val286 = (*(this->success))[_key285];
+ xfer += iprot->readString(_val286);
}
iprot->readMapEnd();
}
@@ -8075,14 +8347,14 @@
if (ftype == apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size276;
- apache::thrift::protocol::TType _etype279;
- iprot->readListBegin(_etype279, _size276);
- this->success.resize(_size276);
- uint32_t _i280;
- for (_i280 = 0; _i280 < _size276; ++_i280)
+ uint32_t _size287;
+ apache::thrift::protocol::TType _etype290;
+ iprot->readListBegin(_etype290, _size287);
+ this->success.resize(_size287);
+ uint32_t _i291;
+ for (_i291 = 0; _i291 < _size287; ++_i291)
{
- xfer += this->success[_i280].read(iprot);
+ xfer += this->success[_i291].read(iprot);
}
iprot->readListEnd();
}
@@ -8129,10 +8401,10 @@
xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size());
- std::vector<Index> ::const_iterator _iter281;
- for (_iter281 = this->success.begin(); _iter281 != this->success.end(); ++_iter281)
+ std::vector<Index> ::const_iterator _iter292;
+ for (_iter292 = this->success.begin(); _iter292 != this->success.end(); ++_iter292)
{
- xfer += (*_iter281).write(oprot);
+ xfer += (*_iter292).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8175,14 +8447,14 @@
if (ftype == apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size282;
- apache::thrift::protocol::TType _etype285;
- iprot->readListBegin(_etype285, _size282);
- (*(this->success)).resize(_size282);
- uint32_t _i286;
- for (_i286 = 0; _i286 < _size282; ++_i286)
+ uint32_t _size293;
+ apache::thrift::protocol::TType _etype296;
+ iprot->readListBegin(_etype296, _size293);
+ (*(this->success)).resize(_size293);
+ uint32_t _i297;
+ for (_i297 = 0; _i297 < _size293; ++_i297)
{
- xfer += (*(this->success))[_i286].read(iprot);
+ xfer += (*(this->success))[_i297].read(iprot);
}
iprot->readListEnd();
}
@@ -8333,14 +8605,14 @@
if (ftype == apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size287;
- apache::thrift::protocol::TType _etype290;
- iprot->readListBegin(_etype290, _size287);
- this->success.resize(_size287);
- uint32_t _i291;
- for (_i291 = 0; _i291 < _size287; ++_i291)
+ uint32_t _size298;
+ apache::thrift::protocol::TType _etype301;
+ iprot->readListBegin(_etype301, _size298);
+ this->success.resize(_size298);
+ uint32_t _i302;
+ for (_i302 = 0; _i302 < _size298; ++_i302)
{
- xfer += iprot->readString(this->success[_i291]);
+ xfer += iprot->readString(this->success[_i302]);
}
iprot->readListEnd();
}
@@ -8379,10 +8651,10 @@
xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
- std::vector<std::string> ::const_iterator _iter292;
- for (_iter292 = this->success.begin(); _iter292 != this->success.end(); ++_iter292)
+ std::vector<std::string> ::const_iterator _iter303;
+ for (_iter303 = this->success.begin(); _iter303 != this->success.end(); ++_iter303)
{
- xfer += oprot->writeString((*_iter292));
+ xfer += oprot->writeString((*_iter303));
}
xfer += oprot->writeListEnd();
}
@@ -8421,14 +8693,14 @@
if (ftype == apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size293;
- apache::thrift::protocol::TType _etype296;
- iprot->readListBegin(_etype296, _size293);
- (*(this->success)).resize(_size293);
- uint32_t _i297;
- for (_i297 = 0; _i297 < _size293; ++_i297)
+ uint32_t _size304;
+ apache::thrift::protocol::TType _etype307;
+ iprot->readListBegin(_etype307, _size304);
+ (*(this->success)).resize(_size304);
+ uint32_t _i308;
+ for (_i308 = 0; _i308 < _size304; ++_i308)
{
- xfer += iprot->readString((*(this->success))[_i297]);
+ xfer += iprot->readString((*(this->success))[_i308]);
}
iprot->readListEnd();
}
@@ -10318,6 +10590,75 @@
throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result");
}
+void ThriftHiveMetastoreClient::get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts)
+{
+ send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+ recv_get_partitions_by_filter(_return);
+}
+
+void ThriftHiveMetastoreClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get_partitions_by_filter", apache::thrift::protocol::T_CALL, cseqid);
+
+ ThriftHiveMetastore_get_partitions_by_filter_pargs args;
+ args.db_name = &db_name;
+ args.tbl_name = &tbl_name;
+ args.filter = &filter;
+ args.max_parts = &max_parts;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->flush();
+ oprot_->getTransport()->writeEnd();
+}
+
+void ThriftHiveMetastoreClient::recv_get_partitions_by_filter(std::vector<Partition> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == apache::thrift::protocol::T_EXCEPTION) {
+ apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE);
+ }
+ if (fname.compare("get_partitions_by_filter") != 0) {
+ iprot_->skip(apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME);
+ }
+ ThriftHiveMetastore_get_partitions_by_filter_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.o1) {
+ throw result.o1;
+ }
+ if (result.__isset.o2) {
+ throw result.o2;
+ }
+ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
+}
+
void ThriftHiveMetastoreClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part)
{
send_alter_partition(db_name, tbl_name, new_part);
@@ -11900,6 +12241,40 @@
oprot->getTransport()->writeEnd();
}
+void ThriftHiveMetastoreProcessor::process_get_partitions_by_filter(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot)
+{
+ ThriftHiveMetastore_get_partitions_by_filter_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ iprot->getTransport()->readEnd();
+
+ ThriftHiveMetastore_get_partitions_by_filter_result result;
+ try {
+ iface_->get_partitions_by_filter(result.success, args.db_name, args.tbl_name, args.filter, args.max_parts);
+ result.__isset.success = true;
+ } catch (MetaException &o1) {
+ result.o1 = o1;
+ result.__isset.o1 = true;
+ } catch (NoSuchObjectException &o2) {
+ result.o2 = o2;
+ result.__isset.o2 = true;
+ } catch (const std::exception& e) {
+ apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get_partitions_by_filter", apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->flush();
+ oprot->getTransport()->writeEnd();
+ return;
+ }
+
+ oprot->writeMessageBegin("get_partitions_by_filter", apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->flush();
+ oprot->getTransport()->writeEnd();
+}
+
void ThriftHiveMetastoreProcessor::process_alter_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot)
{
ThriftHiveMetastore_alter_partition_args args;
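
End to end, the pieces above follow the standard Thrift call pattern: the client's send_* method serializes an args struct, the server's process_* method invokes the handler and serializes a result struct, and the client's recv_* method maps the declared o1/o2 fields back to exceptions. Below is a minimal sketch of driving the Java client added later in this patch; the endpoint (localhost:9083), database, table, and filter string are placeholder assumptions, not part of the patch.

import java.util.List;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class GetPartitionsByFilterExample {
  public static void main(String[] args) throws Exception {
    TSocket transport = new TSocket("localhost", 9083); // assumed metastore endpoint
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    transport.open();
    try {
      // Calls send_get_partitions_by_filter + recv_get_partitions_by_filter under the hood.
      List<Partition> parts = client.get_partitions_by_filter(
          "default", "page_view", "ds = \"2010-08-20\"", (short) -1);
      System.out.println("matched " + parts.size() + " partition(s)");
    } catch (NoSuchObjectException e) {
      System.err.println("table not found: " + e.getMessage());
    } catch (MetaException e) {
      System.err.println("metastore error: " + e.getMessage());
    } finally {
      transport.close();
    }
  }
}
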
Index: metastore/src/gen-cpp/ThriftHiveMetastore.h
===================================================================
--- metastore/src/gen-cpp/ThriftHiveMetastore.h (revision 991274)
+++ metastore/src/gen-cpp/ThriftHiveMetastore.h (working copy)
@@ -43,6 +43,7 @@
virtual void get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0;
virtual void get_partitions_ps(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts) = 0;
virtual void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts) = 0;
+ virtual void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) = 0;
virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0;
virtual void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) = 0;
virtual void partition_name_to_vals(std::vector<std::string> & _return, const std::string& part_name) = 0;
@@ -145,6 +146,9 @@
void get_partition_names_ps(std::vector<std::string> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const int16_t /* max_parts */) {
return;
}
+ void get_partitions_by_filter(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* filter */, const int16_t /* max_parts */) {
+ return;
+ }
void alter_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */) {
return;
}
@@ -3253,6 +3257,126 @@
};
+class ThriftHiveMetastore_get_partitions_by_filter_args {
+ public:
+
+ ThriftHiveMetastore_get_partitions_by_filter_args() : db_name(""), tbl_name(""), filter(""), max_parts(-1) {
+ }
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_filter_args() throw() {}
+
+ std::string db_name;
+ std::string tbl_name;
+ std::string filter;
+ int16_t max_parts;
+
+ struct __isset {
+ __isset() : db_name(false), tbl_name(false), filter(false), max_parts(false) {}
+ bool db_name;
+ bool tbl_name;
+ bool filter;
+ bool max_parts;
+ } __isset;
+
+ bool operator == (const ThriftHiveMetastore_get_partitions_by_filter_args & rhs) const
+ {
+ if (!(db_name == rhs.db_name))
+ return false;
+ if (!(tbl_name == rhs.tbl_name))
+ return false;
+ if (!(filter == rhs.filter))
+ return false;
+ if (!(max_parts == rhs.max_parts))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_get_partitions_by_filter_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_get_partitions_by_filter_args & ) const;
+
+ uint32_t read(apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+class ThriftHiveMetastore_get_partitions_by_filter_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_filter_pargs() throw() {}
+
+ const std::string* db_name;
+ const std::string* tbl_name;
+ const std::string* filter;
+ const int16_t* max_parts;
+
+ uint32_t write(apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+class ThriftHiveMetastore_get_partitions_by_filter_result {
+ public:
+
+ ThriftHiveMetastore_get_partitions_by_filter_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_filter_result() throw() {}
+
+ std::vector<Partition> success;
+ MetaException o1;
+ NoSuchObjectException o2;
+
+ struct __isset {
+ __isset() : success(false), o1(false), o2(false) {}
+ bool success;
+ bool o1;
+ bool o2;
+ } __isset;
+
+ bool operator == (const ThriftHiveMetastore_get_partitions_by_filter_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_get_partitions_by_filter_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_get_partitions_by_filter_result & ) const;
+
+ uint32_t read(apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+class ThriftHiveMetastore_get_partitions_by_filter_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_filter_presult() throw() {}
+
+ std::vector<Partition> * success;
+ MetaException o1;
+ NoSuchObjectException o2;
+
+ struct __isset {
+ __isset() : success(false), o1(false), o2(false) {}
+ bool success;
+ bool o1;
+ bool o2;
+ } __isset;
+
+ uint32_t read(apache::thrift::protocol::TProtocol* iprot);
+
+};
+
class ThriftHiveMetastore_alter_partition_args {
public:
@@ -4335,6 +4459,9 @@
void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
void recv_get_partition_names_ps(std::vector<std::string> & _return);
+ void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
+ void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
+ void recv_get_partitions_by_filter(std::vector<Partition> & _return);
void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
void recv_alter_partition();
@@ -4398,6 +4525,7 @@
void process_get_partition_names(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_get_partitions_ps(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_get_partition_names_ps(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
+ void process_get_partitions_by_filter(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_alter_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_get_config_value(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_partition_name_to_vals(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
@@ -4439,6 +4567,7 @@
processMap_["get_partition_names"] = &ThriftHiveMetastoreProcessor::process_get_partition_names;
processMap_["get_partitions_ps"] = &ThriftHiveMetastoreProcessor::process_get_partitions_ps;
processMap_["get_partition_names_ps"] = &ThriftHiveMetastoreProcessor::process_get_partition_names_ps;
+ processMap_["get_partitions_by_filter"] = &ThriftHiveMetastoreProcessor::process_get_partitions_by_filter;
processMap_["alter_partition"] = &ThriftHiveMetastoreProcessor::process_alter_partition;
processMap_["get_config_value"] = &ThriftHiveMetastoreProcessor::process_get_config_value;
processMap_["partition_name_to_vals"] = &ThriftHiveMetastoreProcessor::process_partition_name_to_vals;
@@ -4778,6 +4907,18 @@
}
}
+ void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) {
+ uint32_t sz = ifaces_.size();
+ for (uint32_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->get_partitions_by_filter(_return, db_name, tbl_name, filter, max_parts);
+ return;
+ } else {
+ ifaces_[i]->get_partitions_by_filter(_return, db_name, tbl_name, filter, max_parts);
+ }
+ }
+ }
+
void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) {
uint32_t sz = ifaces_.size();
for (uint32_t i = 0; i < sz; ++i) {
Index: metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
===================================================================
--- metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (revision 991274)
+++ metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (working copy)
@@ -162,6 +162,11 @@
printf("get_partition_names_ps\n");
}
+ void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) {
+ // Your implementation goes here
+ printf("get_partitions_by_filter\n");
+ }
+
void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) {
// Your implementation goes here
printf("alter_partition\n");
Index: metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
===================================================================
--- metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (revision 991274)
+++ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (working copy)
@@ -81,6 +81,8 @@
public List<String> get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws MetaException, TException;
+ public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, TException;
+
public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException;
public String get_config_value(String name, String defaultValue) throws ConfigValSecurityException, TException;
@@ -1227,6 +1229,48 @@
throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result");
}
+ public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, TException
+ {
+ send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+ return recv_get_partitions_by_filter();
+ }
+
+ public void send_get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws TException
+ {
+ oprot_.writeMessageBegin(new TMessage("get_partitions_by_filter", TMessageType.CALL, seqid_));
+ get_partitions_by_filter_args args = new get_partitions_by_filter_args();
+ args.db_name = db_name;
+ args.tbl_name = tbl_name;
+ args.filter = filter;
+ args.max_parts = max_parts;
+ args.write(oprot_);
+ oprot_.writeMessageEnd();
+ oprot_.getTransport().flush();
+ }
+
+ public List<Partition> recv_get_partitions_by_filter() throws MetaException, NoSuchObjectException, TException
+ {
+ TMessage msg = iprot_.readMessageBegin();
+ if (msg.type == TMessageType.EXCEPTION) {
+ TApplicationException x = TApplicationException.read(iprot_);
+ iprot_.readMessageEnd();
+ throw x;
+ }
+ get_partitions_by_filter_result result = new get_partitions_by_filter_result();
+ result.read(iprot_);
+ iprot_.readMessageEnd();
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
+ }
+
public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException
{
send_alter_partition(db_name, tbl_name, new_part);
@@ -1614,6 +1658,7 @@
processMap_.put("get_partition_names", new get_partition_names());
processMap_.put("get_partitions_ps", new get_partitions_ps());
processMap_.put("get_partition_names_ps", new get_partition_names_ps());
+ processMap_.put("get_partitions_by_filter", new get_partitions_by_filter());
processMap_.put("alter_partition", new alter_partition());
processMap_.put("get_config_value", new get_config_value());
processMap_.put("partition_name_to_vals", new partition_name_to_vals());
@@ -2493,6 +2538,36 @@
}
+ private class get_partitions_by_filter implements ProcessFunction {
+ public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException
+ {
+ get_partitions_by_filter_args args = new get_partitions_by_filter_args();
+ args.read(iprot);
+ iprot.readMessageEnd();
+ get_partitions_by_filter_result result = new get_partitions_by_filter_result();
+ try {
+ result.success = iface_.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts);
+ } catch (MetaException o1) {
+ result.o1 = o1;
+ } catch (NoSuchObjectException o2) {
+ result.o2 = o2;
+ } catch (Throwable th) {
+ LOGGER.error("Internal error processing get_partitions_by_filter", th);
+ TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing get_partitions_by_filter");
+ oprot.writeMessageBegin(new TMessage("get_partitions_by_filter", TMessageType.EXCEPTION, seqid));
+ x.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ return;
+ }
+ oprot.writeMessageBegin(new TMessage("get_partitions_by_filter", TMessageType.REPLY, seqid));
+ result.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ }
+
+ }
+
private class alter_partition implements ProcessFunction {
public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException
{
@@ -19871,6 +19946,768 @@
}
+ public static class get_partitions_by_filter_args implements TBase, java.io.Serializable, Cloneable {
+ private static final TStruct STRUCT_DESC = new TStruct("get_partitions_by_filter_args");
+ private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1);
+ private static final TField TBL_NAME_FIELD_DESC = new TField("tbl_name", TType.STRING, (short)2);
+ private static final TField FILTER_FIELD_DESC = new TField("filter", TType.STRING, (short)3);
+ private static final TField MAX_PARTS_FIELD_DESC = new TField("max_parts", TType.I16, (short)4);
+
+ private String db_name;
+ public static final int DB_NAME = 1;
+ private String tbl_name;
+ public static final int TBL_NAME = 2;
+ private String filter;
+ public static final int FILTER = 3;
+ private short max_parts;
+ public static final int MAX_PARTS = 4;
+
+ private final Isset __isset = new Isset();
+ private static final class Isset implements java.io.Serializable {
+ public boolean max_parts = false;
+ }
+
+ public static final Map<Integer, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new HashMap<Integer, FieldMetaData>() {{
+ put(DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ put(TBL_NAME, new FieldMetaData("tbl_name", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ put(FILTER, new FieldMetaData("filter", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ put(MAX_PARTS, new FieldMetaData("max_parts", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.I16)));
+ }});
+
+ static {
+ FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_args.class, metaDataMap);
+ }
+
+ public get_partitions_by_filter_args() {
+ this.max_parts = (short)-1;
+
+ }
+
+ public get_partitions_by_filter_args(
+ String db_name,
+ String tbl_name,
+ String filter,
+ short max_parts)
+ {
+ this();
+ this.db_name = db_name;
+ this.tbl_name = tbl_name;
+ this.filter = filter;
+ this.max_parts = max_parts;
+ this.__isset.max_parts = true;
+ }
+
+ /**
+ * Performs a deep copy on other.
+ */
+ public get_partitions_by_filter_args(get_partitions_by_filter_args other) {
+ if (other.isSetDb_name()) {
+ this.db_name = other.db_name;
+ }
+ if (other.isSetTbl_name()) {
+ this.tbl_name = other.tbl_name;
+ }
+ if (other.isSetFilter()) {
+ this.filter = other.filter;
+ }
+ __isset.max_parts = other.__isset.max_parts;
+ this.max_parts = other.max_parts;
+ }
+
+ @Override
+ public get_partitions_by_filter_args clone() {
+ return new get_partitions_by_filter_args(this);
+ }
+
+ public String getDb_name() {
+ return this.db_name;
+ }
+
+ public void setDb_name(String db_name) {
+ this.db_name = db_name;
+ }
+
+ public void unsetDb_name() {
+ this.db_name = null;
+ }
+
+ // Returns true if field db_name is set (has been assigned a value) and false otherwise
+ public boolean isSetDb_name() {
+ return this.db_name != null;
+ }
+
+ public String getTbl_name() {
+ return this.tbl_name;
+ }
+
+ public void setTbl_name(String tbl_name) {
+ this.tbl_name = tbl_name;
+ }
+
+ public void unsetTbl_name() {
+ this.tbl_name = null;
+ }
+
+ // Returns true if field tbl_name is set (has been assigned a value) and false otherwise
+ public boolean isSetTbl_name() {
+ return this.tbl_name != null;
+ }
+
+ public String getFilter() {
+ return this.filter;
+ }
+
+ public void setFilter(String filter) {
+ this.filter = filter;
+ }
+
+ public void unsetFilter() {
+ this.filter = null;
+ }
+
+ // Returns true if field filter is set (has been assigned a value) and false otherwise
+ public boolean isSetFilter() {
+ return this.filter != null;
+ }
+
+ public short getMax_parts() {
+ return this.max_parts;
+ }
+
+ public void setMax_parts(short max_parts) {
+ this.max_parts = max_parts;
+ this.__isset.max_parts = true;
+ }
+
+ public void unsetMax_parts() {
+ this.__isset.max_parts = false;
+ }
+
+ // Returns true if field max_parts is set (has been assigned a value) and false otherwise
+ public boolean isSetMax_parts() {
+ return this.__isset.max_parts;
+ }
+
+ public void setFieldValue(int fieldID, Object value) {
+ switch (fieldID) {
+ case DB_NAME:
+ if (value == null) {
+ unsetDb_name();
+ } else {
+ setDb_name((String)value);
+ }
+ break;
+
+ case TBL_NAME:
+ if (value == null) {
+ unsetTbl_name();
+ } else {
+ setTbl_name((String)value);
+ }
+ break;
+
+ case FILTER:
+ if (value == null) {
+ unsetFilter();
+ } else {
+ setFilter((String)value);
+ }
+ break;
+
+ case MAX_PARTS:
+ if (value == null) {
+ unsetMax_parts();
+ } else {
+ setMax_parts((Short)value);
+ }
+ break;
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ public Object getFieldValue(int fieldID) {
+ switch (fieldID) {
+ case DB_NAME:
+ return getDb_name();
+
+ case TBL_NAME:
+ return getTbl_name();
+
+ case FILTER:
+ return getFilter();
+
+ case MAX_PARTS:
+ return new Short(getMax_parts());
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ // Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise
+ public boolean isSet(int fieldID) {
+ switch (fieldID) {
+ case DB_NAME:
+ return isSetDb_name();
+ case TBL_NAME:
+ return isSetTbl_name();
+ case FILTER:
+ return isSetFilter();
+ case MAX_PARTS:
+ return isSetMax_parts();
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof get_partitions_by_filter_args)
+ return this.equals((get_partitions_by_filter_args)that);
+ return false;
+ }
+
+ public boolean equals(get_partitions_by_filter_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_db_name = true && this.isSetDb_name();
+ boolean that_present_db_name = true && that.isSetDb_name();
+ if (this_present_db_name || that_present_db_name) {
+ if (!(this_present_db_name && that_present_db_name))
+ return false;
+ if (!this.db_name.equals(that.db_name))
+ return false;
+ }
+
+ boolean this_present_tbl_name = true && this.isSetTbl_name();
+ boolean that_present_tbl_name = true && that.isSetTbl_name();
+ if (this_present_tbl_name || that_present_tbl_name) {
+ if (!(this_present_tbl_name && that_present_tbl_name))
+ return false;
+ if (!this.tbl_name.equals(that.tbl_name))
+ return false;
+ }
+
+ boolean this_present_filter = true && this.isSetFilter();
+ boolean that_present_filter = true && that.isSetFilter();
+ if (this_present_filter || that_present_filter) {
+ if (!(this_present_filter && that_present_filter))
+ return false;
+ if (!this.filter.equals(that.filter))
+ return false;
+ }
+
+ boolean this_present_max_parts = true;
+ boolean that_present_max_parts = true;
+ if (this_present_max_parts || that_present_max_parts) {
+ if (!(this_present_max_parts && that_present_max_parts))
+ return false;
+ if (this.max_parts != that.max_parts)
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public void read(TProtocol iprot) throws TException {
+ TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == TType.STOP) {
+ break;
+ }
+ switch (field.id)
+ {
+ case DB_NAME:
+ if (field.type == TType.STRING) {
+ this.db_name = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case TBL_NAME:
+ if (field.type == TType.STRING) {
+ this.tbl_name = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case FILTER:
+ if (field.type == TType.STRING) {
+ this.filter = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case MAX_PARTS:
+ if (field.type == TType.I16) {
+ this.max_parts = iprot.readI16();
+ this.__isset.max_parts = true;
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ TProtocolUtil.skip(iprot, field.type);
+ break;
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ validate();
+ }
+
+ public void write(TProtocol oprot) throws TException {
+ validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (this.db_name != null) {
+ oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+ oprot.writeString(this.db_name);
+ oprot.writeFieldEnd();
+ }
+ if (this.tbl_name != null) {
+ oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+ oprot.writeString(this.tbl_name);
+ oprot.writeFieldEnd();
+ }
+ if (this.filter != null) {
+ oprot.writeFieldBegin(FILTER_FIELD_DESC);
+ oprot.writeString(this.filter);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC);
+ oprot.writeI16(this.max_parts);
+ oprot.writeFieldEnd();
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("get_partitions_by_filter_args(");
+ boolean first = true;
+
+ sb.append("db_name:");
+ if (this.db_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.db_name);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("tbl_name:");
+ if (this.tbl_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.tbl_name);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("filter:");
+ if (this.filter == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.filter);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("max_parts:");
+ sb.append(this.max_parts);
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws TException {
+ // check for required fields
+ // check that fields of type enum have valid values
+ }
+
+ }
+
+ public static class get_partitions_by_filter_result implements TBase, java.io.Serializable, Cloneable {
+ private static final TStruct STRUCT_DESC = new TStruct("get_partitions_by_filter_result");
+ private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0);
+ private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1);
+ private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2);
+
+ private List<Partition> success;
+ public static final int SUCCESS = 0;
+ private MetaException o1;
+ public static final int O1 = 1;
+ private NoSuchObjectException o2;
+ public static final int O2 = 2;
+
+ private final Isset __isset = new Isset();
+ private static final class Isset implements java.io.Serializable {
+ }
+
+ public static final Map<Integer, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new HashMap<Integer, FieldMetaData>() {{
+ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT,
+ new ListMetaData(TType.LIST,
+ new StructMetaData(TType.STRUCT, Partition.class))));
+ put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRUCT)));
+ put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRUCT)));
+ }});
+
+ static {
+ FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_result.class, metaDataMap);
+ }
+
+ public get_partitions_by_filter_result() {
+ }
+
+ public get_partitions_by_filter_result(
+ List<Partition> success,
+ MetaException o1,
+ NoSuchObjectException o2)
+ {
+ this();
+ this.success = success;
+ this.o1 = o1;
+ this.o2 = o2;
+ }
+
+ /**
+ * Performs a deep copy on other.
+ */
+ public get_partitions_by_filter_result(get_partitions_by_filter_result other) {
+ if (other.isSetSuccess()) {
+ List<Partition> __this__success = new ArrayList<Partition>();
+ for (Partition other_element : other.success) {
+ __this__success.add(new Partition(other_element));
+ }
+ this.success = __this__success;
+ }
+ if (other.isSetO1()) {
+ this.o1 = new MetaException(other.o1);
+ }
+ if (other.isSetO2()) {
+ this.o2 = new NoSuchObjectException(other.o2);
+ }
+ }
+
+ @Override
+ public get_partitions_by_filter_result clone() {
+ return new get_partitions_by_filter_result(this);
+ }
+
+ public int getSuccessSize() {
+ return (this.success == null) ? 0 : this.success.size();
+ }
+
+ public java.util.Iterator<Partition> getSuccessIterator() {
+ return (this.success == null) ? null : this.success.iterator();
+ }
+
+ public void addToSuccess(Partition elem) {
+ if (this.success == null) {
+ this.success = new ArrayList<Partition>();
+ }
+ this.success.add(elem);
+ }
+
+ public List<Partition> getSuccess() {
+ return this.success;
+ }
+
+ public void setSuccess(List<Partition> success) {
+ this.success = success;
+ }
+
+ public void unsetSuccess() {
+ this.success = null;
+ }
+
+ // Returns true if field success is set (has been assigned a value) and false otherwise
+ public boolean isSetSuccess() {
+ return this.success != null;
+ }
+
+ public MetaException getO1() {
+ return this.o1;
+ }
+
+ public void setO1(MetaException o1) {
+ this.o1 = o1;
+ }
+
+ public void unsetO1() {
+ this.o1 = null;
+ }
+
+ // Returns true if field o1 is set (has been assigned a value) and false otherwise
+ public boolean isSetO1() {
+ return this.o1 != null;
+ }
+
+ public NoSuchObjectException getO2() {
+ return this.o2;
+ }
+
+ public void setO2(NoSuchObjectException o2) {
+ this.o2 = o2;
+ }
+
+ public void unsetO2() {
+ this.o2 = null;
+ }
+
+ // Returns true if field o2 is set (has been assigned a value) and false otherwise
+ public boolean isSetO2() {
+ return this.o2 != null;
+ }
+
+ public void setFieldValue(int fieldID, Object value) {
+ switch (fieldID) {
+ case SUCCESS:
+ if (value == null) {
+ unsetSuccess();
+ } else {
+ setSuccess((List<Partition>)value);
+ }
+ break;
+
+ case O1:
+ if (value == null) {
+ unsetO1();
+ } else {
+ setO1((MetaException)value);
+ }
+ break;
+
+ case O2:
+ if (value == null) {
+ unsetO2();
+ } else {
+ setO2((NoSuchObjectException)value);
+ }
+ break;
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ public Object getFieldValue(int fieldID) {
+ switch (fieldID) {
+ case SUCCESS:
+ return getSuccess();
+
+ case O1:
+ return getO1();
+
+ case O2:
+ return getO2();
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ // Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise
+ public boolean isSet(int fieldID) {
+ switch (fieldID) {
+ case SUCCESS:
+ return isSetSuccess();
+ case O1:
+ return isSetO1();
+ case O2:
+ return isSetO2();
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof get_partitions_by_filter_result)
+ return this.equals((get_partitions_by_filter_result)that);
+ return false;
+ }
+
+ public boolean equals(get_partitions_by_filter_result that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_success = true && this.isSetSuccess();
+ boolean that_present_success = true && that.isSetSuccess();
+ if (this_present_success || that_present_success) {
+ if (!(this_present_success && that_present_success))
+ return false;
+ if (!this.success.equals(that.success))
+ return false;
+ }
+
+ boolean this_present_o1 = true && this.isSetO1();
+ boolean that_present_o1 = true && that.isSetO1();
+ if (this_present_o1 || that_present_o1) {
+ if (!(this_present_o1 && that_present_o1))
+ return false;
+ if (!this.o1.equals(that.o1))
+ return false;
+ }
+
+ boolean this_present_o2 = true && this.isSetO2();
+ boolean that_present_o2 = true && that.isSetO2();
+ if (this_present_o2 || that_present_o2) {
+ if (!(this_present_o2 && that_present_o2))
+ return false;
+ if (!this.o2.equals(that.o2))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public void read(TProtocol iprot) throws TException {
+ TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == TType.STOP) {
+ break;
+ }
+ switch (field.id)
+ {
+ case SUCCESS:
+ if (field.type == TType.LIST) {
+ {
+ TList _list123 = iprot.readListBegin();
+ this.success = new ArrayList<Partition>(_list123.size);
+ for (int _i124 = 0; _i124 < _list123.size; ++_i124)
+ {
+ Partition _elem125;
+ _elem125 = new Partition();
+ _elem125.read(iprot);
+ this.success.add(_elem125);
+ }
+ iprot.readListEnd();
+ }
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case O1:
+ if (field.type == TType.STRUCT) {
+ this.o1 = new MetaException();
+ this.o1.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case O2:
+ if (field.type == TType.STRUCT) {
+ this.o2 = new NoSuchObjectException();
+ this.o2.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ TProtocolUtil.skip(iprot, field.type);
+ break;
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ validate();
+ }
+
+ public void write(TProtocol oprot) throws TException {
+ oprot.writeStructBegin(STRUCT_DESC);
+
+ if (this.isSetSuccess()) {
+ oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+ {
+ oprot.writeListBegin(new TList(TType.STRUCT, this.success.size()));
+ for (Partition _iter126 : this.success) {
+ _iter126.write(oprot);
+ }
+ oprot.writeListEnd();
+ }
+ oprot.writeFieldEnd();
+ } else if (this.isSetO1()) {
+ oprot.writeFieldBegin(O1_FIELD_DESC);
+ this.o1.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetO2()) {
+ oprot.writeFieldBegin(O2_FIELD_DESC);
+ this.o2.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("get_partitions_by_filter_result(");
+ boolean first = true;
+
+ sb.append("success:");
+ if (this.success == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.success);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o1:");
+ if (this.o1 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o1);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o2:");
+ if (this.o2 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o2);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws TException {
+ // check for required fields
+ // check that fields of type enum have valid values
+ }
+
+ }
+
public static class alter_partition_args implements TBase, java.io.Serializable, Cloneable {
private static final TStruct STRUCT_DESC = new TStruct("alter_partition_args");
private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1);
@@ -21380,13 +22217,13 @@
case SUCCESS:
if (field.type == TType.LIST) {
{
- TList _list123 = iprot.readListBegin();
- this.success = new ArrayList<String>(_list123.size);
- for (int _i124 = 0; _i124 < _list123.size; ++_i124)
+ TList _list127 = iprot.readListBegin();
+ this.success = new ArrayList<String>(_list127.size);
+ for (int _i128 = 0; _i128 < _list127.size; ++_i128)
{
- String _elem125;
- _elem125 = iprot.readString();
- this.success.add(_elem125);
+ String _elem129;
+ _elem129 = iprot.readString();
+ this.success.add(_elem129);
}
iprot.readListEnd();
}
@@ -21420,8 +22257,8 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRING, this.success.size()));
- for (String _iter126 : this.success) {
- oprot.writeString(_iter126);
+ for (String _iter130 : this.success) {
+ oprot.writeString(_iter130);
}
oprot.writeListEnd();
}
@@ -21871,15 +22708,15 @@
case SUCCESS:
if (field.type == TType.MAP) {
{
- TMap _map127 = iprot.readMapBegin();
- this.success = new HashMap<String,String>(2*_map127.size);
- for (int _i128 = 0; _i128 < _map127.size; ++_i128)
+ TMap _map131 = iprot.readMapBegin();
+ this.success = new HashMap<String,String>(2*_map131.size);
+ for (int _i132 = 0; _i132 < _map131.size; ++_i132)
{
- String _key129;
- String _val130;
- _key129 = iprot.readString();
- _val130 = iprot.readString();
- this.success.put(_key129, _val130);
+ String _key133;
+ String _val134;
+ _key133 = iprot.readString();
+ _val134 = iprot.readString();
+ this.success.put(_key133, _val134);
}
iprot.readMapEnd();
}
@@ -21913,9 +22750,9 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeMapBegin(new TMap(TType.STRING, TType.STRING, this.success.size()));
- for (Map.Entry<String, String> _iter131 : this.success.entrySet()) {
- oprot.writeString(_iter131.getKey());
- oprot.writeString(_iter131.getValue());
+ for (Map.Entry<String, String> _iter135 : this.success.entrySet()) {
+ oprot.writeString(_iter135.getKey());
+ oprot.writeString(_iter135.getValue());
}
oprot.writeMapEnd();
}
@@ -24586,14 +25423,14 @@
case SUCCESS:
if (field.type == TType.LIST) {
{
- TList _list132 = iprot.readListBegin();
- this.success = new ArrayList<Index>(_list132.size);
- for (int _i133 = 0; _i133 < _list132.size; ++_i133)
+ TList _list136 = iprot.readListBegin();
+ this.success = new ArrayList<Index>(_list136.size);
+ for (int _i137 = 0; _i137 < _list136.size; ++_i137)
{
- Index _elem134;
- _elem134 = new Index();
- _elem134.read(iprot);
- this.success.add(_elem134);
+ Index _elem138;
+ _elem138 = new Index();
+ _elem138.read(iprot);
+ this.success.add(_elem138);
}
iprot.readListEnd();
}
@@ -24635,8 +25472,8 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRUCT, this.success.size()));
- for (Index _iter135 : this.success) {
- _iter135.write(oprot);
+ for (Index _iter139 : this.success) {
+ _iter139.write(oprot);
}
oprot.writeListEnd();
}
@@ -25230,13 +26067,13 @@
case SUCCESS:
if (field.type == TType.LIST) {
{
- TList _list136 = iprot.readListBegin();
- this.success = new ArrayList<String>(_list136.size);
- for (int _i137 = 0; _i137 < _list136.size; ++_i137)
+ TList _list140 = iprot.readListBegin();
+ this.success = new ArrayList<String>(_list140.size);
+ for (int _i141 = 0; _i141 < _list140.size; ++_i141)
{
- String _elem138;
- _elem138 = iprot.readString();
- this.success.add(_elem138);
+ String _elem142;
+ _elem142 = iprot.readString();
+ this.success.add(_elem142);
}
iprot.readListEnd();
}
@@ -25270,8 +26107,8 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRING, this.success.size()));
- for (String _iter139 : this.success) {
- oprot.writeString(_iter139);
+ for (String _iter143 : this.success) {
+ oprot.writeString(_iter143);
}
oprot.writeListEnd();
}
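
A note on the bean pattern above: Java primitives cannot be null, so optional primitive fields such as max_parts carry an explicit Isset flag, while object fields signal presence with a null check. A short usage sketch against the generated args class (the printed values follow from the default constructor shown earlier; the demo class name is ours):

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.get_partitions_by_filter_args;

public class IssetDemo {
  public static void main(String[] args) {
    get_partitions_by_filter_args a = new get_partitions_by_filter_args();
    // The constructor assigns the default -1 without marking the field set:
    System.out.println(a.isSetMax_parts()); // false
    a.setMax_parts((short) 10);
    System.out.println(a.isSetMax_parts()); // true
    // Object fields use null as the "unset" sentinel instead:
    System.out.println(a.isSetFilter());    // false
    a.setFilter("ds = \"2010-08-20\"");
    System.out.println(a.isSetFilter());    // true
  }
}
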
Index: metastore/src/gen-php/ThriftHiveMetastore.php
===================================================================
--- metastore/src/gen-php/ThriftHiveMetastore.php (revision 991274)
+++ metastore/src/gen-php/ThriftHiveMetastore.php (working copy)
@@ -38,6 +38,7 @@
public function get_partition_names($db_name, $tbl_name, $max_parts);
public function get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts);
public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts);
+ public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
public function alter_partition($db_name, $tbl_name, $new_part);
public function get_config_value($name, $defaultValue);
public function partition_name_to_vals($part_name);
@@ -1673,6 +1674,66 @@
throw new Exception("get_partition_names_ps failed: unknown result");
}
+ public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts)
+ {
+ $this->send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
+ return $this->recv_get_partitions_by_filter();
+ }
+
+ public function send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts)
+ {
+ $args = new metastore_ThriftHiveMetastore_get_partitions_by_filter_args();
+ $args->db_name = $db_name;
+ $args->tbl_name = $tbl_name;
+ $args->filter = $filter;
+ $args->max_parts = $max_parts;
+ $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'get_partitions_by_filter', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('get_partitions_by_filter', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
+
+ public function recv_get_partitions_by_filter()
+ {
+ $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'metastore_ThriftHiveMetastore_get_partitions_by_filter_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
+ }
+ $result = new metastore_ThriftHiveMetastore_get_partitions_by_filter_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->success !== null) {
+ return $result->success;
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ throw new Exception("get_partitions_by_filter failed: unknown result");
+ }
+
public function alter_partition($db_name, $tbl_name, $new_part)
{
$this->send_alter_partition($db_name, $tbl_name, $new_part);
@@ -8536,6 +8597,282 @@
}
+class metastore_ThriftHiveMetastore_get_partitions_by_filter_args {
+ static $_TSPEC;
+
+ public $db_name = null;
+ public $tbl_name = null;
+ public $filter = null;
+ public $max_parts = -1;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'db_name',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'tbl_name',
+ 'type' => TType::STRING,
+ ),
+ 3 => array(
+ 'var' => 'filter',
+ 'type' => TType::STRING,
+ ),
+ 4 => array(
+ 'var' => 'max_parts',
+ 'type' => TType::I16,
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['db_name'])) {
+ $this->db_name = $vals['db_name'];
+ }
+ if (isset($vals['tbl_name'])) {
+ $this->tbl_name = $vals['tbl_name'];
+ }
+ if (isset($vals['filter'])) {
+ $this->filter = $vals['filter'];
+ }
+ if (isset($vals['max_parts'])) {
+ $this->max_parts = $vals['max_parts'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_partitions_by_filter_args';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->db_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->tbl_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->filter);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 4:
+ if ($ftype == TType::I16) {
+ $xfer += $input->readI16($this->max_parts);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_by_filter_args');
+ if ($this->db_name !== null) {
+ $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1);
+ $xfer += $output->writeString($this->db_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->tbl_name !== null) {
+ $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2);
+ $xfer += $output->writeString($this->tbl_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->filter !== null) {
+ $xfer += $output->writeFieldBegin('filter', TType::STRING, 3);
+ $xfer += $output->writeString($this->filter);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->max_parts !== null) {
+ $xfer += $output->writeFieldBegin('max_parts', TType::I16, 4);
+ $xfer += $output->writeI16($this->max_parts);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class metastore_ThriftHiveMetastore_get_partitions_by_filter_result {
+ static $_TSPEC;
+
+ public $success = null;
+ public $o1 = null;
+ public $o2 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::LST,
+ 'etype' => TType::STRUCT,
+ 'elem' => array(
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_Partition',
+ ),
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_MetaException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_NoSuchObjectException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_partitions_by_filter_result';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::LST) {
+ $this->success = array();
+ $_size217 = 0;
+ $_etype220 = 0;
+ $xfer += $input->readListBegin($_etype220, $_size217);
+ for ($_i221 = 0; $_i221 < $_size217; ++$_i221)
+ {
+ $elem222 = null;
+ $elem222 = new metastore_Partition();
+ $xfer += $elem222->read($input);
+ $this->success []= $elem222;
+ }
+ $xfer += $input->readListEnd();
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new metastore_MetaException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new metastore_NoSuchObjectException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_by_filter_result');
+ if ($this->success !== null) {
+ if (!is_array($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+ {
+ $output->writeListBegin(TType::STRUCT, count($this->success));
+ {
+ foreach ($this->success as $iter223)
+ {
+ $xfer += $iter223->write($output);
+ }
+ }
+ $output->writeListEnd();
+ }
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
class metastore_ThriftHiveMetastore_alter_partition_args {
static $_TSPEC;
@@ -9063,14 +9400,14 @@
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size217 = 0;
- $_etype220 = 0;
- $xfer += $input->readListBegin($_etype220, $_size217);
- for ($_i221 = 0; $_i221 < $_size217; ++$_i221)
+ $_size224 = 0;
+ $_etype227 = 0;
+ $xfer += $input->readListBegin($_etype227, $_size224);
+ for ($_i228 = 0; $_i228 < $_size224; ++$_i228)
{
- $elem222 = null;
- $xfer += $input->readString($elem222);
- $this->success []= $elem222;
+ $elem229 = null;
+ $xfer += $input->readString($elem229);
+ $this->success []= $elem229;
}
$xfer += $input->readListEnd();
} else {
@@ -9106,9 +9443,9 @@
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter223)
+ foreach ($this->success as $iter230)
{
- $xfer += $output->writeString($iter223);
+ $xfer += $output->writeString($iter230);
}
}
$output->writeListEnd();
@@ -9259,17 +9596,17 @@
case 0:
if ($ftype == TType::MAP) {
$this->success = array();
- $_size224 = 0;
- $_ktype225 = 0;
- $_vtype226 = 0;
- $xfer += $input->readMapBegin($_ktype225, $_vtype226, $_size224);
- for ($_i228 = 0; $_i228 < $_size224; ++$_i228)
+ $_size231 = 0;
+ $_ktype232 = 0;
+ $_vtype233 = 0;
+ $xfer += $input->readMapBegin($_ktype232, $_vtype233, $_size231);
+ for ($_i235 = 0; $_i235 < $_size231; ++$_i235)
{
- $key229 = '';
- $val230 = '';
- $xfer += $input->readString($key229);
- $xfer += $input->readString($val230);
- $this->success[$key229] = $val230;
+ $key236 = '';
+ $val237 = '';
+ $xfer += $input->readString($key236);
+ $xfer += $input->readString($val237);
+ $this->success[$key236] = $val237;
}
$xfer += $input->readMapEnd();
} else {
@@ -9305,10 +9642,10 @@
{
$output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
{
- foreach ($this->success as $kiter231 => $viter232)
+ foreach ($this->success as $kiter238 => $viter239)
{
- $xfer += $output->writeString($kiter231);
- $xfer += $output->writeString($viter232);
+ $xfer += $output->writeString($kiter238);
+ $xfer += $output->writeString($viter239);
}
}
$output->writeMapEnd();
@@ -10231,15 +10568,15 @@
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size233 = 0;
- $_etype236 = 0;
- $xfer += $input->readListBegin($_etype236, $_size233);
- for ($_i237 = 0; $_i237 < $_size233; ++$_i237)
+ $_size240 = 0;
+ $_etype243 = 0;
+ $xfer += $input->readListBegin($_etype243, $_size240);
+ for ($_i244 = 0; $_i244 < $_size240; ++$_i244)
{
- $elem238 = null;
- $elem238 = new metastore_Index();
- $xfer += $elem238->read($input);
- $this->success []= $elem238;
+ $elem245 = null;
+ $elem245 = new metastore_Index();
+ $xfer += $elem245->read($input);
+ $this->success []= $elem245;
}
$xfer += $input->readListEnd();
} else {
@@ -10283,9 +10620,9 @@
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter239)
+ foreach ($this->success as $iter246)
{
- $xfer += $iter239->write($output);
+ $xfer += $iter246->write($output);
}
}
$output->writeListEnd();
@@ -10477,14 +10814,14 @@
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size240 = 0;
- $_etype243 = 0;
- $xfer += $input->readListBegin($_etype243, $_size240);
- for ($_i244 = 0; $_i244 < $_size240; ++$_i244)
+ $_size247 = 0;
+ $_etype250 = 0;
+ $xfer += $input->readListBegin($_etype250, $_size247);
+ for ($_i251 = 0; $_i251 < $_size247; ++$_i251)
{
- $elem245 = null;
- $xfer += $input->readString($elem245);
- $this->success []= $elem245;
+ $elem252 = null;
+ $xfer += $input->readString($elem252);
+ $this->success []= $elem252;
}
$xfer += $input->readListEnd();
} else {
@@ -10520,9 +10857,9 @@
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter246)
+ foreach ($this->success as $iter253)
{
- $xfer += $output->writeString($iter246);
+ $xfer += $output->writeString($iter253);
}
}
$output->writeListEnd();
Index: metastore/if/hive_metastore.thrift
===================================================================
--- metastore/if/hive_metastore.thrift (revision 991397)
+++ metastore/if/hive_metastore.thrift (working copy)
@@ -228,6 +228,11 @@
2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
throws(1:MetaException o1)
+ // get the partitions matching the given partition filter
+ list<Partition> get_partitions_by_filter(1:string db_name, 2:string tbl_name,
+ 3:string filter, 4:i16 max_parts=-1)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
// changes the partition to the new partition object. partition is identified from the part values
// in the new_part
void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part)
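
The filter parameter is a string expression over the table's partition columns; its concrete syntax comes from the Filter.g ANTLR grammar wired into the build below, so that grammar file is authoritative. The following shapes are illustrative only (operator spellings assumed, written as Java string literals):

public class FilterExamples {
  // Illustrative filter strings; consult Filter.g for the exact grammar.
  static final String BY_DAY   = "ds = \"2010-08-20\"";
  static final String BY_RANGE = "ds >= \"2010-08-01\" and ds < \"2010-09-01\"";
  static final String COMPOUND = "ds = \"2010-08-20\" and hr = \"12\"";
}
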
Index: metastore/build.xml
===================================================================
--- metastore/build.xml (revision 991274)
+++ metastore/build.xml (working copy)
@@ -29,11 +29,29 @@
-
+
+
+
+
+
+
+ Building Grammar ${src.dir}/java/org/apache/hadoop/hive/metastore/parser/Filter.g ....
+
+
+
+
+
+
+
+
+
+
+
+