Index: conf/hive-default.xml
===================================================================
--- conf/hive-default.xml (revision 1085555)
+++ conf/hive-default.xml (working copy)
@@ -251,6 +251,12 @@
+<property>
+  <name>hive.metastore.batch.retrieve.max</name>
+  <value>300</value>
+  <description>Maximum number of objects (tables/partitions) that can be retrieved from the metastore in one batch. The higher the number, the fewer round trips are needed to the Hive metastore server, but it may also increase the memory requirement on the client side.</description>
+</property>
+
<property>
  <name>hive.default.fileformat</name>
  <value>TextFile</value>
  <description>Default file format for CREATE TABLE statement. Options are TextFile and SequenceFile. Users can explicitly say CREATE TABLE ... STORED AS <TEXTFILE|SEQUENCEFILE> to override</description>
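Note on usage: the new property is read through the standard Hive/Hadoop configuration API; a minimal sketch of a client-side lookup (the wrapper class is illustrative, only the key and default come from the property above):

    import org.apache.hadoop.hive.conf.HiveConf;

    public final class MetastoreBatchSize {
      // Reads hive.metastore.batch.retrieve.max, falling back to the default declared above.
      public static int get(HiveConf conf) {
        return conf.getInt("hive.metastore.batch.retrieve.max", 300);
      }
    }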
Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (revision 1085555)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (working copy)
@@ -144,48 +144,52 @@
public abstract List<Partition> getPartitionsByFilter(
String dbName, String tblName, String filter, short maxParts)
throws MetaException, NoSuchObjectException;
-
+
+ public abstract List<Partition> getPartitionsByNames(
+ String dbName, String tblName, List<String> partNames)
+ throws MetaException, NoSuchObjectException;
+
public abstract boolean addRole(String rowName, String ownerName)
throws InvalidObjectException, MetaException, NoSuchObjectException;
-
+
public abstract boolean removeRole(String roleName) throws MetaException, NoSuchObjectException;
-
+
public abstract boolean grantRole(Role role, String userName, PrincipalType principalType,
- String grantor, PrincipalType grantorType, boolean grantOption)
+ String grantor, PrincipalType grantorType, boolean grantOption)
throws MetaException, NoSuchObjectException, InvalidObjectException;
-
- public abstract boolean revokeRole(Role role, String userName, PrincipalType principalType)
+
+ public abstract boolean revokeRole(Role role, String userName, PrincipalType principalType)
throws MetaException, NoSuchObjectException;
public abstract PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
List<String> groupNames) throws InvalidObjectException, MetaException;
-
- public abstract PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName,
+
+ public abstract PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName,
List<String> groupNames) throws InvalidObjectException, MetaException;
-
- public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName,
+
+ public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName,
String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
-
- public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName,
+
+ public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName,
String partition, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
-
- public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName,
+
+ public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName,
String columnName, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
-
+
public abstract List<MGlobalPrivilege> listPrincipalGlobalGrants(String principalName,
PrincipalType principalType);
-
+
public abstract List<MDBPrivilege> listPrincipalDBGrants(String principalName,
PrincipalType principalType, String dbName);
public abstract List<MTablePrivilege> listAllTableGrants(
String principalName, PrincipalType principalType, String dbName,
String tableName);
-
+
public abstract List<MPartitionPrivilege> listPrincipalPartitionGrants(
String principalName, PrincipalType principalType, String dbName,
String tableName, String partName);
-
+
public abstract List<MTableColumnPrivilege> listPrincipalTableColumnGrants(
String principalName, PrincipalType principalType, String dbName,
String tableName, String columnName);
@@ -193,21 +197,21 @@
public abstract List<MPartitionColumnPrivilege> listPrincipalPartitionColumnGrants(
String principalName, PrincipalType principalType, String dbName,
String tableName, String partName, String columnName);
-
- public abstract boolean grantPrivileges (PrivilegeBag privileges)
+
+ public abstract boolean grantPrivileges (PrivilegeBag privileges)
throws InvalidObjectException, MetaException, NoSuchObjectException;
-
- public abstract boolean revokePrivileges (PrivilegeBag privileges)
+
+ public abstract boolean revokePrivileges (PrivilegeBag privileges)
throws InvalidObjectException, MetaException, NoSuchObjectException;
public abstract org.apache.hadoop.hive.metastore.api.Role getRole(
String roleName) throws NoSuchObjectException;
public List<String> listRoleNames();
-
+
public List<MRoleMap> listRoles(String principalName,
PrincipalType principalType);
-
+
public abstract Partition getPartitionWithAuth(String dbName, String tblName,
List<String> partVals, String user_name, List<String> group_names)
throws MetaException, NoSuchObjectException, InvalidObjectException;
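The partition names passed to the new RawStore method are the same strings that listPartitionNames returns (e.g. "ds=2008-04-08/hr=11"). A minimal caller sketch under that assumption (database and table names are illustrative, and listPartitionNames is assumed to be part of the RawStore contract as it is in ObjectStore below):

    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public final class RawStoreByNamesExample {
      // List up to ten partition names, then fetch those partitions in a single call.
      public static List<Partition> firstTen(RawStore ms, String db, String tbl)
          throws MetaException, NoSuchObjectException {
        List<String> names = ms.listPartitionNames(db, tbl, (short) 10);
        return ms.getPartitionsByNames(db, tbl, names);
      }
    }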
Index: metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (revision 1085555)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (working copy)
@@ -339,6 +339,19 @@
throws MetaException, TException, NoSuchObjectException;
/**
+ * Get partitions by a list of partition names.
+ * @param db_name database name
+ * @param tbl_name table name
+ * @param part_names list of partition names
+ * @return list of Partition objects
+ * @throws NoSuchObjectException
+ * @throws MetaException
+ * @throws TException
+ */
+ public List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+ List<String> part_names) throws NoSuchObjectException, MetaException, TException;
+
+ /**
* @param dbName
* @param tableName
* @param partialPvals
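Callers that need many partitions are expected to chunk the name list using hive.metastore.batch.retrieve.max so that no single metastore call grows too large. A sketch of that pattern (the helper class and the batchSize plumbing are illustrative, not part of this patch):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public final class PartitionBatchFetcher {
      // Fetch partitions in batches of at most batchSize names per round trip.
      public static List<Partition> fetch(IMetaStoreClient client, String db, String tbl,
          List<String> names, int batchSize) throws Exception {
        List<Partition> all = new ArrayList<Partition>();
        for (int i = 0; i < names.size(); i += batchSize) {
          int end = Math.min(i + batchSize, names.size());
          all.addAll(client.getPartitionsByNames(db, tbl, names.subList(i, end)));
        }
        return all;
      }
    }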
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (revision 1085555)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (working copy)
@@ -51,7 +51,6 @@
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
@@ -582,7 +581,7 @@
return deepCopyPartitions(
client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts));
}
-
+
@Override
public List<Partition> listPartitionsWithAuthInfo(String db_name,
String tbl_name, short max_parts, String user_name, List<String> group_names)
@@ -648,7 +647,12 @@
List<String> part_vals) throws NoSuchObjectException, MetaException, TException {
return deepCopy(client.get_partition(db_name, tbl_name, part_vals));
}
-
+
+ public List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+ List<String> part_names) throws NoSuchObjectException, MetaException, TException {
+ return deepCopyPartitions(client.get_partitions_by_names(db_name, tbl_name, part_names));
+ }
+
@Override
public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
List<String> part_vals, String user_name, List<String> group_names)
@@ -1005,13 +1009,13 @@
public boolean drop_role(String roleName) throws MetaException, TException {
return client.drop_role(roleName);
}
-
+
@Override
public List<Role> list_roles(String principalName,
PrincipalType principalType) throws MetaException, TException {
return client.list_roles(principalName, principalType);
}
-
+
@Override
public List<String> listRoleNames() throws MetaException, TException {
return client.get_role_names();
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 1085555)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -2397,6 +2397,34 @@
}
@Override
+ public List<Partition> get_partitions_by_names(final String dbName,
+ final String tblName, final List<String> partNames)
+ throws MetaException, NoSuchObjectException, TException {
+
+ startTableFunction("get_partitions_by_names", dbName, tblName);
+
+ List<Partition> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<Partition>>() {
+ @Override
+ List<Partition> run(RawStore ms) throws Exception {
+ return ms.getPartitionsByNames(dbName, tblName, partNames);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_partitions_by_names");
+ }
+ return ret;
+ }
+
+ @Override
public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
String userName, List groupNames) throws MetaException,
TException {
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 1085555)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy)
@@ -881,7 +881,7 @@
private List<Order> convertToOrders(List<MOrder> mkeys) {
List<Order> keys = null;
if (mkeys != null) {
- keys = new ArrayList<Order>();
+ keys = new ArrayList<Order>(mkeys.size());
for (MOrder part : mkeys) {
keys.add(new Order(part.getCol(), part.getOrder()));
}
@@ -907,18 +907,24 @@
// MSD and SD should be same objects. Not sure how to make then same right now
// MSerdeInfo *& SerdeInfo should be same as well
- private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd)
+ private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd,
+ boolean noFS)
throws MetaException {
if (msd == null) {
return null;
}
- return new StorageDescriptor(convertToFieldSchemas(msd.getCols()), msd
- .getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd
+ return new StorageDescriptor(noFS ? null: convertToFieldSchemas(msd.getCols()),
+ msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd
.isCompressed(), msd.getNumBuckets(), converToSerDeInfo(msd
.getSerDeInfo()), msd.getBucketCols(), convertToOrders(msd
.getSortCols()), msd.getParameters());
}
+ private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd)
+ throws MetaException {
+ return convertToStorageDescriptor(msd, false);
+ }
+
private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd)
throws MetaException {
if (sd == null) {
@@ -1055,6 +1061,16 @@
mpart.getParameters());
}
+ private Partition convertToPart(String dbName, String tblName, MPartition mpart)
+ throws MetaException {
+ if (mpart == null) {
+ return null;
+ }
+ return new Partition(mpart.getValues(), dbName, tblName, mpart.getCreateTime(),
+ mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd(), true),
+ mpart.getParameters());
+ }
+
public boolean dropPartition(String dbName, String tableName,
List part_vals) throws MetaException {
boolean success = false;
@@ -1178,6 +1194,15 @@
return parts;
}
+ private List<Partition> convertToParts(String dbName, String tblName, List<MPartition> mparts)
+ throws MetaException {
+ List<Partition> parts = new ArrayList<Partition>(mparts.size());
+ for (MPartition mp : mparts) {
+ parts.add(convertToPart(dbName, tblName, mp));
+ }
+ return parts;
+ }
+
// TODO:pc implement max
public List<String> listPartitionNames(String dbName, String tableName,
short max) throws MetaException {
@@ -1236,6 +1261,54 @@
}
@Override
+ public List<Partition> getPartitionsByNames(String dbName, String tblName,
+ List<String> partNames) throws MetaException, NoSuchObjectException {
+
+ boolean success = false;
+ try {
+ openTransaction();
+
+ StringBuilder sb = new StringBuilder(
+ "table.tableName == t1 && table.database.name == t2 && (");
+ int n = 0;
+ Map<String, String> params = new HashMap<String, String>();
+ for (Iterator<String> itr = partNames.iterator(); itr.hasNext();) {
+ String pn = "p" + n;
+ n++;
+ String part = itr.next();
+ params.put(pn, part);
+ sb.append("partitionName == ").append(pn);
+ sb.append(" || ");
+ }
+ sb.setLength(sb.length() - 4); // remove the last " || "
+ sb.append(')');
+
+ Query query = pm.newQuery(MPartition.class, sb.toString());
+
+ LOG.debug(" JDOQL filter is " + sb.toString());
+
+ params.put("t1", tblName.trim());
+ params.put("t2", dbName.trim());
+
+ String parameterDeclaration = makeParameterDeclarationString(params);
+ query.declareParameters(parameterDeclaration);
+ query.setOrdering("partitionName ascending");
+
+ List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params);
+ // pm.retrieveAll(mparts); // retrieveAll is pessimistic. some fields may not be needed
+ List<Partition> results = convertToParts(dbName, tblName, mparts);
+ // pm.makeTransientAll(mparts); // makeTransient will prohibit future access of unfetched fields
+ query.closeAll();
+ success = commitTransaction();
+ return results;
+ } finally {
+ if (!success) {
+ rollbackTransaction();
+ }
+ }
+ }
+
+ @Override
public List<Partition> getPartitionsByFilter(String dbName, String tblName,
String filter, short maxParts) throws MetaException, NoSuchObjectException {
openTransaction();
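For a concrete picture of the JDOQL built in getPartitionsByNames above: with two names the filter string becomes

    table.tableName == t1 && table.database.name == t2 && (partitionName == p0 || partitionName == p1)

and the parameter map binds p0/p1 to the two partition names and t1/t2 to the table and database names. The example names would be strings like "ds=2011-04-01/hr=01" (illustrative); note that trimming the trailing " || " assumes partNames is non-empty.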
Index: metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
===================================================================
--- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (revision 1085555)
+++ metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (working copy)
@@ -297,6 +297,15 @@
"""
pass
+ def get_partitions_by_names(self, db_name, tbl_name, names):
+ """
+ Parameters:
+ - db_name
+ - tbl_name
+ - names
+ """
+ pass
+
def alter_partition(self, db_name, tbl_name, new_part):
"""
Parameters:
@@ -1707,6 +1716,44 @@
raise result.o2
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
+ def get_partitions_by_names(self, db_name, tbl_name, names):
+ """
+ Parameters:
+ - db_name
+ - tbl_name
+ - names
+ """
+ self.send_get_partitions_by_names(db_name, tbl_name, names)
+ return self.recv_get_partitions_by_names()
+
+ def send_get_partitions_by_names(self, db_name, tbl_name, names):
+ self._oprot.writeMessageBegin('get_partitions_by_names', TMessageType.CALL, self._seqid)
+ args = get_partitions_by_names_args()
+ args.db_name = db_name
+ args.tbl_name = tbl_name
+ args.names = names
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get_partitions_by_names(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_partitions_by_names_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.o1 != None:
+ raise result.o1
+ if result.o2 != None:
+ raise result.o2
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result");
+
def alter_partition(self, db_name, tbl_name, new_part):
"""
Parameters:
@@ -2573,6 +2620,7 @@
self._processMap["get_partitions_ps_with_auth"] = Processor.process_get_partitions_ps_with_auth
self._processMap["get_partition_names_ps"] = Processor.process_get_partition_names_ps
self._processMap["get_partitions_by_filter"] = Processor.process_get_partitions_by_filter
+ self._processMap["get_partitions_by_names"] = Processor.process_get_partitions_by_names
self._processMap["alter_partition"] = Processor.process_alter_partition
self._processMap["get_config_value"] = Processor.process_get_config_value
self._processMap["partition_name_to_vals"] = Processor.process_partition_name_to_vals
@@ -3145,6 +3193,22 @@
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_get_partitions_by_names(self, seqid, iprot, oprot):
+ args = get_partitions_by_names_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_partitions_by_names_result()
+ try:
+ result.success = self._handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names)
+ except MetaException, o1:
+ result.o1 = o1
+ except NoSuchObjectException, o2:
+ result.o2 = o2
+ oprot.writeMessageBegin("get_partitions_by_names", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_alter_partition(self, seqid, iprot, oprot):
args = alter_partition_args()
args.read(iprot)
@@ -8976,6 +9040,190 @@
def __ne__(self, other):
return not (self == other)
+class get_partitions_by_names_args:
+ """
+ Attributes:
+ - db_name
+ - tbl_name
+ - names
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'db_name', None, None, ), # 1
+ (2, TType.STRING, 'tbl_name', None, None, ), # 2
+ (3, TType.LIST, 'names', (TType.STRING,None), None, ), # 3
+ )
+
+ def __init__(self, db_name=None, tbl_name=None, names=None,):
+ self.db_name = db_name
+ self.tbl_name = tbl_name
+ self.names = names
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.db_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.tbl_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.LIST:
+ self.names = []
+ (_etype347, _size344) = iprot.readListBegin()
+ for _i348 in xrange(_size344):
+ _elem349 = iprot.readString();
+ self.names.append(_elem349)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_partitions_by_names_args')
+ if self.db_name != None:
+ oprot.writeFieldBegin('db_name', TType.STRING, 1)
+ oprot.writeString(self.db_name)
+ oprot.writeFieldEnd()
+ if self.tbl_name != None:
+ oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
+ oprot.writeString(self.tbl_name)
+ oprot.writeFieldEnd()
+ if self.names != None:
+ oprot.writeFieldBegin('names', TType.LIST, 3)
+ oprot.writeListBegin(TType.STRING, len(self.names))
+ for iter350 in self.names:
+ oprot.writeString(iter350)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class get_partitions_by_names_result:
+ """
+ Attributes:
+ - success
+ - o1
+ - o2
+ """
+
+ thrift_spec = (
+ (0, TType.LIST, 'success', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
+ )
+
+ def __init__(self, success=None, o1=None, o2=None,):
+ self.success = success
+ self.o1 = o1
+ self.o2 = o2
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.LIST:
+ self.success = []
+ (_etype354, _size351) = iprot.readListBegin()
+ for _i355 in xrange(_size351):
+ _elem356 = Partition()
+ _elem356.read(iprot)
+ self.success.append(_elem356)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.o2 = NoSuchObjectException()
+ self.o2.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_partitions_by_names_result')
+ if self.success != None:
+ oprot.writeFieldBegin('success', TType.LIST, 0)
+ oprot.writeListBegin(TType.STRUCT, len(self.success))
+ for iter357 in self.success:
+ iter357.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.o1 != None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o2 != None:
+ oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+ self.o2.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class alter_partition_args:
"""
Attributes:
@@ -9362,10 +9610,10 @@
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype347, _size344) = iprot.readListBegin()
- for _i348 in xrange(_size344):
- _elem349 = iprot.readString();
- self.success.append(_elem349)
+ (_etype361, _size358) = iprot.readListBegin()
+ for _i362 in xrange(_size358):
+ _elem363 = iprot.readString();
+ self.success.append(_elem363)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -9388,8 +9636,8 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter350 in self.success:
- oprot.writeString(iter350)
+ for iter364 in self.success:
+ oprot.writeString(iter364)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 != None:
@@ -9500,11 +9748,11 @@
if fid == 0:
if ftype == TType.MAP:
self.success = {}
- (_ktype352, _vtype353, _size351 ) = iprot.readMapBegin()
- for _i355 in xrange(_size351):
- _key356 = iprot.readString();
- _val357 = iprot.readString();
- self.success[_key356] = _val357
+ (_ktype366, _vtype367, _size365 ) = iprot.readMapBegin()
+ for _i369 in xrange(_size365):
+ _key370 = iprot.readString();
+ _val371 = iprot.readString();
+ self.success[_key370] = _val371
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -9527,9 +9775,9 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
- for kiter358,viter359 in self.success.items():
- oprot.writeString(kiter358)
- oprot.writeString(viter359)
+ for kiter372,viter373 in self.success.items():
+ oprot.writeString(kiter372)
+ oprot.writeString(viter373)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.o1 != None:
@@ -10354,11 +10602,11 @@
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype363, _size360) = iprot.readListBegin()
- for _i364 in xrange(_size360):
- _elem365 = Index()
- _elem365.read(iprot)
- self.success.append(_elem365)
+ (_etype377, _size374) = iprot.readListBegin()
+ for _i378 in xrange(_size374):
+ _elem379 = Index()
+ _elem379.read(iprot)
+ self.success.append(_elem379)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -10387,8 +10635,8 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter366 in self.success:
- iter366.write(oprot)
+ for iter380 in self.success:
+ iter380.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 != None:
@@ -10527,10 +10775,10 @@
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype370, _size367) = iprot.readListBegin()
- for _i371 in xrange(_size367):
- _elem372 = iprot.readString();
- self.success.append(_elem372)
+ (_etype384, _size381) = iprot.readListBegin()
+ for _i385 in xrange(_size381):
+ _elem386 = iprot.readString();
+ self.success.append(_elem386)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -10553,8 +10801,8 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter373 in self.success:
- oprot.writeString(iter373)
+ for iter387 in self.success:
+ oprot.writeString(iter387)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o2 != None:
@@ -10908,10 +11156,10 @@
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype377, _size374) = iprot.readListBegin()
- for _i378 in xrange(_size374):
- _elem379 = iprot.readString();
- self.success.append(_elem379)
+ (_etype391, _size388) = iprot.readListBegin()
+ for _i392 in xrange(_size388):
+ _elem393 = iprot.readString();
+ self.success.append(_elem393)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -10934,8 +11182,8 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter380 in self.success:
- oprot.writeString(iter380)
+ for iter394 in self.success:
+ oprot.writeString(iter394)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 != None:
@@ -11402,11 +11650,11 @@
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype384, _size381) = iprot.readListBegin()
- for _i385 in xrange(_size381):
- _elem386 = Role()
- _elem386.read(iprot)
- self.success.append(_elem386)
+ (_etype398, _size395) = iprot.readListBegin()
+ for _i399 in xrange(_size395):
+ _elem400 = Role()
+ _elem400.read(iprot)
+ self.success.append(_elem400)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11429,8 +11677,8 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter387 in self.success:
- iter387.write(oprot)
+ for iter401 in self.success:
+ iter401.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 != None:
@@ -11497,10 +11745,10 @@
elif fid == 3:
if ftype == TType.LIST:
self.group_names = []
- (_etype391, _size388) = iprot.readListBegin()
- for _i392 in xrange(_size388):
- _elem393 = iprot.readString();
- self.group_names.append(_elem393)
+ (_etype405, _size402) = iprot.readListBegin()
+ for _i406 in xrange(_size402):
+ _elem407 = iprot.readString();
+ self.group_names.append(_elem407)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11525,8 +11773,8 @@
if self.group_names != None:
oprot.writeFieldBegin('group_names', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter394 in self.group_names:
- oprot.writeString(iter394)
+ for iter408 in self.group_names:
+ oprot.writeString(iter408)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -11730,11 +11978,11 @@
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype398, _size395) = iprot.readListBegin()
- for _i399 in xrange(_size395):
- _elem400 = HiveObjectPrivilege()
- _elem400.read(iprot)
- self.success.append(_elem400)
+ (_etype412, _size409) = iprot.readListBegin()
+ for _i413 in xrange(_size409):
+ _elem414 = HiveObjectPrivilege()
+ _elem414.read(iprot)
+ self.success.append(_elem414)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11757,8 +12005,8 @@
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter401 in self.success:
- iter401.write(oprot)
+ for iter415 in self.success:
+ iter415.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 != None:
Index: metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
===================================================================
--- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote (revision 1085555)
+++ metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote (working copy)
@@ -54,6 +54,7 @@
print ' get_partitions_ps_with_auth(string db_name, string tbl_name, part_vals, i16 max_parts, string user_name, group_names)'
print ' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)'
print ' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)'
+ print ' get_partitions_by_names(string db_name, string tbl_name, names)'
print ' void alter_partition(string db_name, string tbl_name, Partition new_part)'
print ' string get_config_value(string name, string defaultValue)'
print ' partition_name_to_vals(string part_name)'
@@ -326,6 +327,12 @@
sys.exit(1)
pp.pprint(client.get_partitions_by_filter(args[0],args[1],args[2],eval(args[3]),))
+elif cmd == 'get_partitions_by_names':
+ if len(args) != 3:
+ print 'get_partitions_by_names requires 3 args'
+ sys.exit(1)
+ pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),))
+
elif cmd == 'alter_partition':
if len(args) != 3:
print 'alter_partition requires 3 args'
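The new command follows the same convention as get_partitions_by_filter above: the last argument is eval'd, so the names are passed as a Python list literal. A hypothetical invocation (host, port, database, table, and names are made up; -h host:port is the standard option of the generated remote script):

    ./ThriftHiveMetastore-remote -h localhost:9083 get_partitions_by_names default srcpart "['ds=2011-04-01/hr=01','ds=2011-04-01/hr=02']"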
Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
===================================================================
--- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp (revision 1085555)
+++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp (working copy)
@@ -7882,6 +7882,292 @@
return xfer;
}
+uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->db_name);
+ this->__isset.db_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->tbl_name);
+ this->__isset.tbl_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->names.clear();
+ uint32_t _size383;
+ ::apache::thrift::protocol::TType _etype386;
+ iprot->readListBegin(_etype386, _size383);
+ this->names.resize(_size383);
+ uint32_t _i387;
+ for (_i387 = 0; _i387 < _size383; ++_i387)
+ {
+ xfer += iprot->readString(this->names[_i387]);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.names = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_args");
+ xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->db_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->tbl_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->names.size());
+ std::vector<std::string> ::const_iterator _iter388;
+ for (_iter388 = this->names.begin(); _iter388 != this->names.end(); ++_iter388)
+ {
+ xfer += oprot->writeString((*_iter388));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_pargs");
+ xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->db_name)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString((*(this->tbl_name)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, (*(this->names)).size());
+ std::vector<std::string> ::const_iterator _iter389;
+ for (_iter389 = (*(this->names)).begin(); _iter389 != (*(this->names)).end(); ++_iter389)
+ {
+ xfer += oprot->writeString((*_iter389));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size390;
+ ::apache::thrift::protocol::TType _etype393;
+ iprot->readListBegin(_etype393, _size390);
+ this->success.resize(_size390);
+ uint32_t _i394;
+ for (_i394 = 0; _i394 < _size390; ++_i394)
+ {
+ xfer += this->success[_i394].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, this->success.size());
+ std::vector<Partition> ::const_iterator _iter395;
+ for (_iter395 = this->success.begin(); _iter395 != this->success.end(); ++_iter395)
+ {
+ xfer += (*_iter395).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o1) {
+ xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->o1.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o2) {
+ xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->o2.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size396;
+ ::apache::thrift::protocol::TType _etype399;
+ iprot->readListBegin(_etype399, _size396);
+ (*(this->success)).resize(_size396);
+ uint32_t _i400;
+ for (_i400 = 0; _i400 < _size396; ++_i400)
+ {
+ xfer += (*(this->success))[_i400].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
uint32_t ThriftHiveMetastore_alter_partition_args::read(::apache::thrift::protocol::TProtocol* iprot) {
uint32_t xfer = 0;
@@ -8366,14 +8652,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size383;
- ::apache::thrift::protocol::TType _etype386;
- iprot->readListBegin(_etype386, _size383);
- this->success.resize(_size383);
- uint32_t _i387;
- for (_i387 = 0; _i387 < _size383; ++_i387)
+ uint32_t _size401;
+ ::apache::thrift::protocol::TType _etype404;
+ iprot->readListBegin(_etype404, _size401);
+ this->success.resize(_size401);
+ uint32_t _i405;
+ for (_i405 = 0; _i405 < _size401; ++_i405)
{
- xfer += iprot->readString(this->success[_i387]);
+ xfer += iprot->readString(this->success[_i405]);
}
iprot->readListEnd();
}
@@ -8412,10 +8698,10 @@
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->success.size());
- std::vector ::const_iterator _iter388;
- for (_iter388 = this->success.begin(); _iter388 != this->success.end(); ++_iter388)
+ std::vector ::const_iterator _iter406;
+ for (_iter406 = this->success.begin(); _iter406 != this->success.end(); ++_iter406)
{
- xfer += oprot->writeString((*_iter388));
+ xfer += oprot->writeString((*_iter406));
}
xfer += oprot->writeListEnd();
}
@@ -8454,14 +8740,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size389;
- ::apache::thrift::protocol::TType _etype392;
- iprot->readListBegin(_etype392, _size389);
- (*(this->success)).resize(_size389);
- uint32_t _i393;
- for (_i393 = 0; _i393 < _size389; ++_i393)
+ uint32_t _size407;
+ ::apache::thrift::protocol::TType _etype410;
+ iprot->readListBegin(_etype410, _size407);
+ (*(this->success)).resize(_size407);
+ uint32_t _i411;
+ for (_i411 = 0; _i411 < _size407; ++_i411)
{
- xfer += iprot->readString((*(this->success))[_i393]);
+ xfer += iprot->readString((*(this->success))[_i411]);
}
iprot->readListEnd();
}
@@ -8576,17 +8862,17 @@
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size394;
- ::apache::thrift::protocol::TType _ktype395;
- ::apache::thrift::protocol::TType _vtype396;
- iprot->readMapBegin(_ktype395, _vtype396, _size394);
- uint32_t _i398;
- for (_i398 = 0; _i398 < _size394; ++_i398)
+ uint32_t _size412;
+ ::apache::thrift::protocol::TType _ktype413;
+ ::apache::thrift::protocol::TType _vtype414;
+ iprot->readMapBegin(_ktype413, _vtype414, _size412);
+ uint32_t _i416;
+ for (_i416 = 0; _i416 < _size412; ++_i416)
{
- std::string _key399;
- xfer += iprot->readString(_key399);
- std::string& _val400 = this->success[_key399];
- xfer += iprot->readString(_val400);
+ std::string _key417;
+ xfer += iprot->readString(_key417);
+ std::string& _val418 = this->success[_key417];
+ xfer += iprot->readString(_val418);
}
iprot->readMapEnd();
}
@@ -8625,11 +8911,11 @@
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, this->success.size());
- std::map ::const_iterator _iter401;
- for (_iter401 = this->success.begin(); _iter401 != this->success.end(); ++_iter401)
+ std::map ::const_iterator _iter419;
+ for (_iter419 = this->success.begin(); _iter419 != this->success.end(); ++_iter419)
{
- xfer += oprot->writeString(_iter401->first);
- xfer += oprot->writeString(_iter401->second);
+ xfer += oprot->writeString(_iter419->first);
+ xfer += oprot->writeString(_iter419->second);
}
xfer += oprot->writeMapEnd();
}
@@ -8668,17 +8954,17 @@
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size402;
- ::apache::thrift::protocol::TType _ktype403;
- ::apache::thrift::protocol::TType _vtype404;
- iprot->readMapBegin(_ktype403, _vtype404, _size402);
- uint32_t _i406;
- for (_i406 = 0; _i406 < _size402; ++_i406)
+ uint32_t _size420;
+ ::apache::thrift::protocol::TType _ktype421;
+ ::apache::thrift::protocol::TType _vtype422;
+ iprot->readMapBegin(_ktype421, _vtype422, _size420);
+ uint32_t _i424;
+ for (_i424 = 0; _i424 < _size420; ++_i424)
{
- std::string _key407;
- xfer += iprot->readString(_key407);
- std::string& _val408 = (*(this->success))[_key407];
- xfer += iprot->readString(_val408);
+ std::string _key425;
+ xfer += iprot->readString(_key425);
+ std::string& _val426 = (*(this->success))[_key425];
+ xfer += iprot->readString(_val426);
}
iprot->readMapEnd();
}
@@ -9739,14 +10025,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size409;
- ::apache::thrift::protocol::TType _etype412;
- iprot->readListBegin(_etype412, _size409);
- this->success.resize(_size409);
- uint32_t _i413;
- for (_i413 = 0; _i413 < _size409; ++_i413)
+ uint32_t _size427;
+ ::apache::thrift::protocol::TType _etype430;
+ iprot->readListBegin(_etype430, _size427);
+ this->success.resize(_size427);
+ uint32_t _i431;
+ for (_i431 = 0; _i431 < _size427; ++_i431)
{
- xfer += this->success[_i413].read(iprot);
+ xfer += this->success[_i431].read(iprot);
}
iprot->readListEnd();
}
@@ -9793,10 +10079,10 @@
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, this->success.size());
- std::vector ::const_iterator _iter414;
- for (_iter414 = this->success.begin(); _iter414 != this->success.end(); ++_iter414)
+ std::vector ::const_iterator _iter432;
+ for (_iter432 = this->success.begin(); _iter432 != this->success.end(); ++_iter432)
{
- xfer += (*_iter414).write(oprot);
+ xfer += (*_iter432).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9839,14 +10125,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size415;
- ::apache::thrift::protocol::TType _etype418;
- iprot->readListBegin(_etype418, _size415);
- (*(this->success)).resize(_size415);
- uint32_t _i419;
- for (_i419 = 0; _i419 < _size415; ++_i419)
+ uint32_t _size433;
+ ::apache::thrift::protocol::TType _etype436;
+ iprot->readListBegin(_etype436, _size433);
+ (*(this->success)).resize(_size433);
+ uint32_t _i437;
+ for (_i437 = 0; _i437 < _size433; ++_i437)
{
- xfer += (*(this->success))[_i419].read(iprot);
+ xfer += (*(this->success))[_i437].read(iprot);
}
iprot->readListEnd();
}
@@ -9997,14 +10283,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size420;
- ::apache::thrift::protocol::TType _etype423;
- iprot->readListBegin(_etype423, _size420);
- this->success.resize(_size420);
- uint32_t _i424;
- for (_i424 = 0; _i424 < _size420; ++_i424)
+ uint32_t _size438;
+ ::apache::thrift::protocol::TType _etype441;
+ iprot->readListBegin(_etype441, _size438);
+ this->success.resize(_size438);
+ uint32_t _i442;
+ for (_i442 = 0; _i442 < _size438; ++_i442)
{
- xfer += iprot->readString(this->success[_i424]);
+ xfer += iprot->readString(this->success[_i442]);
}
iprot->readListEnd();
}
@@ -10043,10 +10329,10 @@
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->success.size());
- std::vector ::const_iterator _iter425;
- for (_iter425 = this->success.begin(); _iter425 != this->success.end(); ++_iter425)
+ std::vector ::const_iterator _iter443;
+ for (_iter443 = this->success.begin(); _iter443 != this->success.end(); ++_iter443)
{
- xfer += oprot->writeString((*_iter425));
+ xfer += oprot->writeString((*_iter443));
}
xfer += oprot->writeListEnd();
}
@@ -10085,14 +10371,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size426;
- ::apache::thrift::protocol::TType _etype429;
- iprot->readListBegin(_etype429, _size426);
- (*(this->success)).resize(_size426);
- uint32_t _i430;
- for (_i430 = 0; _i430 < _size426; ++_i430)
+ uint32_t _size444;
+ ::apache::thrift::protocol::TType _etype447;
+ iprot->readListBegin(_etype447, _size444);
+ (*(this->success)).resize(_size444);
+ uint32_t _i448;
+ for (_i448 = 0; _i448 < _size444; ++_i448)
{
- xfer += iprot->readString((*(this->success))[_i430]);
+ xfer += iprot->readString((*(this->success))[_i448]);
}
iprot->readListEnd();
}
@@ -10549,14 +10835,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size431;
- ::apache::thrift::protocol::TType _etype434;
- iprot->readListBegin(_etype434, _size431);
- this->success.resize(_size431);
- uint32_t _i435;
- for (_i435 = 0; _i435 < _size431; ++_i435)
+ uint32_t _size449;
+ ::apache::thrift::protocol::TType _etype452;
+ iprot->readListBegin(_etype452, _size449);
+ this->success.resize(_size449);
+ uint32_t _i453;
+ for (_i453 = 0; _i453 < _size449; ++_i453)
{
- xfer += iprot->readString(this->success[_i435]);
+ xfer += iprot->readString(this->success[_i453]);
}
iprot->readListEnd();
}
@@ -10595,10 +10881,10 @@
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->success.size());
- std::vector ::const_iterator _iter436;
- for (_iter436 = this->success.begin(); _iter436 != this->success.end(); ++_iter436)
+ std::vector ::const_iterator _iter454;
+ for (_iter454 = this->success.begin(); _iter454 != this->success.end(); ++_iter454)
{
- xfer += oprot->writeString((*_iter436));
+ xfer += oprot->writeString((*_iter454));
}
xfer += oprot->writeListEnd();
}
@@ -10637,14 +10923,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size437;
- ::apache::thrift::protocol::TType _etype440;
- iprot->readListBegin(_etype440, _size437);
- (*(this->success)).resize(_size437);
- uint32_t _i441;
- for (_i441 = 0; _i441 < _size437; ++_i441)
+ uint32_t _size455;
+ ::apache::thrift::protocol::TType _etype458;
+ iprot->readListBegin(_etype458, _size455);
+ (*(this->success)).resize(_size455);
+ uint32_t _i459;
+ for (_i459 = 0; _i459 < _size455; ++_i459)
{
- xfer += iprot->readString((*(this->success))[_i441]);
+ xfer += iprot->readString((*(this->success))[_i459]);
}
iprot->readListEnd();
}
@@ -10711,9 +10997,9 @@
break;
case 3:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast442;
- xfer += iprot->readI32(ecast442);
- this->principal_type = (PrincipalType::type)ecast442;
+ int32_t ecast460;
+ xfer += iprot->readI32(ecast460);
+ this->principal_type = (PrincipalType::type)ecast460;
this->__isset.principal_type = true;
} else {
xfer += iprot->skip(ftype);
@@ -10729,9 +11015,9 @@
break;
case 5:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast443;
- xfer += iprot->readI32(ecast443);
- this->grantorType = (PrincipalType::type)ecast443;
+ int32_t ecast461;
+ xfer += iprot->readI32(ecast461);
+ this->grantorType = (PrincipalType::type)ecast461;
this->__isset.grantorType = true;
} else {
xfer += iprot->skip(ftype);
@@ -10963,9 +11249,9 @@
break;
case 3:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast444;
- xfer += iprot->readI32(ecast444);
- this->principal_type = (PrincipalType::type)ecast444;
+ int32_t ecast462;
+ xfer += iprot->readI32(ecast462);
+ this->principal_type = (PrincipalType::type)ecast462;
this->__isset.principal_type = true;
} else {
xfer += iprot->skip(ftype);
@@ -11163,9 +11449,9 @@
break;
case 2:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast445;
- xfer += iprot->readI32(ecast445);
- this->principal_type = (PrincipalType::type)ecast445;
+ int32_t ecast463;
+ xfer += iprot->readI32(ecast463);
+ this->principal_type = (PrincipalType::type)ecast463;
this->__isset.principal_type = true;
} else {
xfer += iprot->skip(ftype);
@@ -11235,14 +11521,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size446;
- ::apache::thrift::protocol::TType _etype449;
- iprot->readListBegin(_etype449, _size446);
- this->success.resize(_size446);
- uint32_t _i450;
- for (_i450 = 0; _i450 < _size446; ++_i450)
+ uint32_t _size464;
+ ::apache::thrift::protocol::TType _etype467;
+ iprot->readListBegin(_etype467, _size464);
+ this->success.resize(_size464);
+ uint32_t _i468;
+ for (_i468 = 0; _i468 < _size464; ++_i468)
{
- xfer += this->success[_i450].read(iprot);
+ xfer += this->success[_i468].read(iprot);
}
iprot->readListEnd();
}
@@ -11281,10 +11567,10 @@
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, this->success.size());
- std::vector ::const_iterator _iter451;
- for (_iter451 = this->success.begin(); _iter451 != this->success.end(); ++_iter451)
+ std::vector ::const_iterator _iter469;
+ for (_iter469 = this->success.begin(); _iter469 != this->success.end(); ++_iter469)
{
- xfer += (*_iter451).write(oprot);
+ xfer += (*_iter469).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11323,14 +11609,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size452;
- ::apache::thrift::protocol::TType _etype455;
- iprot->readListBegin(_etype455, _size452);
- (*(this->success)).resize(_size452);
- uint32_t _i456;
- for (_i456 = 0; _i456 < _size452; ++_i456)
+ uint32_t _size470;
+ ::apache::thrift::protocol::TType _etype473;
+ iprot->readListBegin(_etype473, _size470);
+ (*(this->success)).resize(_size470);
+ uint32_t _i474;
+ for (_i474 = 0; _i474 < _size470; ++_i474)
{
- xfer += (*(this->success))[_i456].read(iprot);
+ xfer += (*(this->success))[_i474].read(iprot);
}
iprot->readListEnd();
}
@@ -11399,14 +11685,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->group_names.clear();
- uint32_t _size457;
- ::apache::thrift::protocol::TType _etype460;
- iprot->readListBegin(_etype460, _size457);
- this->group_names.resize(_size457);
- uint32_t _i461;
- for (_i461 = 0; _i461 < _size457; ++_i461)
+ uint32_t _size475;
+ ::apache::thrift::protocol::TType _etype478;
+ iprot->readListBegin(_etype478, _size475);
+ this->group_names.resize(_size475);
+ uint32_t _i479;
+ for (_i479 = 0; _i479 < _size475; ++_i479)
{
- xfer += iprot->readString(this->group_names[_i461]);
+ xfer += iprot->readString(this->group_names[_i479]);
}
iprot->readListEnd();
}
@@ -11439,10 +11725,10 @@
xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->group_names.size());
- std::vector ::const_iterator _iter462;
- for (_iter462 = this->group_names.begin(); _iter462 != this->group_names.end(); ++_iter462)
+ std::vector ::const_iterator _iter480;
+ for (_iter480 = this->group_names.begin(); _iter480 != this->group_names.end(); ++_iter480)
{
- xfer += oprot->writeString((*_iter462));
+ xfer += oprot->writeString((*_iter480));
}
xfer += oprot->writeListEnd();
}
@@ -11464,10 +11750,10 @@
xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, (*(this->group_names)).size());
- std::vector ::const_iterator _iter463;
- for (_iter463 = (*(this->group_names)).begin(); _iter463 != (*(this->group_names)).end(); ++_iter463)
+ std::vector ::const_iterator _iter481;
+ for (_iter481 = (*(this->group_names)).begin(); _iter481 != (*(this->group_names)).end(); ++_iter481)
{
- xfer += oprot->writeString((*_iter463));
+ xfer += oprot->writeString((*_iter481));
}
xfer += oprot->writeListEnd();
}
@@ -11623,9 +11909,9 @@
break;
case 2:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast464;
- xfer += iprot->readI32(ecast464);
- this->principal_type = (PrincipalType::type)ecast464;
+ int32_t ecast482;
+ xfer += iprot->readI32(ecast482);
+ this->principal_type = (PrincipalType::type)ecast482;
this->__isset.principal_type = true;
} else {
xfer += iprot->skip(ftype);
@@ -11709,14 +11995,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size465;
- ::apache::thrift::protocol::TType _etype468;
- iprot->readListBegin(_etype468, _size465);
- this->success.resize(_size465);
- uint32_t _i469;
- for (_i469 = 0; _i469 < _size465; ++_i469)
+ uint32_t _size483;
+ ::apache::thrift::protocol::TType _etype486;
+ iprot->readListBegin(_etype486, _size483);
+ this->success.resize(_size483);
+ uint32_t _i487;
+ for (_i487 = 0; _i487 < _size483; ++_i487)
{
- xfer += this->success[_i469].read(iprot);
+ xfer += this->success[_i487].read(iprot);
}
iprot->readListEnd();
}
@@ -11755,10 +12041,10 @@
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, this->success.size());
- std::vector ::const_iterator _iter470;
- for (_iter470 = this->success.begin(); _iter470 != this->success.end(); ++_iter470)
+ std::vector ::const_iterator _iter488;
+ for (_iter488 = this->success.begin(); _iter488 != this->success.end(); ++_iter488)
{
- xfer += (*_iter470).write(oprot);
+ xfer += (*_iter488).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11797,14 +12083,14 @@
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size471;
- ::apache::thrift::protocol::TType _etype474;
- iprot->readListBegin(_etype474, _size471);
- (*(this->success)).resize(_size471);
- uint32_t _i475;
- for (_i475 = 0; _i475 < _size471; ++_i475)
+ uint32_t _size489;
+ ::apache::thrift::protocol::TType _etype492;
+ iprot->readListBegin(_etype492, _size489);
+ (*(this->success)).resize(_size489);
+ uint32_t _i493;
+ for (_i493 = 0; _i493 < _size489; ++_i493)
{
- xfer += (*(this->success))[_i475].read(iprot);
+ xfer += (*(this->success))[_i493].read(iprot);
}
iprot->readListEnd();
}
@@ -15098,6 +15384,74 @@
throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
}
+void ThriftHiveMetastoreClient::get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names)
+{
+ send_get_partitions_by_names(db_name, tbl_name, names);
+ recv_get_partitions_by_names(_return);
+}
+
+void ThriftHiveMetastoreClient::send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ ThriftHiveMetastore_get_partitions_by_names_pargs args;
+ args.db_name = &db_name;
+ args.tbl_name = &tbl_name;
+ args.names = &names;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->flush();
+ oprot_->getTransport()->writeEnd();
+}
+
+void ThriftHiveMetastoreClient::recv_get_partitions_by_names(std::vector<Partition> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE);
+ }
+ if (fname.compare("get_partitions_by_names") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::WRONG_METHOD_NAME);
+ }
+ ThriftHiveMetastore_get_partitions_by_names_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.o1) {
+ throw result.o1;
+ }
+ if (result.__isset.o2) {
+ throw result.o2;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_names failed: unknown result");
+}
+
void ThriftHiveMetastoreClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part)
{
send_alter_partition(db_name, tbl_name, new_part);
@@ -17802,6 +18156,40 @@
oprot->getTransport()->writeEnd();
}
+void ThriftHiveMetastoreProcessor::process_get_partitions_by_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot)
+{
+ ThriftHiveMetastore_get_partitions_by_names_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ iprot->getTransport()->readEnd();
+
+ ThriftHiveMetastore_get_partitions_by_names_result result;
+ try {
+ iface_->get_partitions_by_names(result.success, args.db_name, args.tbl_name, args.names);
+ result.__isset.success = true;
+ } catch (MetaException &o1) {
+ result.o1 = o1;
+ result.__isset.o1 = true;
+ } catch (NoSuchObjectException &o2) {
+ result.o2 = o2;
+ result.__isset.o2 = true;
+ } catch (const std::exception& e) {
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->flush();
+ oprot->getTransport()->writeEnd();
+ return;
+ }
+
+ oprot->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->flush();
+ oprot->getTransport()->writeEnd();
+}
+
void ThriftHiveMetastoreProcessor::process_alter_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot)
{
ThriftHiveMetastore_alter_partition_args args;
Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
===================================================================
--- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h (revision 1085555)
+++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h (working copy)
@@ -48,6 +48,7 @@
virtual void get_partitions_ps_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names) = 0;
virtual void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts) = 0;
virtual void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) = 0;
+ virtual void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names) = 0;
virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0;
virtual void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) = 0;
virtual void partition_name_to_vals(std::vector<std::string> & _return, const std::string& part_name) = 0;
@@ -180,6 +181,9 @@
void get_partitions_by_filter(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* filter */, const int16_t /* max_parts */) {
return;
}
+ void get_partitions_by_names(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* names */) {
+ return;
+ }
void alter_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */) {
return;
}
@@ -4174,6 +4178,128 @@
};
+typedef struct _ThriftHiveMetastore_get_partitions_by_names_args__isset {
+ _ThriftHiveMetastore_get_partitions_by_names_args__isset() : db_name(false), tbl_name(false), names(false) {}
+ bool db_name;
+ bool tbl_name;
+ bool names;
+} _ThriftHiveMetastore_get_partitions_by_names_args__isset;
+
+class ThriftHiveMetastore_get_partitions_by_names_args {
+ public:
+
+ ThriftHiveMetastore_get_partitions_by_names_args() : db_name(""), tbl_name("") {
+ }
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_names_args() throw() {}
+
+ std::string db_name;
+ std::string tbl_name;
+ std::vector<std::string> names;
+
+ _ThriftHiveMetastore_get_partitions_by_names_args__isset __isset;
+
+ bool operator == (const ThriftHiveMetastore_get_partitions_by_names_args & rhs) const
+ {
+ if (!(db_name == rhs.db_name))
+ return false;
+ if (!(tbl_name == rhs.tbl_name))
+ return false;
+ if (!(names == rhs.names))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_get_partitions_by_names_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_get_partitions_by_names_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_partitions_by_names_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_names_pargs() throw() {}
+
+ const std::string* db_name;
+ const std::string* tbl_name;
+ const std::vector<std::string> * names;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_partitions_by_names_result__isset {
+ _ThriftHiveMetastore_get_partitions_by_names_result__isset() : success(false), o1(false), o2(false) {}
+ bool success;
+ bool o1;
+ bool o2;
+} _ThriftHiveMetastore_get_partitions_by_names_result__isset;
+
+class ThriftHiveMetastore_get_partitions_by_names_result {
+ public:
+
+ ThriftHiveMetastore_get_partitions_by_names_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_names_result() throw() {}
+
+ std::vector<Partition> success;
+ MetaException o1;
+ NoSuchObjectException o2;
+
+ _ThriftHiveMetastore_get_partitions_by_names_result__isset __isset;
+
+ bool operator == (const ThriftHiveMetastore_get_partitions_by_names_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_get_partitions_by_names_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_get_partitions_by_names_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_partitions_by_names_presult__isset {
+ _ThriftHiveMetastore_get_partitions_by_names_presult__isset() : success(false), o1(false), o2(false) {}
+ bool success;
+ bool o1;
+ bool o2;
+} _ThriftHiveMetastore_get_partitions_by_names_presult__isset;
+
+class ThriftHiveMetastore_get_partitions_by_names_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_names_presult() throw() {}
+
+ std::vector<Partition> * success;
+ MetaException o1;
+ NoSuchObjectException o2;
+
+ _ThriftHiveMetastore_get_partitions_by_names_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
typedef struct _ThriftHiveMetastore_alter_partition_args__isset {
_ThriftHiveMetastore_alter_partition_args__isset() : db_name(false), tbl_name(false), new_part(false) {}
bool db_name;
@@ -6988,6 +7114,9 @@
void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
void recv_get_partitions_by_filter(std::vector<Partition> & _return);
+ void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names);
+ void send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names);
+ void recv_get_partitions_by_names(std::vector<Partition> & _return);
void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
void recv_alter_partition();
@@ -7101,6 +7230,7 @@
void process_get_partitions_ps_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot);
void process_get_partition_names_ps(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot);
void process_get_partitions_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot);
+ void process_get_partitions_by_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot);
void process_alter_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot);
void process_get_config_value(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot);
void process_partition_name_to_vals(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot);
@@ -7162,6 +7292,7 @@
processMap_["get_partitions_ps_with_auth"] = &ThriftHiveMetastoreProcessor::process_get_partitions_ps_with_auth;
processMap_["get_partition_names_ps"] = &ThriftHiveMetastoreProcessor::process_get_partition_names_ps;
processMap_["get_partitions_by_filter"] = &ThriftHiveMetastoreProcessor::process_get_partitions_by_filter;
+ processMap_["get_partitions_by_names"] = &ThriftHiveMetastoreProcessor::process_get_partitions_by_names;
processMap_["alter_partition"] = &ThriftHiveMetastoreProcessor::process_alter_partition;
processMap_["get_config_value"] = &ThriftHiveMetastoreProcessor::process_get_config_value;
processMap_["partition_name_to_vals"] = &ThriftHiveMetastoreProcessor::process_partition_name_to_vals;
@@ -7571,6 +7702,18 @@
}
}
+ void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names) {
+ uint32_t sz = ifaces_.size();
+ for (uint32_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->get_partitions_by_names(_return, db_name, tbl_name, names);
+ return;
+ } else {
+ ifaces_[i]->get_partitions_by_names(_return, db_name, tbl_name, names);
+ }
+ }
+ }
+
void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) {
uint32_t sz = ifaces_.size();
for (uint32_t i = 0; i < sz; ++i) {
Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
===================================================================
--- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (revision 1085555)
+++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (working copy)
@@ -187,6 +187,11 @@
printf("get_partitions_by_filter\n");
}
+ void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names) {
+ // Your implementation goes here
+ printf("get_partitions_by_names\n");
+ }
+
void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) {
// Your implementation goes here
printf("alter_partition\n");
Index: metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
===================================================================
--- metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (revision 1085555)
+++ metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (working copy)
@@ -569,6 +569,23 @@
raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_filter failed: unknown result')
end
+ def get_partitions_by_names(db_name, tbl_name, names)
+ send_get_partitions_by_names(db_name, tbl_name, names)
+ return recv_get_partitions_by_names()
+ end
+
+ def send_get_partitions_by_names(db_name, tbl_name, names)
+ send_message('get_partitions_by_names', Get_partitions_by_names_args, :db_name => db_name, :tbl_name => tbl_name, :names => names)
+ end
+
+ def recv_get_partitions_by_names()
+ result = receive_message(Get_partitions_by_names_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_names failed: unknown result')
+ end
+
def alter_partition(db_name, tbl_name, new_part)
send_alter_partition(db_name, tbl_name, new_part)
recv_alter_partition()
@@ -1395,6 +1412,19 @@
write_result(result, oprot, 'get_partitions_by_filter', seqid)
end
+ def process_get_partitions_by_names(seqid, iprot, oprot)
+ args = read_args(iprot, Get_partitions_by_names_args)
+ result = Get_partitions_by_names_result.new()
+ begin
+ result.success = @handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names)
+ rescue MetaException => o1
+ result.o1 = o1
+ rescue NoSuchObjectException => o2
+ result.o2 = o2
+ end
+ write_result(result, oprot, 'get_partitions_by_names', seqid)
+ end
+
def process_alter_partition(seqid, iprot, oprot)
args = read_args(iprot, Alter_partition_args)
result = Alter_partition_result.new()
@@ -2956,6 +2986,46 @@
::Thrift::Struct.generate_accessors self
end
+ class Get_partitions_by_names_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ DB_NAME = 1
+ TBL_NAME = 2
+ NAMES = 3
+
+ FIELDS = {
+ DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
+ TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
+ NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Get_partitions_by_names_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => Partition}},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => MetaException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => NoSuchObjectException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
class Alter_partition_args
include ::Thrift::Struct, ::Thrift::Struct_Union
DB_NAME = 1
Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
===================================================================
--- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (revision 1085555)
+++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (working copy)
@@ -99,6 +99,8 @@
public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, TException;
+ public List<Partition> get_partitions_by_names(String db_name, String tbl_name, List<String> names) throws MetaException, NoSuchObjectException, TException;
+
public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException;
public String get_config_value(String name, String defaultValue) throws ConfigValSecurityException, TException;
@@ -217,6 +219,8 @@
public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, AsyncMethodCallback resultHandler) throws TException;
+ public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, AsyncMethodCallback resultHandler) throws TException;
+
public void alter_partition(String db_name, String tbl_name, Partition new_part, AsyncMethodCallback resultHandler) throws TException;
public void get_config_value(String name, String defaultValue, AsyncMethodCallback resultHandler) throws TException;
@@ -1711,6 +1715,50 @@
throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
}
+ public List<Partition> get_partitions_by_names(String db_name, String tbl_name, List<String> names) throws MetaException, NoSuchObjectException, TException
+ {
+ send_get_partitions_by_names(db_name, tbl_name, names);
+ return recv_get_partitions_by_names();
+ }
+
+ public void send_get_partitions_by_names(String db_name, String tbl_name, List<String> names) throws TException
+ {
+ oprot_.writeMessageBegin(new TMessage("get_partitions_by_names", TMessageType.CALL, ++seqid_));
+ get_partitions_by_names_args args = new get_partitions_by_names_args();
+ args.setDb_name(db_name);
+ args.setTbl_name(tbl_name);
+ args.setNames(names);
+ args.write(oprot_);
+ oprot_.writeMessageEnd();
+ oprot_.getTransport().flush();
+ }
+
+ public List<Partition> recv_get_partitions_by_names() throws MetaException, NoSuchObjectException, TException
+ {
+ TMessage msg = iprot_.readMessageBegin();
+ if (msg.type == TMessageType.EXCEPTION) {
+ TApplicationException x = TApplicationException.read(iprot_);
+ iprot_.readMessageEnd();
+ throw x;
+ }
+ if (msg.seqid != seqid_) {
+ throw new TApplicationException(TApplicationException.BAD_SEQUENCE_ID, "get_partitions_by_names failed: out of sequence response");
+ }
+ get_partitions_by_names_result result = new get_partitions_by_names_result();
+ result.read(iprot_);
+ iprot_.readMessageEnd();
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result");
+ }
+
public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException
{
send_alter_partition(db_name, tbl_name, new_part);
@@ -3875,6 +3923,43 @@
}
}
+ public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, AsyncMethodCallback resultHandler) throws TException {
+ checkReady();
+ get_partitions_by_names_call method_call = new get_partitions_by_names_call(db_name, tbl_name, names, resultHandler, this, protocolFactory, transport);
+ manager.call(method_call);
+ }
+
+ public static class get_partitions_by_names_call extends TAsyncMethodCall {
+ private String db_name;
+ private String tbl_name;
+ private List<String> names;
+ public get_partitions_by_names_call(String db_name, String tbl_name, List<String> names, AsyncMethodCallback resultHandler, TAsyncClient client, TProtocolFactory protocolFactory, TNonblockingTransport transport) throws TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.db_name = db_name;
+ this.tbl_name = tbl_name;
+ this.names = names;
+ }
+
+ public void write_args(TProtocol prot) throws TException {
+ prot.writeMessageBegin(new TMessage("get_partitions_by_names", TMessageType.CALL, 0));
+ get_partitions_by_names_args args = new get_partitions_by_names_args();
+ args.setDb_name(db_name);
+ args.setTbl_name(tbl_name);
+ args.setNames(names);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public List<Partition> getResult() throws MetaException, NoSuchObjectException, TException {
+ if (getState() != State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ TMemoryInputTransport memoryTransport = new TMemoryInputTransport(getFrameBuffer().array());
+ TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_get_partitions_by_names();
+ }
+ }
+
public void alter_partition(String db_name, String tbl_name, Partition new_part, AsyncMethodCallback resultHandler) throws TException {
checkReady();
alter_partition_call method_call = new alter_partition_call(db_name, tbl_name, new_part, resultHandler, this, protocolFactory, transport);
@@ -4744,6 +4829,7 @@
processMap_.put("get_partitions_ps_with_auth", new get_partitions_ps_with_auth());
processMap_.put("get_partition_names_ps", new get_partition_names_ps());
processMap_.put("get_partitions_by_filter", new get_partitions_by_filter());
+ processMap_.put("get_partitions_by_names", new get_partitions_by_names());
processMap_.put("alter_partition", new alter_partition());
processMap_.put("get_config_value", new get_config_value());
processMap_.put("partition_name_to_vals", new partition_name_to_vals());
@@ -6118,6 +6204,46 @@
}
+ private class get_partitions_by_names implements ProcessFunction {
+ public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException
+ {
+ get_partitions_by_names_args args = new get_partitions_by_names_args();
+ try {
+ args.read(iprot);
+ } catch (TProtocolException e) {
+ iprot.readMessageEnd();
+ TApplicationException x = new TApplicationException(TApplicationException.PROTOCOL_ERROR, e.getMessage());
+ oprot.writeMessageBegin(new TMessage("get_partitions_by_names", TMessageType.EXCEPTION, seqid));
+ x.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ return;
+ }
+ iprot.readMessageEnd();
+ get_partitions_by_names_result result = new get_partitions_by_names_result();
+ try {
+ result.success = iface_.get_partitions_by_names(args.db_name, args.tbl_name, args.names);
+ } catch (MetaException o1) {
+ result.o1 = o1;
+ } catch (NoSuchObjectException o2) {
+ result.o2 = o2;
+ } catch (Throwable th) {
+ LOGGER.error("Internal error processing get_partitions_by_names", th);
+ TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing get_partitions_by_names");
+ oprot.writeMessageBegin(new TMessage("get_partitions_by_names", TMessageType.EXCEPTION, seqid));
+ x.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ return;
+ }
+ oprot.writeMessageBegin(new TMessage("get_partitions_by_names", TMessageType.REPLY, seqid));
+ result.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ }
+
+ }
+
private class alter_partition implements ProcessFunction {
public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException
{
@@ -36048,6 +36174,988 @@
}
+ public static class get_partitions_by_names_args implements TBase<get_partitions_by_names_args, get_partitions_by_names_args._Fields>, java.io.Serializable, Cloneable {
+ private static final TStruct STRUCT_DESC = new TStruct("get_partitions_by_names_args");
+
+ private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1);
+ private static final TField TBL_NAME_FIELD_DESC = new TField("tbl_name", TType.STRING, (short)2);
+ private static final TField NAMES_FIELD_DESC = new TField("names", TType.LIST, (short)3);
+
+ private String db_name;
+ private String tbl_name;
+ private List<String> names;
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements TFieldIdEnum {
+ DB_NAME((short)1, "db_name"),
+ TBL_NAME((short)2, "tbl_name"),
+ NAMES((short)3, "names");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it's not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // DB_NAME
+ return DB_NAME;
+ case 2: // TBL_NAME
+ return TBL_NAME;
+ case 3: // NAMES
+ return NAMES;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if it's not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+
+ public static final Map<_Fields, FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, FieldMetaData> tmpMap = new EnumMap<_Fields, FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ tmpMap.put(_Fields.TBL_NAME, new FieldMetaData("tbl_name", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ tmpMap.put(_Fields.NAMES, new FieldMetaData("names", TFieldRequirementType.DEFAULT,
+ new ListMetaData(TType.LIST,
+ new FieldValueMetaData(TType.STRING))));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ FieldMetaData.addStructMetaDataMap(get_partitions_by_names_args.class, metaDataMap);
+ }
+
+ public get_partitions_by_names_args() {
+ }
+
+ public get_partitions_by_names_args(
+ String db_name,
+ String tbl_name,
+ List<String> names)
+ {
+ this();
+ this.db_name = db_name;
+ this.tbl_name = tbl_name;
+ this.names = names;
+ }
+
+ /**
+ * Performs a deep copy on other.
+ */
+ public get_partitions_by_names_args(get_partitions_by_names_args other) {
+ if (other.isSetDb_name()) {
+ this.db_name = other.db_name;
+ }
+ if (other.isSetTbl_name()) {
+ this.tbl_name = other.tbl_name;
+ }
+ if (other.isSetNames()) {
+ List<String> __this__names = new ArrayList<String>();
+ for (String other_element : other.names) {
+ __this__names.add(other_element);
+ }
+ this.names = __this__names;
+ }
+ }
+
+ public get_partitions_by_names_args deepCopy() {
+ return new get_partitions_by_names_args(this);
+ }
+
+ @Override
+ public void clear() {
+ this.db_name = null;
+ this.tbl_name = null;
+ this.names = null;
+ }
+
+ public String getDb_name() {
+ return this.db_name;
+ }
+
+ public void setDb_name(String db_name) {
+ this.db_name = db_name;
+ }
+
+ public void unsetDb_name() {
+ this.db_name = null;
+ }
+
+ /** Returns true if field db_name is set (has been assigned a value) and false otherwise */
+ public boolean isSetDb_name() {
+ return this.db_name != null;
+ }
+
+ public void setDb_nameIsSet(boolean value) {
+ if (!value) {
+ this.db_name = null;
+ }
+ }
+
+ public String getTbl_name() {
+ return this.tbl_name;
+ }
+
+ public void setTbl_name(String tbl_name) {
+ this.tbl_name = tbl_name;
+ }
+
+ public void unsetTbl_name() {
+ this.tbl_name = null;
+ }
+
+ /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */
+ public boolean isSetTbl_name() {
+ return this.tbl_name != null;
+ }
+
+ public void setTbl_nameIsSet(boolean value) {
+ if (!value) {
+ this.tbl_name = null;
+ }
+ }
+
+ public int getNamesSize() {
+ return (this.names == null) ? 0 : this.names.size();
+ }
+
+ public java.util.Iterator<String> getNamesIterator() {
+ return (this.names == null) ? null : this.names.iterator();
+ }
+
+ public void addToNames(String elem) {
+ if (this.names == null) {
+ this.names = new ArrayList<String>();
+ }
+ this.names.add(elem);
+ }
+
+ public List<String> getNames() {
+ return this.names;
+ }
+
+ public void setNames(List<String> names) {
+ this.names = names;
+ }
+
+ public void unsetNames() {
+ this.names = null;
+ }
+
+ /** Returns true if field names is set (has been assigned a value) and false otherwise */
+ public boolean isSetNames() {
+ return this.names != null;
+ }
+
+ public void setNamesIsSet(boolean value) {
+ if (!value) {
+ this.names = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case DB_NAME:
+ if (value == null) {
+ unsetDb_name();
+ } else {
+ setDb_name((String)value);
+ }
+ break;
+
+ case TBL_NAME:
+ if (value == null) {
+ unsetTbl_name();
+ } else {
+ setTbl_name((String)value);
+ }
+ break;
+
+ case NAMES:
+ if (value == null) {
+ unsetNames();
+ } else {
+ setNames((List<String>)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case DB_NAME:
+ return getDb_name();
+
+ case TBL_NAME:
+ return getTbl_name();
+
+ case NAMES:
+ return getNames();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case DB_NAME:
+ return isSetDb_name();
+ case TBL_NAME:
+ return isSetTbl_name();
+ case NAMES:
+ return isSetNames();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof get_partitions_by_names_args)
+ return this.equals((get_partitions_by_names_args)that);
+ return false;
+ }
+
+ public boolean equals(get_partitions_by_names_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_db_name = true && this.isSetDb_name();
+ boolean that_present_db_name = true && that.isSetDb_name();
+ if (this_present_db_name || that_present_db_name) {
+ if (!(this_present_db_name && that_present_db_name))
+ return false;
+ if (!this.db_name.equals(that.db_name))
+ return false;
+ }
+
+ boolean this_present_tbl_name = true && this.isSetTbl_name();
+ boolean that_present_tbl_name = true && that.isSetTbl_name();
+ if (this_present_tbl_name || that_present_tbl_name) {
+ if (!(this_present_tbl_name && that_present_tbl_name))
+ return false;
+ if (!this.tbl_name.equals(that.tbl_name))
+ return false;
+ }
+
+ boolean this_present_names = true && this.isSetNames();
+ boolean that_present_names = true && that.isSetNames();
+ if (this_present_names || that_present_names) {
+ if (!(this_present_names && that_present_names))
+ return false;
+ if (!this.names.equals(that.names))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public int compareTo(get_partitions_by_names_args other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+ get_partitions_by_names_args typedOther = (get_partitions_by_names_args)other;
+
+ lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(typedOther.isSetDb_name());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetDb_name()) {
+ lastComparison = TBaseHelper.compareTo(this.db_name, typedOther.db_name);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(typedOther.isSetTbl_name());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTbl_name()) {
+ lastComparison = TBaseHelper.compareTo(this.tbl_name, typedOther.tbl_name);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetNames()).compareTo(typedOther.isSetNames());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetNames()) {
+ lastComparison = TBaseHelper.compareTo(this.names, typedOther.names);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(TProtocol iprot) throws TException {
+ TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == TType.STOP) {
+ break;
+ }
+ switch (field.id) {
+ case 1: // DB_NAME
+ if (field.type == TType.STRING) {
+ this.db_name = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 2: // TBL_NAME
+ if (field.type == TType.STRING) {
+ this.tbl_name = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 3: // NAMES
+ if (field.type == TType.LIST) {
+ {
+ TList _list195 = iprot.readListBegin();
+ this.names = new ArrayList<String>(_list195.size);
+ for (int _i196 = 0; _i196 < _list195.size; ++_i196)
+ {
+ String _elem197;
+ _elem197 = iprot.readString();
+ this.names.add(_elem197);
+ }
+ iprot.readListEnd();
+ }
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ validate();
+ }
+
+ public void write(TProtocol oprot) throws TException {
+ validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (this.db_name != null) {
+ oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+ oprot.writeString(this.db_name);
+ oprot.writeFieldEnd();
+ }
+ if (this.tbl_name != null) {
+ oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+ oprot.writeString(this.tbl_name);
+ oprot.writeFieldEnd();
+ }
+ if (this.names != null) {
+ oprot.writeFieldBegin(NAMES_FIELD_DESC);
+ {
+ oprot.writeListBegin(new TList(TType.STRING, this.names.size()));
+ for (String _iter198 : this.names)
+ {
+ oprot.writeString(_iter198);
+ }
+ oprot.writeListEnd();
+ }
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("get_partitions_by_names_args(");
+ boolean first = true;
+
+ sb.append("db_name:");
+ if (this.db_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.db_name);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("tbl_name:");
+ if (this.tbl_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.tbl_name);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("names:");
+ if (this.names == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.names);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws TException {
+ // check for required fields
+ }
+
+ }
+
+ public static class get_partitions_by_names_result implements TBase<get_partitions_by_names_result, get_partitions_by_names_result._Fields>, java.io.Serializable, Cloneable {
+ private static final TStruct STRUCT_DESC = new TStruct("get_partitions_by_names_result");
+
+ private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0);
+ private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1);
+ private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2);
+
+ private List<Partition> success;
+ private MetaException o1;
+ private NoSuchObjectException o2;
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements TFieldIdEnum {
+ SUCCESS((short)0, "success"),
+ O1((short)1, "o1"),
+ O2((short)2, "o2");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it's not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 0: // SUCCESS
+ return SUCCESS;
+ case 1: // O1
+ return O1;
+ case 2: // O2
+ return O2;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if it's not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+
+ public static final Map<_Fields, FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, FieldMetaData> tmpMap = new EnumMap<_Fields, FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT,
+ new ListMetaData(TType.LIST,
+ new StructMetaData(TType.STRUCT, Partition.class))));
+ tmpMap.put(_Fields.O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRUCT)));
+ tmpMap.put(_Fields.O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRUCT)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ FieldMetaData.addStructMetaDataMap(get_partitions_by_names_result.class, metaDataMap);
+ }
+
+ public get_partitions_by_names_result() {
+ }
+
+ public get_partitions_by_names_result(
+ List<Partition> success,
+ MetaException o1,
+ NoSuchObjectException o2)
+ {
+ this();
+ this.success = success;
+ this.o1 = o1;
+ this.o2 = o2;
+ }
+
+ /**
+ * Performs a deep copy on other.
+ */
+ public get_partitions_by_names_result(get_partitions_by_names_result other) {
+ if (other.isSetSuccess()) {
+ List<Partition> __this__success = new ArrayList<Partition>();
+ for (Partition other_element : other.success) {
+ __this__success.add(new Partition(other_element));
+ }
+ this.success = __this__success;
+ }
+ if (other.isSetO1()) {
+ this.o1 = new MetaException(other.o1);
+ }
+ if (other.isSetO2()) {
+ this.o2 = new NoSuchObjectException(other.o2);
+ }
+ }
+
+ public get_partitions_by_names_result deepCopy() {
+ return new get_partitions_by_names_result(this);
+ }
+
+ @Override
+ public void clear() {
+ this.success = null;
+ this.o1 = null;
+ this.o2 = null;
+ }
+
+ public int getSuccessSize() {
+ return (this.success == null) ? 0 : this.success.size();
+ }
+
+ public java.util.Iterator<Partition> getSuccessIterator() {
+ return (this.success == null) ? null : this.success.iterator();
+ }
+
+ public void addToSuccess(Partition elem) {
+ if (this.success == null) {
+ this.success = new ArrayList<Partition>();
+ }
+ this.success.add(elem);
+ }
+
+ public List<Partition> getSuccess() {
+ return this.success;
+ }
+
+ public void setSuccess(List<Partition> success) {
+ this.success = success;
+ }
+
+ public void unsetSuccess() {
+ this.success = null;
+ }
+
+ /** Returns true if field success is set (has been assigned a value) and false otherwise */
+ public boolean isSetSuccess() {
+ return this.success != null;
+ }
+
+ public void setSuccessIsSet(boolean value) {
+ if (!value) {
+ this.success = null;
+ }
+ }
+
+ public MetaException getO1() {
+ return this.o1;
+ }
+
+ public void setO1(MetaException o1) {
+ this.o1 = o1;
+ }
+
+ public void unsetO1() {
+ this.o1 = null;
+ }
+
+ /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
+ public boolean isSetO1() {
+ return this.o1 != null;
+ }
+
+ public void setO1IsSet(boolean value) {
+ if (!value) {
+ this.o1 = null;
+ }
+ }
+
+ public NoSuchObjectException getO2() {
+ return this.o2;
+ }
+
+ public void setO2(NoSuchObjectException o2) {
+ this.o2 = o2;
+ }
+
+ public void unsetO2() {
+ this.o2 = null;
+ }
+
+ /** Returns true if field o2 is set (has been assigned a value) and false otherwise */
+ public boolean isSetO2() {
+ return this.o2 != null;
+ }
+
+ public void setO2IsSet(boolean value) {
+ if (!value) {
+ this.o2 = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case SUCCESS:
+ if (value == null) {
+ unsetSuccess();
+ } else {
+ setSuccess((List<Partition>)value);
+ }
+ break;
+
+ case O1:
+ if (value == null) {
+ unsetO1();
+ } else {
+ setO1((MetaException)value);
+ }
+ break;
+
+ case O2:
+ if (value == null) {
+ unsetO2();
+ } else {
+ setO2((NoSuchObjectException)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case SUCCESS:
+ return getSuccess();
+
+ case O1:
+ return getO1();
+
+ case O2:
+ return getO2();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case SUCCESS:
+ return isSetSuccess();
+ case O1:
+ return isSetO1();
+ case O2:
+ return isSetO2();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof get_partitions_by_names_result)
+ return this.equals((get_partitions_by_names_result)that);
+ return false;
+ }
+
+ public boolean equals(get_partitions_by_names_result that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_success = true && this.isSetSuccess();
+ boolean that_present_success = true && that.isSetSuccess();
+ if (this_present_success || that_present_success) {
+ if (!(this_present_success && that_present_success))
+ return false;
+ if (!this.success.equals(that.success))
+ return false;
+ }
+
+ boolean this_present_o1 = true && this.isSetO1();
+ boolean that_present_o1 = true && that.isSetO1();
+ if (this_present_o1 || that_present_o1) {
+ if (!(this_present_o1 && that_present_o1))
+ return false;
+ if (!this.o1.equals(that.o1))
+ return false;
+ }
+
+ boolean this_present_o2 = true && this.isSetO2();
+ boolean that_present_o2 = true && that.isSetO2();
+ if (this_present_o2 || that_present_o2) {
+ if (!(this_present_o2 && that_present_o2))
+ return false;
+ if (!this.o2.equals(that.o2))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public int compareTo(get_partitions_by_names_result other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+ get_partitions_by_names_result typedOther = (get_partitions_by_names_result)other;
+
+ lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetSuccess()) {
+ lastComparison = TBaseHelper.compareTo(this.success, typedOther.success);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetO1()) {
+ lastComparison = TBaseHelper.compareTo(this.o1, typedOther.o1);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetO2()).compareTo(typedOther.isSetO2());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetO2()) {
+ lastComparison = TBaseHelper.compareTo(this.o2, typedOther.o2);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(TProtocol iprot) throws TException {
+ TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == TType.STOP) {
+ break;
+ }
+ switch (field.id) {
+ case 0: // SUCCESS
+ if (field.type == TType.LIST) {
+ {
+ TList _list199 = iprot.readListBegin();
+ this.success = new ArrayList<Partition>(_list199.size);
+ for (int _i200 = 0; _i200 < _list199.size; ++_i200)
+ {
+ Partition _elem201;
+ _elem201 = new Partition();
+ _elem201.read(iprot);
+ this.success.add(_elem201);
+ }
+ iprot.readListEnd();
+ }
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 1: // O1
+ if (field.type == TType.STRUCT) {
+ this.o1 = new MetaException();
+ this.o1.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 2: // O2
+ if (field.type == TType.STRUCT) {
+ this.o2 = new NoSuchObjectException();
+ this.o2.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ validate();
+ }
+
+ public void write(TProtocol oprot) throws TException {
+ oprot.writeStructBegin(STRUCT_DESC);
+
+ if (this.isSetSuccess()) {
+ oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+ {
+ oprot.writeListBegin(new TList(TType.STRUCT, this.success.size()));
+ for (Partition _iter202 : this.success)
+ {
+ _iter202.write(oprot);
+ }
+ oprot.writeListEnd();
+ }
+ oprot.writeFieldEnd();
+ } else if (this.isSetO1()) {
+ oprot.writeFieldBegin(O1_FIELD_DESC);
+ this.o1.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetO2()) {
+ oprot.writeFieldBegin(O2_FIELD_DESC);
+ this.o2.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("get_partitions_by_names_result(");
+ boolean first = true;
+
+ sb.append("success:");
+ if (this.success == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.success);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o1:");
+ if (this.o1 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o1);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o2:");
+ if (this.o2 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o2);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws TException {
+ // check for required fields
+ }
+
+ }
+
public static class alter_partition_args implements TBase, java.io.Serializable, Cloneable {
private static final TStruct STRUCT_DESC = new TStruct("alter_partition_args");
@@ -38196,13 +39304,13 @@
case 0: // SUCCESS
if (field.type == TType.LIST) {
{
- TList _list195 = iprot.readListBegin();
- this.success = new ArrayList<String>(_list195.size);
- for (int _i196 = 0; _i196 < _list195.size; ++_i196)
+ TList _list203 = iprot.readListBegin();
+ this.success = new ArrayList<String>(_list203.size);
+ for (int _i204 = 0; _i204 < _list203.size; ++_i204)
{
- String _elem197;
- _elem197 = iprot.readString();
- this.success.add(_elem197);
+ String _elem205;
+ _elem205 = iprot.readString();
+ this.success.add(_elem205);
}
iprot.readListEnd();
}
@@ -38234,9 +39342,9 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRING, this.success.size()));
- for (String _iter198 : this.success)
+ for (String _iter206 : this.success)
{
- oprot.writeString(_iter198);
+ oprot.writeString(_iter206);
}
oprot.writeListEnd();
}
@@ -38881,15 +39989,15 @@
case 0: // SUCCESS
if (field.type == TType.MAP) {
{
- TMap _map199 = iprot.readMapBegin();
- this.success = new HashMap<String,String>(2*_map199.size);
- for (int _i200 = 0; _i200 < _map199.size; ++_i200)
+ TMap _map207 = iprot.readMapBegin();
+ this.success = new HashMap<String,String>(2*_map207.size);
+ for (int _i208 = 0; _i208 < _map207.size; ++_i208)
{
- String _key201;
- String _val202;
- _key201 = iprot.readString();
- _val202 = iprot.readString();
- this.success.put(_key201, _val202);
+ String _key209;
+ String _val210;
+ _key209 = iprot.readString();
+ _val210 = iprot.readString();
+ this.success.put(_key209, _val210);
}
iprot.readMapEnd();
}
@@ -38921,10 +40029,10 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeMapBegin(new TMap(TType.STRING, TType.STRING, this.success.size()));
- for (Map.Entry<String, String> _iter203 : this.success.entrySet())
+ for (Map.Entry<String, String> _iter211 : this.success.entrySet())
{
- oprot.writeString(_iter203.getKey());
- oprot.writeString(_iter203.getValue());
+ oprot.writeString(_iter211.getKey());
+ oprot.writeString(_iter211.getValue());
}
oprot.writeMapEnd();
}
@@ -43528,14 +44636,14 @@
case 0: // SUCCESS
if (field.type == TType.LIST) {
{
- TList _list204 = iprot.readListBegin();
- this.success = new ArrayList<Index>(_list204.size);
- for (int _i205 = 0; _i205 < _list204.size; ++_i205)
+ TList _list212 = iprot.readListBegin();
+ this.success = new ArrayList<Index>(_list212.size);
+ for (int _i213 = 0; _i213 < _list212.size; ++_i213)
{
- Index _elem206;
- _elem206 = new Index();
- _elem206.read(iprot);
- this.success.add(_elem206);
+ Index _elem214;
+ _elem214 = new Index();
+ _elem214.read(iprot);
+ this.success.add(_elem214);
}
iprot.readListEnd();
}
@@ -43575,9 +44683,9 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRUCT, this.success.size()));
- for (Index _iter207 : this.success)
+ for (Index _iter215 : this.success)
{
- _iter207.write(oprot);
+ _iter215.write(oprot);
}
oprot.writeListEnd();
}
@@ -44405,13 +45513,13 @@
case 0: // SUCCESS
if (field.type == TType.LIST) {
{
- TList _list208 = iprot.readListBegin();
- this.success = new ArrayList<String>(_list208.size);
- for (int _i209 = 0; _i209 < _list208.size; ++_i209)
+ TList _list216 = iprot.readListBegin();
+ this.success = new ArrayList<String>(_list216.size);
+ for (int _i217 = 0; _i217 < _list216.size; ++_i217)
{
- String _elem210;
- _elem210 = iprot.readString();
- this.success.add(_elem210);
+ String _elem218;
+ _elem218 = iprot.readString();
+ this.success.add(_elem218);
}
iprot.readListEnd();
}
@@ -44443,9 +45551,9 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRING, this.success.size()));
- for (String _iter211 : this.success)
+ for (String _iter219 : this.success)
{
- oprot.writeString(_iter211);
+ oprot.writeString(_iter219);
}
oprot.writeListEnd();
}
@@ -46278,13 +47386,13 @@
case 0: // SUCCESS
if (field.type == TType.LIST) {
{
- TList _list212 = iprot.readListBegin();
- this.success = new ArrayList<String>(_list212.size);
- for (int _i213 = 0; _i213 < _list212.size; ++_i213)
+ TList _list220 = iprot.readListBegin();
+ this.success = new ArrayList<String>(_list220.size);
+ for (int _i221 = 0; _i221 < _list220.size; ++_i221)
{
- String _elem214;
- _elem214 = iprot.readString();
- this.success.add(_elem214);
+ String _elem222;
+ _elem222 = iprot.readString();
+ this.success.add(_elem222);
}
iprot.readListEnd();
}
@@ -46316,9 +47424,9 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRING, this.success.size()));
- for (String _iter215 : this.success)
+ for (String _iter223 : this.success)
{
- oprot.writeString(_iter215);
+ oprot.writeString(_iter223);
}
oprot.writeListEnd();
}
@@ -48994,14 +50102,14 @@
case 0: // SUCCESS
if (field.type == TType.LIST) {
{
- TList _list216 = iprot.readListBegin();
- this.success = new ArrayList<Role>(_list216.size);
- for (int _i217 = 0; _i217 < _list216.size; ++_i217)
+ TList _list224 = iprot.readListBegin();
+ this.success = new ArrayList<Role>(_list224.size);
+ for (int _i225 = 0; _i225 < _list224.size; ++_i225)
{
- Role _elem218;
- _elem218 = new Role();
- _elem218.read(iprot);
- this.success.add(_elem218);
+ Role _elem226;
+ _elem226 = new Role();
+ _elem226.read(iprot);
+ this.success.add(_elem226);
}
iprot.readListEnd();
}
@@ -49033,9 +50141,9 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRUCT, this.success.size()));
- for (Role _iter219 : this.success)
+ for (Role _iter227 : this.success)
{
- _iter219.write(oprot);
+ _iter227.write(oprot);
}
oprot.writeListEnd();
}
@@ -49480,13 +50588,13 @@
case 3: // GROUP_NAMES
if (field.type == TType.LIST) {
{
- TList _list220 = iprot.readListBegin();
- this.group_names = new ArrayList<String>(_list220.size);
- for (int _i221 = 0; _i221 < _list220.size; ++_i221)
+ TList _list228 = iprot.readListBegin();
+ this.group_names = new ArrayList<String>(_list228.size);
+ for (int _i229 = 0; _i229 < _list228.size; ++_i229)
{
- String _elem222;
- _elem222 = iprot.readString();
- this.group_names.add(_elem222);
+ String _elem230;
+ _elem230 = iprot.readString();
+ this.group_names.add(_elem230);
}
iprot.readListEnd();
}
@@ -49521,9 +50629,9 @@
oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRING, this.group_names.size()));
- for (String _iter223 : this.group_names)
+ for (String _iter231 : this.group_names)
{
- oprot.writeString(_iter223);
+ oprot.writeString(_iter231);
}
oprot.writeListEnd();
}
@@ -50722,14 +51830,14 @@
case 0: // SUCCESS
if (field.type == TType.LIST) {
{
- TList _list224 = iprot.readListBegin();
- this.success = new ArrayList<HiveObjectPrivilege>(_list224.size);
- for (int _i225 = 0; _i225 < _list224.size; ++_i225)
+ TList _list232 = iprot.readListBegin();
+ this.success = new ArrayList<HiveObjectPrivilege>(_list232.size);
+ for (int _i233 = 0; _i233 < _list232.size; ++_i233)
{
- HiveObjectPrivilege _elem226;
- _elem226 = new HiveObjectPrivilege();
- _elem226.read(iprot);
- this.success.add(_elem226);
+ HiveObjectPrivilege _elem234;
+ _elem234 = new HiveObjectPrivilege();
+ _elem234.read(iprot);
+ this.success.add(_elem234);
}
iprot.readListEnd();
}
@@ -50761,9 +51869,9 @@
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRUCT, this.success.size()));
- for (HiveObjectPrivilege _iter227 : this.success)
+ for (HiveObjectPrivilege _iter235 : this.success)
{
- _iter227.write(oprot);
+ _iter235.write(oprot);
}
oprot.writeListEnd();
}
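
For reference only (not part of the patch): a minimal sketch of how the newly generated Java client method could be invoked against a running metastore Thrift endpoint. The host, port, database, table, and partition names below are assumptions chosen for illustration, not values taken from this change; error handling for MetaException and NoSuchObjectException is omitted for brevity.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class GetPartitionsByNamesSketch {
  public static void main(String[] args) throws Exception {
    // Assumed metastore Thrift endpoint; host and port are placeholders.
    TTransport transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // Partition names use the metastore's key=value[/key=value...] encoding;
    // the database, table, and names here are made up for this example.
    List<String> names = Arrays.asList("ds=2011-04-01/hr=00", "ds=2011-04-01/hr=01");
    List<Partition> parts = client.get_partitions_by_names("default", "page_view", names);
    System.out.println("Fetched " + parts.size() + " partitions in a single call");

    transport.close();
  }
}
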
Index: metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php
===================================================================
--- metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (revision 1085555)
+++ metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (working copy)
@@ -43,6 +43,7 @@
public function get_partitions_ps_with_auth($db_name, $tbl_name, $part_vals, $max_parts, $user_name, $group_names);
public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts);
public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
+ public function get_partitions_by_names($db_name, $tbl_name, $names);
public function alter_partition($db_name, $tbl_name, $new_part);
public function get_config_value($name, $defaultValue);
public function partition_name_to_vals($part_name);
@@ -1992,6 +1993,65 @@
throw new Exception("get_partitions_by_filter failed: unknown result");
}
+ public function get_partitions_by_names($db_name, $tbl_name, $names)
+ {
+ $this->send_get_partitions_by_names($db_name, $tbl_name, $names);
+ return $this->recv_get_partitions_by_names();
+ }
+
+ public function send_get_partitions_by_names($db_name, $tbl_name, $names)
+ {
+ $args = new metastore_ThriftHiveMetastore_get_partitions_by_names_args();
+ $args->db_name = $db_name;
+ $args->tbl_name = $tbl_name;
+ $args->names = $names;
+ $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'get_partitions_by_names', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('get_partitions_by_names', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
+
+ public function recv_get_partitions_by_names()
+ {
+ $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'metastore_ThriftHiveMetastore_get_partitions_by_names_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
+ }
+ $result = new metastore_ThriftHiveMetastore_get_partitions_by_names_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->success !== null) {
+ return $result->success;
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ throw new Exception("get_partitions_by_names failed: unknown result");
+ }
+
public function alter_partition($db_name, $tbl_name, $new_part)
{
$this->send_alter_partition($db_name, $tbl_name, $new_part);
@@ -11161,6 +11221,288 @@
}
+class metastore_ThriftHiveMetastore_get_partitions_by_names_args {
+ static $_TSPEC;
+
+ public $db_name = null;
+ public $tbl_name = null;
+ public $names = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'db_name',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'tbl_name',
+ 'type' => TType::STRING,
+ ),
+ 3 => array(
+ 'var' => 'names',
+ 'type' => TType::LST,
+ 'etype' => TType::STRING,
+ 'elem' => array(
+ 'type' => TType::STRING,
+ ),
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['db_name'])) {
+ $this->db_name = $vals['db_name'];
+ }
+ if (isset($vals['tbl_name'])) {
+ $this->tbl_name = $vals['tbl_name'];
+ }
+ if (isset($vals['names'])) {
+ $this->names = $vals['names'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_partitions_by_names_args';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->db_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->tbl_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::LST) {
+ $this->names = array();
+ $_size344 = 0;
+ $_etype347 = 0;
+ $xfer += $input->readListBegin($_etype347, $_size344);
+ for ($_i348 = 0; $_i348 < $_size344; ++$_i348)
+ {
+ $elem349 = null;
+ $xfer += $input->readString($elem349);
+ $this->names []= $elem349;
+ }
+ $xfer += $input->readListEnd();
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_by_names_args');
+ if ($this->db_name !== null) {
+ $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1);
+ $xfer += $output->writeString($this->db_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->tbl_name !== null) {
+ $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2);
+ $xfer += $output->writeString($this->tbl_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->names !== null) {
+ if (!is_array($this->names)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('names', TType::LST, 3);
+ {
+ $output->writeListBegin(TType::STRING, count($this->names));
+ {
+ foreach ($this->names as $iter350)
+ {
+ $xfer += $output->writeString($iter350);
+ }
+ }
+ $output->writeListEnd();
+ }
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class metastore_ThriftHiveMetastore_get_partitions_by_names_result {
+ static $_TSPEC;
+
+ public $success = null;
+ public $o1 = null;
+ public $o2 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::LST,
+ 'etype' => TType::STRUCT,
+ 'elem' => array(
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_Partition',
+ ),
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_MetaException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_NoSuchObjectException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_partitions_by_names_result';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::LST) {
+ $this->success = array();
+ $_size351 = 0;
+ $_etype354 = 0;
+ $xfer += $input->readListBegin($_etype354, $_size351);
+ for ($_i355 = 0; $_i355 < $_size351; ++$_i355)
+ {
+ $elem356 = null;
+ $elem356 = new metastore_Partition();
+ $xfer += $elem356->read($input);
+ $this->success []= $elem356;
+ }
+ $xfer += $input->readListEnd();
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new metastore_MetaException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new metastore_NoSuchObjectException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_by_names_result');
+ if ($this->success !== null) {
+ if (!is_array($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+ {
+ $output->writeListBegin(TType::STRUCT, count($this->success));
+ {
+ foreach ($this->success as $iter357)
+ {
+ $xfer += $iter357->write($output);
+ }
+ }
+ $output->writeListEnd();
+ }
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
class metastore_ThriftHiveMetastore_alter_partition_args {
static $_TSPEC;
@@ -11688,14 +12030,14 @@
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size344 = 0;
- $_etype347 = 0;
- $xfer += $input->readListBegin($_etype347, $_size344);
- for ($_i348 = 0; $_i348 < $_size344; ++$_i348)
+ $_size358 = 0;
+ $_etype361 = 0;
+ $xfer += $input->readListBegin($_etype361, $_size358);
+ for ($_i362 = 0; $_i362 < $_size358; ++$_i362)
{
- $elem349 = null;
- $xfer += $input->readString($elem349);
- $this->success []= $elem349;
+ $elem363 = null;
+ $xfer += $input->readString($elem363);
+ $this->success []= $elem363;
}
$xfer += $input->readListEnd();
} else {
@@ -11731,9 +12073,9 @@
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter350)
+ foreach ($this->success as $iter364)
{
- $xfer += $output->writeString($iter350);
+ $xfer += $output->writeString($iter364);
}
}
$output->writeListEnd();
@@ -11884,17 +12226,17 @@
case 0:
if ($ftype == TType::MAP) {
$this->success = array();
- $_size351 = 0;
- $_ktype352 = 0;
- $_vtype353 = 0;
- $xfer += $input->readMapBegin($_ktype352, $_vtype353, $_size351);
- for ($_i355 = 0; $_i355 < $_size351; ++$_i355)
+ $_size365 = 0;
+ $_ktype366 = 0;
+ $_vtype367 = 0;
+ $xfer += $input->readMapBegin($_ktype366, $_vtype367, $_size365);
+ for ($_i369 = 0; $_i369 < $_size365; ++$_i369)
{
- $key356 = '';
- $val357 = '';
- $xfer += $input->readString($key356);
- $xfer += $input->readString($val357);
- $this->success[$key356] = $val357;
+ $key370 = '';
+ $val371 = '';
+ $xfer += $input->readString($key370);
+ $xfer += $input->readString($val371);
+ $this->success[$key370] = $val371;
}
$xfer += $input->readMapEnd();
} else {
@@ -11930,10 +12272,10 @@
{
$output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
{
- foreach ($this->success as $kiter358 => $viter359)
+ foreach ($this->success as $kiter372 => $viter373)
{
- $xfer += $output->writeString($kiter358);
- $xfer += $output->writeString($viter359);
+ $xfer += $output->writeString($kiter372);
+ $xfer += $output->writeString($viter373);
}
}
$output->writeMapEnd();
@@ -13089,15 +13431,15 @@
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size360 = 0;
- $_etype363 = 0;
- $xfer += $input->readListBegin($_etype363, $_size360);
- for ($_i364 = 0; $_i364 < $_size360; ++$_i364)
+ $_size374 = 0;
+ $_etype377 = 0;
+ $xfer += $input->readListBegin($_etype377, $_size374);
+ for ($_i378 = 0; $_i378 < $_size374; ++$_i378)
{
- $elem365 = null;
- $elem365 = new metastore_Index();
- $xfer += $elem365->read($input);
- $this->success []= $elem365;
+ $elem379 = null;
+ $elem379 = new metastore_Index();
+ $xfer += $elem379->read($input);
+ $this->success []= $elem379;
}
$xfer += $input->readListEnd();
} else {
@@ -13141,9 +13483,9 @@
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter366)
+ foreach ($this->success as $iter380)
{
- $xfer += $iter366->write($output);
+ $xfer += $iter380->write($output);
}
}
$output->writeListEnd();
@@ -13335,14 +13677,14 @@
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size367 = 0;
- $_etype370 = 0;
- $xfer += $input->readListBegin($_etype370, $_size367);
- for ($_i371 = 0; $_i371 < $_size367; ++$_i371)
+ $_size381 = 0;
+ $_etype384 = 0;
+ $xfer += $input->readListBegin($_etype384, $_size381);
+ for ($_i385 = 0; $_i385 < $_size381; ++$_i385)
{
- $elem372 = null;
- $xfer += $input->readString($elem372);
- $this->success []= $elem372;
+ $elem386 = null;
+ $xfer += $input->readString($elem386);
+ $this->success []= $elem386;
}
$xfer += $input->readListEnd();
} else {
@@ -13378,9 +13720,9 @@
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter373)
+ foreach ($this->success as $iter387)
{
- $xfer += $output->writeString($iter373);
+ $xfer += $output->writeString($iter387);
}
}
$output->writeListEnd();
@@ -13842,14 +14184,14 @@
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size374 = 0;
- $_etype377 = 0;
- $xfer += $input->readListBegin($_etype377, $_size374);
- for ($_i378 = 0; $_i378 < $_size374; ++$_i378)
+ $_size388 = 0;
+ $_etype391 = 0;
+ $xfer += $input->readListBegin($_etype391, $_size388);
+ for ($_i392 = 0; $_i392 < $_size388; ++$_i392)
{
- $elem379 = null;
- $xfer += $input->readString($elem379);
- $this->success []= $elem379;
+ $elem393 = null;
+ $xfer += $input->readString($elem393);
+ $this->success []= $elem393;
}
$xfer += $input->readListEnd();
} else {
@@ -13885,9 +14227,9 @@
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter380)
+ foreach ($this->success as $iter394)
{
- $xfer += $output->writeString($iter380);
+ $xfer += $output->writeString($iter394);
}
}
$output->writeListEnd();
@@ -14527,15 +14869,15 @@
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size381 = 0;
- $_etype384 = 0;
- $xfer += $input->readListBegin($_etype384, $_size381);
- for ($_i385 = 0; $_i385 < $_size381; ++$_i385)
+ $_size395 = 0;
+ $_etype398 = 0;
+ $xfer += $input->readListBegin($_etype398, $_size395);
+ for ($_i399 = 0; $_i399 < $_size395; ++$_i399)
{
- $elem386 = null;
- $elem386 = new metastore_Role();
- $xfer += $elem386->read($input);
- $this->success []= $elem386;
+ $elem400 = null;
+ $elem400 = new metastore_Role();
+ $xfer += $elem400->read($input);
+ $this->success []= $elem400;
}
$xfer += $input->readListEnd();
} else {
@@ -14571,9 +14913,9 @@
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter387)
+ foreach ($this->success as $iter401)
{
- $xfer += $iter387->write($output);
+ $xfer += $iter401->write($output);
}
}
$output->writeListEnd();
@@ -14671,14 +15013,14 @@
case 3:
if ($ftype == TType::LST) {
$this->group_names = array();
- $_size388 = 0;
- $_etype391 = 0;
- $xfer += $input->readListBegin($_etype391, $_size388);
- for ($_i392 = 0; $_i392 < $_size388; ++$_i392)
+ $_size402 = 0;
+ $_etype405 = 0;
+ $xfer += $input->readListBegin($_etype405, $_size402);
+ for ($_i406 = 0; $_i406 < $_size402; ++$_i406)
{
- $elem393 = null;
- $xfer += $input->readString($elem393);
- $this->group_names []= $elem393;
+ $elem407 = null;
+ $xfer += $input->readString($elem407);
+ $this->group_names []= $elem407;
}
$xfer += $input->readListEnd();
} else {
@@ -14719,9 +15061,9 @@
{
$output->writeListBegin(TType::STRING, count($this->group_names));
{
- foreach ($this->group_names as $iter394)
+ foreach ($this->group_names as $iter408)
{
- $xfer += $output->writeString($iter394);
+ $xfer += $output->writeString($iter408);
}
}
$output->writeListEnd();
@@ -15008,15 +15350,15 @@
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size395 = 0;
- $_etype398 = 0;
- $xfer += $input->readListBegin($_etype398, $_size395);
- for ($_i399 = 0; $_i399 < $_size395; ++$_i399)
+ $_size409 = 0;
+ $_etype412 = 0;
+ $xfer += $input->readListBegin($_etype412, $_size409);
+ for ($_i413 = 0; $_i413 < $_size409; ++$_i413)
{
- $elem400 = null;
- $elem400 = new metastore_HiveObjectPrivilege();
- $xfer += $elem400->read($input);
- $this->success []= $elem400;
+ $elem414 = null;
+ $elem414 = new metastore_HiveObjectPrivilege();
+ $xfer += $elem414->read($input);
+ $this->success []= $elem414;
}
$xfer += $input->readListEnd();
} else {
@@ -15052,9 +15394,9 @@
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter401)
+ foreach ($this->success as $iter415)
{
- $xfer += $iter401->write($output);
+ $xfer += $iter415->write($output);
}
}
$output->writeListEnd();
Index: metastore/if/hive_metastore.thrift
===================================================================
--- metastore/if/hive_metastore.thrift (revision 1085555)
+++ metastore/if/hive_metastore.thrift (working copy)
@@ -303,6 +303,10 @@
3:string filter, 4:i16 max_parts=-1)
throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ // get partitions given a list of partition names
+ list<Partition> get_partitions_by_names(1:string db_name 2:string tbl_name 3:list<string> names)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
// changes the partition to the new partition object. partition is identified from the part values
// in the new_part
// * See notes on DDL_TIME
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1085555)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -213,8 +213,9 @@
METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "SOFT"),
METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus"),
METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG"),
-
+ METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300),
+
// Default parameters for creating tables
NEWTABLEDEFAULTPARA("hive.table.parameters.default",""),
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (revision 1085555)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (working copy)
@@ -183,7 +183,7 @@
if (table.isView()) {
return;
}
-
+
String partName = "";
if (table.isPartitioned()) {
try {
@@ -197,6 +197,12 @@
tPartition.getSd().setLocation(partPath.toString());
}
}
+ // set default if columns are not set
+ if (tPartition.getSd().getCols() == null) {
+ if (table.getCols() != null) {
+ tPartition.getSd().setCols(table.getCols());
+ }
+ }
} catch (MetaException e) {
throw new HiveException("Invalid partition for table " + table.getTableName(),
e);
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (revision 1085555)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (working copy)
@@ -1512,6 +1512,8 @@
*
* @param tbl
* object for which partition is needed. Must be partitioned.
+ * @param partialPartSpec
+ * partial partition specification (some partition columns may be left unspecified).
* @return list of partition objects
* @throws HiveException
*/
@@ -1527,21 +1529,58 @@
List<String> names = getPartitionNames(tbl.getDbName(), tbl.getTableName(),
partialPartSpec, (short)-1);
- List<Partition> partitions = new ArrayList<Partition>();
+ List<Partition> partitions = getPartitionsByNames(tbl, names);
+ return partitions;
+ }
- for (String pval: names) {
- try {
- org.apache.hadoop.hive.metastore.api.Partition tpart =
- getMSC().getPartition(tbl.getDbName(), tbl.getTableName(), pval);
- if (tpart != null) {
- Partition p = new Partition(tbl, tpart);
- partitions.add(p);
+ /**
+ * Get all partitions of the table that match the list of given partition names.
+ *
+ * @param tbl
+ * object for which partition is needed. Must be partitioned.
+ * @param partNames
+ * list of partition names
+ * @return list of partition objects
+ * @throws HiveException
+ */
+ public List<Partition> getPartitionsByNames(Table tbl, List<String> partNames)
+ throws HiveException {
+
+ if (!tbl.isPartitioned()) {
+ throw new HiveException("Partition spec should only be supplied for a " +
+ "partitioned table");
+ }
+ List<Partition> partitions = new ArrayList<Partition>(partNames.size());
+
+ int batchSize = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
+ int nParts = partNames.size();
+ int nBatches = nParts / batchSize;
+
+ try {
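+ // fetch full batches of at most hive.metastore.batch.retrieve.max partition names per metastore round trip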
+ for (int i = 0; i < nBatches; ++i) {
+ List<org.apache.hadoop.hive.metastore.api.Partition> tParts =
+ getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(),
+ partNames.subList(i*batchSize, (i+1)*batchSize));
+ if (tParts != null) {
+ for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) {
+ partitions.add(new Partition(tbl, tpart));
+ }
}
- } catch (Exception e) {
- throw new HiveException(e);
}
+
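+ // fetch the remaining names that did not fill a complete batch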
+ if (nParts > nBatches * batchSize) {
+ List<org.apache.hadoop.hive.metastore.api.Partition> tParts =
+ getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(),
+ partNames.subList(nBatches*batchSize, nParts));
+ if (tParts != null) {
+ for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) {
+ partitions.add(new Partition(tbl, tpart));
+ }
+ }
+ }
+ } catch (Exception e) {
+ throw new HiveException(e);
}
-
return partitions;
}
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (revision 1085555)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (working copy)
@@ -21,6 +21,7 @@
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -29,8 +30,10 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
@@ -57,6 +60,7 @@
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.thrift.TException;
@@ -310,51 +314,62 @@
Set<Partition> denied_parts, ExprNodeDesc prunerExpr, StructObjectInspector rowObjectInspector)
throws Exception {
- for (String partName : Hive.get().getPartitionNames(tab.getDbName(),
- tab.getTableName(), (short) -1)) {
+ List<String> trueNames = new LinkedList<String>();
+ List<String> unknNames = new LinkedList<String>();
+ Utilities.PerfLogBegin(LOG, "prune-listing");
+
+ List<String> partNames = Hive.get().getPartitionNames(tab.getDbName(),
+ tab.getTableName(), (short) -1);
+
+ List<FieldSchema> pCols = tab.getPartCols();
+ List<String> partCols = new ArrayList<String>(pCols.size());
+ List<String> values = new ArrayList<String>(pCols.size());
+ Object[] objectWithPart = new Object[2];
+
+ for (FieldSchema pCol : pCols) {
+ partCols.add(pCol.getName());
+ }
+
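+ // compile the pruner expression once against string partition columns; it is re-evaluated per partition name below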
+ Map<PrimitiveObjectInspector, ExprNodeEvaluator> handle = PartExprEvalUtils.prepareExpr(
+ prunerExpr, partCols, rowObjectInspector);
+
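+ // first pass: classify each partition name as true/unknown without fetching Partition objects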
+ for (String partName : partNames) {
+
// Set all the variables here
LinkedHashMap<String, String> partSpec = Warehouse
.makeSpecFromName(partName);
- LOG.trace("about to process partition " + partSpec + " for pruning ");
+ values.clear();
+ for (Map.Entry<String, String> kv: partSpec.entrySet()) {
+ values.add(kv.getValue());
+ }
+ objectWithPart[1] = values;
+
// evaluate the expression tree
- if (prunerExpr != null) {
+ Boolean r = (Boolean) PartExprEvalUtils.evaluateExprOnPart(handle, objectWithPart);
- Boolean r = (Boolean) PartExprEvalUtils.evalExprWithPart(prunerExpr, partSpec,
- rowObjectInspector);
-
- if (Boolean.FALSE.equals(r)) {
- if (denied_parts.isEmpty()) {
- Partition part = Hive.get().getPartition(tab, partSpec,
- Boolean.FALSE);
- denied_parts.add(part);
- }
- LOG.trace("pruned partition: " + partSpec);
- } else {
- Partition part = Hive.get().getPartition(tab, partSpec,
- Boolean.FALSE);
- String state = "retained";
- if (Boolean.TRUE.equals(r)) {
- true_parts.add(part);
- } else {
- // r == null means prunerExpr contains null subexpression,
- // which was converted from non-partition columns
- assert (r == null);
- unkn_parts.add(part);
- state = "unknown";
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug(state + " partition: " + partSpec);
- }
- }
- } else {
- // is there is no parition pruning, all of them are needed
- true_parts.add(Hive.get()
- .getPartition(tab, partSpec, Boolean.FALSE));
+ if (r == null) {
+ unknNames.add(partName);
+ LOG.debug("retained unknown partition: " + partName);
+ } else if (Boolean.TRUE.equals(r)) {
+ trueNames.add(partName);
+ LOG.debug("retained partition: " + partName);
}
}
+ Utilities.PerfLogEnd(LOG, "prune-listing");
+
+ Utilities.PerfLogBegin(LOG, "partition-retrieving");
+ if (trueNames.size() > 0) {
+ List<Partition> parts = Hive.get().getPartitionsByNames(tab, trueNames);
+ true_parts.addAll(parts);
+ }
+ if (unknNames.size() > 0) {
+ List<Partition> parts = Hive.get().getPartitionsByNames(tab, unknNames);
+ unkn_parts.addAll(parts);
+ }
+ Utilities.PerfLogEnd(LOG, "partition-retrieving");
}
+
/**
* Whether the expression contains a column node or not.
*/
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java (revision 1085555)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java (working copy)
@@ -19,7 +19,9 @@
package org.apache.hadoop.hive.ql.optimizer.ppr;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.LinkedHashMap;
+import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
@@ -42,7 +44,7 @@
* @return value returned by the expression
* @throws HiveException
*/
- static public Object evalExprWithPart(ExprNodeDesc expr, LinkedHashMap<String, String> partSpec,
+ static synchronized public Object evalExprWithPart(ExprNodeDesc expr, LinkedHashMap<String, String> partSpec,
StructObjectInspector rowObjectInspector) throws HiveException {
Object[] rowWithPart = new Object[2];
// Create the row object
@@ -75,4 +77,45 @@
return ((PrimitiveObjectInspector) evaluateResultOI)
.getPrimitiveJavaObject(evaluateResultO);
}
+
+ static synchronized public Map<PrimitiveObjectInspector, ExprNodeEvaluator> prepareExpr(
+ ExprNodeDesc expr, List<String> partNames,
+ StructObjectInspector rowObjectInspector) throws HiveException {
+
+ // Create the row object
+ List<ObjectInspector> partObjectInspectors = new ArrayList<ObjectInspector>();
+ for (int i = 0; i < partNames.size(); i++) {
+ partObjectInspectors.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
+ }
+ StructObjectInspector partObjectInspector = ObjectInspectorFactory
+ .getStandardStructObjectInspector(partNames, partObjectInspectors);
+
+ List<StructObjectInspector> ois = new ArrayList<StructObjectInspector>(2);
+ ois.add(rowObjectInspector);
+ ois.add(partObjectInspector);
+ StructObjectInspector rowWithPartObjectInspector =
+ ObjectInspectorFactory.getUnionStructObjectInspector(ois);
+
+ ExprNodeEvaluator evaluator = ExprNodeEvaluatorFactory.get(expr);
+ ObjectInspector evaluateResultOI = evaluator.initialize(rowWithPartObjectInspector);
+
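+ // pack the result ObjectInspector and its evaluator into a single-entry map so both can be reused per partition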
+ Map<PrimitiveObjectInspector, ExprNodeEvaluator> result =
+ new HashMap<PrimitiveObjectInspector, ExprNodeEvaluator>();
+ result.put((PrimitiveObjectInspector)evaluateResultOI, evaluator);
+ return result;
+ }
+
+ static synchronized public Object evaluateExprOnPart(
+ Map<PrimitiveObjectInspector, ExprNodeEvaluator> pair, Object[] rowWithPart)
+ throws HiveException {
+ assert(pair.size() > 0);
+ // only get the 1st entry from the map
+ Map.Entry<PrimitiveObjectInspector, ExprNodeEvaluator> entry = pair.entrySet().iterator().next();
+ PrimitiveObjectInspector evaluateResultOI = entry.getKey();
+ ExprNodeEvaluator evaluator = entry.getValue();
+
+ Object evaluateResultO = evaluator.evaluate(rowWithPart);
+
+ return evaluateResultOI.getPrimitiveJavaObject(evaluateResultO);
+ }
}