Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(revision 1186991)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(working copy)
@@ -521,6 +521,136 @@
     }
   }
 
+  public void testRenamePartition() throws Throwable {
+
+    try {
+      String dbName = "compdb1";
+      String tblName = "comptbl1";
+      List<String> vals = new ArrayList<String>(2);
+      vals.add("2011-07-11");
+      vals.add("8");
+      String part_path = "/ds=2011-07-11/hr=8";
+      List<String> tmp_vals = new ArrayList<String>(2);
+      tmp_vals.add("tmp_2011-07-11");
+      tmp_vals.add("-8");
+      String part2_path = "/ds=tmp_2011-07-11/hr=-8";
+
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      db.setDescription("Rename Partition Test database");
+      client.createDatabase(db);
+
+      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+      cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(cols);
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.getParameters().put("test_param_1", "Use this for comments etc");
+      sd.setBucketCols(new ArrayList<String>(2));
+      sd.getBucketCols().add("name");
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters()
+          .put(Constants.SERIALIZATION_FORMAT, "1");
+      sd.setSortCols(new ArrayList<Order>());
+
+      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+
+      client.createTable(tbl);
+
+      if (isThriftClient) {
+        // the createTable() above does not update the location in the 'tbl'
+        // object when the client is a thrift client and the code below relies
+        // on the location being present in the 'tbl' object - so get the table
+        // from the metastore
+        tbl = client.getTable(dbName, tblName);
+      }
+
+      Partition part = new Partition();
+      part.setDbName(dbName);
+      part.setTableName(tblName);
+      part.setValues(vals);
+      part.setParameters(new HashMap<String, String>());
+      part.setSd(tbl.getSd().deepCopy());
+      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+      part.getParameters().put("retention", "10");
+      part.getSd().setNumBuckets(12);
+      part.getSd().getSerdeInfo().getParameters().put("abc", "1");
+
+      client.add_partition(part);
+
+      part.setValues(tmp_vals);
+      client.renamePartition(dbName, tblName, vals, part);
+
+      boolean exceptionThrown = false;
+      try {
+        Partition p = client.getPartition(dbName, tblName, vals);
+      } catch(Exception e) {
+        assertEquals("partition should not have existed",
+            NoSuchObjectException.class, e.getClass());
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+
+      Partition part3 = client.getPartition(dbName, tblName, tmp_vals);
+      assertEquals("couldn't rename partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+          12);
+      assertEquals("new partition sd matches", part3.getSd().getLocation(),
+          tbl.getSd().getLocation() + part2_path);
+
+      part.setValues(vals);
+      client.renamePartition(dbName, tblName, tmp_vals, part);
+
+      exceptionThrown = false;
+      try {
+        Partition p = client.getPartition(dbName, tblName, tmp_vals);
+      } catch(Exception e) {
+        assertEquals("partition should not have existed",
+            NoSuchObjectException.class, e.getClass());
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+
+      part3 = client.getPartition(dbName, tblName, vals);
+      assertEquals("couldn't rename partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+          12);
+      assertEquals("new partition sd matches", part3.getSd().getLocation(),
+          tbl.getSd().getLocation() + part_path);
+
+      client.dropTable(dbName, tblName);
+
+      client.dropDatabase(dbName);
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testRenamePartition() failed.");
+      throw e;
+    }
+  }
+
   public void testDatabase() throws Throwable {
     try {
       // clear up any existing databases
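A note on the paths asserted in the test above: part_path and part2_path follow the
warehouse naming scheme that Warehouse.makePartName() derives from the partition keys
and values, which is also how the server computes the destination directory for a
rename (see alter_partition_core further down). A minimal sketch of that mapping
(illustrative only, not part of the patch):

    List<FieldSchema> keys = Arrays.asList(
        new FieldSchema("ds", "string", ""),
        new FieldSchema("hr", "int", ""));
    // yields "ds=2011-07-11/hr=8"
    String oldName = Warehouse.makePartName(keys, Arrays.asList("2011-07-11", "8"));
    // yields "ds=tmp_2011-07-11/hr=-8"
    String newName = Warehouse.makePartName(keys, Arrays.asList("tmp_2011-07-11", "-8"));
    // After the rename the partition directory is expected to move from
    // <tableLocation>/<oldName> to <tableLocation>/<newName>, which is what
    // the getLocation() assertions in the test verify.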
partition", part3.getSd().getNumBuckets(), + 12); + assertEquals("new partition sd matches", part3.getSd().getLocation(), + tbl.getSd().getLocation() + part2_path); + + part.setValues(vals); + client.renamePartition(dbName, tblName, tmp_vals, part); + + exceptionThrown = false; + try { + Partition p = client.getPartition(dbName, tblName, tmp_vals); + } catch(Exception e) { + assertEquals("partition should not have existed", + NoSuchObjectException.class, e.getClass()); + exceptionThrown = true; + } + assertTrue("Expected NoSuchObjectException", exceptionThrown); + + part3 = client.getPartition(dbName, tblName, vals); + assertEquals("couldn't rename partition", part3.getParameters().get( + "retention"), "10"); + assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo() + .getParameters().get("abc"), "1"); + assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(), + 12); + assertEquals("new partition sd matches", part3.getSd().getLocation(), + tbl.getSd().getLocation() + part_path); + + client.dropTable(dbName, tblName); + + client.dropDatabase(dbName); + } catch (Exception e) { + System.err.println(StringUtils.stringifyException(e)); + System.err.println("testRenamePartition() failed."); + throw e; + } + } + public void testDatabase() throws Throwable { try { // clear up any existing databases Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (revision 1186991) +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (working copy) @@ -158,7 +158,7 @@ public abstract List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException; - public abstract void alterPartition(String db_name, String tbl_name, + public abstract void alterPartition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException; public abstract boolean addIndex(Index index) @@ -303,6 +303,6 @@ public abstract List listPartitionsPsWithAuth(String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException; - + public abstract long cleanupEvents(); } Index: metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (revision 1186991) +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (working copy) @@ -555,6 +555,27 @@ throws InvalidOperationException, MetaException, TException; /** + * rename a partition to a new partition + * + * @param dbname + * database of the old partition + * @param name + * table name of the old partition + * @param part_vals + * values of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if srcFs and destFs are different + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + public void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart) + throws InvalidOperationException, MetaException, TException; + + /** * @param db * @param tableName * @throws UnknownTableException Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java 
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(revision 1186991)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(working copy)
@@ -167,6 +167,22 @@
     client.alter_table(dbname, tbl_name, new_tbl);
   }
 
+  /**
+   * @param dbname
+   * @param name
+   * @param part_vals
+   * @param newPart
+   * @throws InvalidOperationException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition(
+   *      java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition)
+   */
+  public void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partition(dbname, name, part_vals, newPart);
+  }
+
   private void open() throws MetaException {
     for (URI store : metastoreUris) {
       LOG.info("Trying to connect to metastore with URI " + store);
@@ -778,7 +794,7 @@
 
   public void alter_partition(String dbName, String tblName, Partition newPart)
       throws InvalidOperationException, MetaException, TException {
-    client.alter_partition(dbName, tblName, newPart);
+    client.alter_partition(dbName, tblName, null, newPart);
   }
 
   public void alterDatabase(String dbName, Database db)
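Both client entry points above funnel into the same Thrift RPC; only part_vals
distinguishes them. A sketch of the convention this patch establishes (not
additional API surface):

    client.alter_partition(db, tbl, null, newPart);     // plain alter of newPart.getValues()
    client.alter_partition(db, tbl, oldVals, newPart);  // rename oldVals -> newPart.getValues()

The server implementation below treats a null or empty part_vals as an in-place
alter and a non-empty part_vals as a rename.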
LOG.info("Partition values:" + new_part.getValues()); - + LOG.info("New partition values:" + new_part.getValues()); + if (part_vals != null && part_vals.size() > 0) { + LOG.info("Old Partition values:" + part_vals); + } + try { executeWithRetry(new Command() { @Override public Boolean run(RawStore ms) throws Exception { - alter_partition_core(ms, db_name, tbl_name, new_part); + alter_partition_core(ms, db_name, tbl_name, part_vals, new_part); return Boolean.TRUE; } }); - } catch (InvalidOperationException e) { - throw e; + } catch (InvalidObjectException e) { + throw new InvalidOperationException(e.getMessage()); + } catch (AlreadyExistsException e) { + throw new InvalidOperationException(e.getMessage()); } catch (MetaException e) { throw e; } catch (TException e) { @@ -1866,6 +1849,149 @@ return; } + private void alter_partition_core(final RawStore ms, final String dbname, final String name, final List part_vals, final Partition new_part) + throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { + boolean success = false; + + Path srcPath = null; + Path destPath = null; + FileSystem srcFs = null; + FileSystem destFs = null; + Table tbl = null; + Partition oldPart = null; + String oldPartLoc = null; + String newPartLoc = null; + // Set DDL time to now if not specified + if (new_part.getParameters() == null || + new_part.getParameters().get(Constants.DDL_TIME) == null || + Integer.parseInt(new_part.getParameters().get(Constants.DDL_TIME)) == 0) { + new_part.putToParameters(Constants.DDL_TIME, Long.toString(System + .currentTimeMillis() / 1000)); + } + //alter partition + if (part_vals == null || part_vals.size() == 0) { + try { + oldPart = ms.getPartition(dbname, name, new_part.getValues()); + ms.alterPartition(dbname, name, new_part.getValues(), new_part); + for (MetaStoreEventListener listener : listeners) { + listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this)); + } + } catch (InvalidObjectException e) { + throw new InvalidOperationException("alter is not possible"); + } catch (NoSuchObjectException e){ + //old partition does not exist + throw new InvalidOperationException("alter is not possible"); + } + return; + } + //rename partition + try { + ms.openTransaction(); + try { + oldPart = ms.getPartition(dbname, name, part_vals); + } catch (NoSuchObjectException e) { + // this means there is no existing partition + throw new InvalidObjectException( + "Unable to rename partition because old partition does not exist"); + } + Partition check_part = null; + try { + check_part = ms.getPartition(dbname, name, new_part.getValues()); + } catch(NoSuchObjectException e) { + // this means there is no existing partition + check_part = null; + } + if (check_part != null) { + throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." + new_part.getValues()); + } + tbl = ms.getTable(dbname, name); + if (tbl == null) { + throw new InvalidObjectException( + "Unable to rename partition because table or database do not exist"); + } + try { + destPath = new Path(wh.getTablePath(ms.getDatabase(dbname), name), Warehouse.makePartName(tbl.getPartitionKeys(), + new_part.getValues())); + } catch (NoSuchObjectException e) { + LOG.debug(e); + throw new InvalidOperationException( + "Unable to change partition or table. Database " + dbname + " does not exist" + + " Check metastore logs for detailed stack." 
+ e.getMessage()); + } + if (destPath != null) { + newPartLoc = destPath.toString(); + oldPartLoc = oldPart.getSd().getLocation(); + + srcPath = new Path(oldPartLoc); + + LOG.info("srcPath:" + oldPartLoc); + LOG.info("descPath:" + newPartLoc); + srcFs = wh.getFs(srcPath); + destFs = wh.getFs(destPath); + // check that src and dest are on the same file system + if (srcFs != destFs) { + throw new InvalidOperationException("table new location " + destPath + + " is on a different file system than the old location " + + srcPath + ". This operation is not supported"); + } + try { + srcFs.exists(srcPath); // check that src exists and also checks + if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) { + throw new InvalidOperationException("New location for this table " + + tbl.getDbName() + "." + tbl.getTableName() + + " already exists : " + destPath); + } + } catch (IOException e) { + Warehouse.closeFs(srcFs); + Warehouse.closeFs(destFs); + throw new InvalidOperationException("Unable to access new location " + + destPath + " for partition " + tbl.getDbName() + "." + + tbl.getTableName() + " " + new_part.getValues()); + } + new_part.getSd().setLocation(newPartLoc); + ms.alterPartition(dbname, name, part_vals, new_part); + } + + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + if (success && newPartLoc.compareTo(oldPartLoc) != 0) { + //rename the data directory + try{ + if (srcFs.exists(srcPath)) { + //if destPath's parent path doesn't exist, we should mkdir it + Path destParentPath = destPath.getParent(); + if (!wh.mkdirs(destParentPath)) { + throw new IOException("Unable to create path " + destParentPath); + } + srcFs.rename(srcPath, destPath); + LOG.info("rename done!"); + } + } catch (IOException e) { + boolean revertMetaDataTransaction = false; + try { + ms.openTransaction(); + ms.alterPartition(dbname, name, new_part.getValues(), oldPart); + revertMetaDataTransaction = ms.commitTransaction(); + } catch (Exception e1) { + LOG.error("Reverting metadata opeation failed During HDFS operation failed", e1); + if (!revertMetaDataTransaction) { + ms.rollbackTransaction(); + } + } + throw new InvalidOperationException("Unable to access old location " + + srcPath + " for partition " + tbl.getDbName() + "." 
+ + tbl.getTableName() + " " + part_vals); + } + } + for (MetaStoreEventListener listener : listeners) { + listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this)); + } + } + } + public boolean create_index(Index index_def) throws IndexAlreadyExistsException, MetaException { endFunction(startFunction("create_index")); Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (revision 1186991) +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (working copy) @@ -172,7 +172,7 @@ oldUri.getAuthority(), newPath); part.getSd().setLocation(newPartLocPath.toString()); - msdb.alterPartition(dbname, name, part); + msdb.alterPartition(dbname, name, part.getValues(), part); } } } Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 1186991) +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy) @@ -1892,18 +1892,20 @@ } } - public void alterPartition(String dbname, String name, Partition newPart) + public void alterPartition(String dbname, String name, List part_vals, Partition newPart) throws InvalidObjectException, MetaException { boolean success = false; try { openTransaction(); name = name.toLowerCase(); dbname = dbname.toLowerCase(); - MPartition oldp = getMPartition(dbname, name, newPart.getValues()); + MPartition oldp = getMPartition(dbname, name, part_vals); MPartition newp = convertToMPart(newPart, false); if (oldp == null || newp == null) { throw new InvalidObjectException("partition does not exist."); } + oldp.setValues(newp.getValues()); + oldp.setPartitionName(newp.getPartitionName()); oldp.setParameters(newPart.getParameters()); copyMSD(newp.getSd(), oldp.getSd()); if (newp.getCreateTime() != oldp.getCreateTime()) { Index: metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py =================================================================== --- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (revision 1186991) +++ metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (working copy) @@ -331,11 +331,12 @@ """ pass - def alter_partition(self, db_name, tbl_name, new_part): + def alter_partition(self, db_name, tbl_name, part_vals, new_part): """ Parameters: - db_name - tbl_name + - part_vals - new_part """ pass @@ -1908,21 +1909,23 @@ raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result"); - def alter_partition(self, db_name, tbl_name, new_part): + def alter_partition(self, db_name, tbl_name, part_vals, new_part): """ Parameters: - db_name - tbl_name + - part_vals - new_part """ - self.send_alter_partition(db_name, tbl_name, new_part) + self.send_alter_partition(db_name, tbl_name, part_vals, new_part) self.recv_alter_partition() - def send_alter_partition(self, db_name, tbl_name, new_part): + def send_alter_partition(self, db_name, tbl_name, part_vals, new_part): self._oprot.writeMessageBegin('alter_partition', TMessageType.CALL, self._seqid) args = alter_partition_args() args.db_name = db_name args.tbl_name = tbl_name + args.part_vals = part_vals args.new_part = new_part args.write(self._oprot) self._oprot.writeMessageEnd() @@ -3489,7 
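The ordering in alter_partition_core above is the crux of the change: the metadata
rename is committed first, the directory move runs only after a successful commit,
and a failed move is compensated by writing the old partition back. A condensed
sketch of that flow, using the method's own variable names:

    ms.openTransaction();
    // ... existence checks and destPath computation ...
    ms.alterPartition(dbname, name, part_vals, new_part);   // metadata rename
    success = ms.commitTransaction();

    if (success && !newPartLoc.equals(oldPartLoc)) {
      try {
        srcFs.rename(srcPath, destPath);                    // move partition data
      } catch (IOException e) {
        ms.openTransaction();                               // compensate: restore old metadata
        ms.alterPartition(dbname, name, new_part.getValues(), oldPart);
        ms.commitTransaction();
        throw new InvalidOperationException("Unable to access old location " + srcPath);
      }
    }

One consequence worth flagging for review: between the metadata commit and the
directory move there is a window in which the metastore already points at the new
location while the data still sits at the old one.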
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java	(revision 1186991)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java	(working copy)
@@ -172,7 +172,7 @@
                 oldUri.getAuthority(), newPath);
             part.getSd().setLocation(newPartLocPath.toString());
-            msdb.alterPartition(dbname, name, part);
+            msdb.alterPartition(dbname, name, part.getValues(), part);
           }
         }
       }
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(revision 1186991)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(working copy)
@@ -1892,18 +1892,20 @@
     }
   }
 
-  public void alterPartition(String dbname, String name, Partition newPart)
+  public void alterPartition(String dbname, String name, List<String> part_vals, Partition newPart)
       throws InvalidObjectException, MetaException {
     boolean success = false;
     try {
      openTransaction();
      name = name.toLowerCase();
      dbname = dbname.toLowerCase();
-      MPartition oldp = getMPartition(dbname, name, newPart.getValues());
+      MPartition oldp = getMPartition(dbname, name, part_vals);
      MPartition newp = convertToMPart(newPart, false);
      if (oldp == null || newp == null) {
        throw new InvalidObjectException("partition does not exist.");
      }
+      oldp.setValues(newp.getValues());
+      oldp.setPartitionName(newp.getPartitionName());
      oldp.setParameters(newPart.getParameters());
      copyMSD(newp.getSd(), oldp.getSd());
      if (newp.getCreateTime() != oldp.getCreateTime()) {
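Collecting how the callers changed above choose part_vals under the new
RawStore.alterPartition(db, tbl, part_vals, newPart) signature (a sketch drawn
from the hunks, not new code):

    msdb.alterPartition(dbname, name, part.getValues(), part);        // HiveAlterHandler: pure alter
    ms.alterPartition(dbname, name, new_part.getValues(), new_part);  // HiveMetaStore: alter path
    ms.alterPartition(dbname, name, part_vals, new_part);             // HiveMetaStore: rename path

ObjectStore then performs a rename as an in-place update of the existing
MPartition row, adopting the new values and partition name rather than deleting
and re-inserting the partition.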
Index: metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
===================================================================
--- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py	(revision 1186991)
+++ metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py	(working copy)
@@ -331,11 +331,12 @@
     """
     pass
 
-  def alter_partition(self, db_name, tbl_name, new_part):
+  def alter_partition(self, db_name, tbl_name, part_vals, new_part):
     """
     Parameters:
      - db_name
      - tbl_name
+     - part_vals
      - new_part
     """
     pass
 
@@ -1908,21 +1909,23 @@
         raise result.o2
     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result");
 
-  def alter_partition(self, db_name, tbl_name, new_part):
+  def alter_partition(self, db_name, tbl_name, part_vals, new_part):
     """
     Parameters:
      - db_name
      - tbl_name
+     - part_vals
      - new_part
     """
-    self.send_alter_partition(db_name, tbl_name, new_part)
+    self.send_alter_partition(db_name, tbl_name, part_vals, new_part)
    self.recv_alter_partition()
 
-  def send_alter_partition(self, db_name, tbl_name, new_part):
+  def send_alter_partition(self, db_name, tbl_name, part_vals, new_part):
    self._oprot.writeMessageBegin('alter_partition', TMessageType.CALL, self._seqid)
    args = alter_partition_args()
    args.db_name = db_name
    args.tbl_name = tbl_name
+    args.part_vals = part_vals
    args.new_part = new_part
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
@@ -3489,7 +3492,7 @@
     iprot.readMessageEnd()
     result = alter_partition_result()
     try:
-      self._handler.alter_partition(args.db_name, args.tbl_name, args.new_part)
+      self._handler.alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part)
     except InvalidOperationException, o1:
       result.o1 = o1
     except MetaException, o2:
@@ -10161,6 +10164,7 @@
   Attributes:
    - db_name
    - tbl_name
+   - part_vals
    - new_part
   """
 
@@ -10168,12 +10172,14 @@
     None, # 0
     (1, TType.STRING, 'db_name', None, None, ), # 1
     (2, TType.STRING, 'tbl_name', None, None, ), # 2
-    (3, TType.STRUCT, 'new_part', (Partition, Partition.thrift_spec), None, ), # 3
+    (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3
+    (4, TType.STRUCT, 'new_part', (Partition, Partition.thrift_spec), None, ), # 4
   )
 
-  def __init__(self, db_name=None, tbl_name=None, new_part=None,):
+  def __init__(self, db_name=None, tbl_name=None, part_vals=None, new_part=None,):
     self.db_name = db_name
     self.tbl_name = tbl_name
+    self.part_vals = part_vals
     self.new_part = new_part
 
   def read(self, iprot):
@@ -10196,6 +10202,16 @@
         else:
           iprot.skip(ftype)
       elif fid == 3:
+        if ftype == TType.LIST:
+          self.part_vals = []
+          (_etype389, _size386) = iprot.readListBegin()
+          for _i390 in xrange(_size386):
+            _elem391 = iprot.readString();
+            self.part_vals.append(_elem391)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
         if ftype == TType.STRUCT:
          self.new_part = Partition()
          self.new_part.read(iprot)
@@ -10219,8 +10235,15 @@
       oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
       oprot.writeString(self.tbl_name)
       oprot.writeFieldEnd()
+    if self.part_vals is not None:
+      oprot.writeFieldBegin('part_vals', TType.LIST, 3)
+      oprot.writeListBegin(TType.STRING, len(self.part_vals))
+      for iter392 in self.part_vals:
+        oprot.writeString(iter392)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
     if self.new_part is not None:
-      oprot.writeFieldBegin('new_part', TType.STRUCT, 3)
+      oprot.writeFieldBegin('new_part', TType.STRUCT, 4)
       self.new_part.write(oprot)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -10547,10 +10570,10 @@
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype389, _size386) = iprot.readListBegin()
-          for _i390 in xrange(_size386):
-            _elem391 = iprot.readString();
-            self.success.append(_elem391)
+          (_etype396, _size393) = iprot.readListBegin()
+          for _i397 in xrange(_size393):
+            _elem398 = iprot.readString();
+            self.success.append(_elem398)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -10573,8 +10596,8 @@
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter392 in self.success:
-        oprot.writeString(iter392)
+      for iter399 in self.success:
+        oprot.writeString(iter399)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -10687,11 +10710,11 @@
       if fid == 0:
         if ftype == TType.MAP:
          self.success = {}
-          (_ktype394, _vtype395, _size393 ) = iprot.readMapBegin() 
-          for _i397 in xrange(_size393):
-            _key398 = iprot.readString();
-            _val399 = iprot.readString();
-            self.success[_key398] = _val399
+          (_ktype401, _vtype402, _size400 ) = iprot.readMapBegin() 
+          for _i404 in xrange(_size400):
+            _key405 = iprot.readString();
+            _val406 = iprot.readString();
+            self.success[_key405] = _val406
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
@@ -10714,9 +10737,9 @@
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter400,viter401 in self.success.items():
-        oprot.writeString(kiter400)
-        oprot.writeString(viter401)
+      for kiter407,viter408 in self.success.items():
+        oprot.writeString(kiter407)
+        oprot.writeString(viter408)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -10786,11 +10809,11 @@
       elif fid == 3:
         if ftype == TType.MAP:
          self.part_vals = {}
-          (_ktype403, _vtype404, _size402 ) = iprot.readMapBegin() 
-          for _i406 in xrange(_size402):
-            _key407 = iprot.readString();
-            _val408 = iprot.readString();
-            self.part_vals[_key407] = _val408
+          (_ktype410, _vtype411, _size409 ) = iprot.readMapBegin() 
+          for _i413 in xrange(_size409):
+            _key414 = iprot.readString();
+            _val415 = iprot.readString();
+            self.part_vals[_key414] = _val415
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
@@ -10820,9 +10843,9 @@
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter409,viter410 in self.part_vals.items():
-        oprot.writeString(kiter409)
-        oprot.writeString(viter410)
+      for kiter416,viter417 in self.part_vals.items():
+        oprot.writeString(kiter416)
+        oprot.writeString(viter417)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -11018,11 +11041,11 @@
       elif fid == 3:
         if ftype == TType.MAP:
          self.part_vals = {}
-          (_ktype412, _vtype413, _size411 ) = iprot.readMapBegin() 
-          for _i415 in xrange(_size411):
-            _key416 = iprot.readString();
-            _val417 = iprot.readString();
-            self.part_vals[_key416] = _val417
+          (_ktype419, _vtype420, _size418 ) = iprot.readMapBegin() 
+          for _i422 in xrange(_size418):
+            _key423 = iprot.readString();
+            _val424 = iprot.readString();
+            self.part_vals[_key423] = _val424
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
@@ -11052,9 +11075,9 @@
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter418,viter419 in self.part_vals.items():
-        oprot.writeString(kiter418)
-        oprot.writeString(viter419)
+      for kiter425,viter426 in self.part_vals.items():
+        oprot.writeString(kiter425)
+        oprot.writeString(viter426)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -12026,11 +12049,11 @@
       if fid == 0:
         if ftype == TType.LIST:
          self.success = []
-          (_etype423, _size420) = iprot.readListBegin()
-          for _i424 in xrange(_size420):
-            _elem425 = Index()
-            _elem425.read(iprot)
-            self.success.append(_elem425)
+          (_etype430, _size427) = iprot.readListBegin()
+          for _i431 in xrange(_size427):
+            _elem432 = Index()
+            _elem432.read(iprot)
+            self.success.append(_elem432)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
@@ -12059,8 +12082,8 @@
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter426 in self.success:
-        iter426.write(oprot)
+      for iter433 in self.success:
+        iter433.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -12201,10 +12224,10 @@
       if fid == 0:
         if ftype == TType.LIST:
          self.success = []
-          (_etype430, _size427) = iprot.readListBegin()
-          for _i431 in xrange(_size427):
-            _elem432 = iprot.readString();
-            self.success.append(_elem432)
+          (_etype437, _size434) = iprot.readListBegin()
+          for _i438 in xrange(_size434):
+            _elem439 = iprot.readString();
+            self.success.append(_elem439)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
@@ -12227,8 +12250,8 @@
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter433 in self.success:
-        oprot.writeString(iter433)
+      for iter440 in self.success:
+        oprot.writeString(iter440)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -12588,10 +12611,10 @@
       if fid == 0:
         if ftype == TType.LIST:
          self.success = []
-          (_etype437, _size434) = iprot.readListBegin()
-          for _i438 in xrange(_size434):
-            _elem439 = iprot.readString();
-            self.success.append(_elem439)
+          (_etype444, _size441) = iprot.readListBegin()
+          for _i445 in xrange(_size441):
+            _elem446 = iprot.readString();
+            self.success.append(_elem446)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
@@ -12614,8 +12637,8 @@
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter440 in self.success:
-        oprot.writeString(iter440)
+      for iter447 in self.success:
+        oprot.writeString(iter447)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13088,11 +13111,11 @@
       if fid == 0:
         if ftype == TType.LIST:
          self.success = []
-          (_etype444, _size441) = iprot.readListBegin()
-          for _i445 in xrange(_size441):
-            _elem446 = Role()
-            _elem446.read(iprot)
-            self.success.append(_elem446)
+          (_etype451, _size448) = iprot.readListBegin()
+          for _i452 in xrange(_size448):
+            _elem453 = Role()
+            _elem453.read(iprot)
+            self.success.append(_elem453)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
@@ -13115,8 +13138,8 @@
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter447 in self.success:
-        iter447.write(oprot)
+      for iter454 in self.success:
+        iter454.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13184,10 +13207,10 @@
       elif fid == 3:
         if ftype == TType.LIST:
          self.group_names = []
-          (_etype451, _size448) = iprot.readListBegin()
-          for _i452 in xrange(_size448):
-            _elem453 = iprot.readString();
-            self.group_names.append(_elem453)
+          (_etype458, _size455) = iprot.readListBegin()
+          for _i459 in xrange(_size455):
+            _elem460 = iprot.readString();
+            self.group_names.append(_elem460)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
@@ -13212,8 +13235,8 @@
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter454 in self.group_names:
-        oprot.writeString(iter454)
+      for iter461 in self.group_names:
+        oprot.writeString(iter461)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -13420,11 +13443,11 @@
       if fid == 0:
         if ftype == TType.LIST:
          self.success = []
-          (_etype458, _size455) = iprot.readListBegin()
-          for _i459 in xrange(_size455):
-            _elem460 = HiveObjectPrivilege()
-            _elem460.read(iprot)
-            self.success.append(_elem460)
+          (_etype465, _size462) = iprot.readListBegin()
+          for _i466 in xrange(_size462):
+            _elem467 = HiveObjectPrivilege()
+            _elem467.read(iprot)
+            self.success.append(_elem467)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
@@ -13447,8 +13470,8 @@
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter461 in self.success:
-        iter461.write(oprot)
+      for iter468 in self.success:
+        iter468.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
Index: metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
===================================================================
--- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote	(revision 1186991)
+++ metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote	(working copy)
@@ -58,7 +58,7 @@
   print '  get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)'
   print '  get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)'
   print '  get_partitions_by_names(string db_name, string tbl_name, names)'
-  print '  void alter_partition(string db_name, string tbl_name, Partition new_part)'
+  print '  void alter_partition(string db_name, string tbl_name, part_vals, Partition new_part)'
   print '  string get_config_value(string name, string defaultValue)'
   print '  partition_name_to_vals(string part_name)'
   print '  partition_name_to_spec(string part_name)'
@@ -356,10 +356,10 @@
   pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),))
 
 elif cmd == 'alter_partition':
-  if len(args) != 3:
-    print 'alter_partition requires 3 args'
+  if len(args) != 4:
+    print 'alter_partition requires 4 args'
     sys.exit(1)
-  pp.pprint(client.alter_partition(args[0],args[1],eval(args[2]),))
+  pp.pprint(client.alter_partition(args[0],args[1],eval(args[2]),eval(args[3]),))
 
 elif cmd == 'get_config_value':
   if len(args) != 2:
Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
===================================================================
--- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp	(revision 1186991)
+++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp	(working copy)
@@ -9035,6 +9035,26 @@
         }
         break;
       case 3:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->part_vals.clear();
+            uint32_t _size437;
+            ::apache::thrift::protocol::TType _etype440;
+            iprot->readListBegin(_etype440, _size437);
+            this->part_vals.resize(_size437);
+            uint32_t _i441;
+            for (_i441 = 0; _i441 < _size437; ++_i441)
+            {
+              xfer += iprot->readString(this->part_vals[_i441]);
+            }
+            iprot->readListEnd();
+          }
+          this->__isset.part_vals = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
           xfer += this->new_part.read(iprot);
           this->__isset.new_part = true;
@@ -9063,7 +9083,18 @@
   xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->tbl_name);
   xfer += oprot->writeFieldEnd();
-  xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
+    std::vector<std::string> ::const_iterator _iter442;
+    for (_iter442 = this->part_vals.begin(); _iter442 != this->part_vals.end(); ++_iter442)
+    {
+      xfer += oprot->writeString((*_iter442));
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 4);
   xfer += this->new_part.write(oprot);
   xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
@@ -9080,7 +9111,18 @@
   xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString((*(this->tbl_name)));
   xfer += oprot->writeFieldEnd();
-  xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
+    std::vector<std::string> ::const_iterator _iter443;
+    for (_iter443 = (*(this->part_vals)).begin(); _iter443 != (*(this->part_vals)).end(); ++_iter443)
+    {
+      xfer += oprot->writeString((*_iter443));
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 4);
   xfer += (*(this->new_part)).write(oprot);
   xfer += oprot->writeFieldEnd();
   xfer += oprot->writeFieldStop();
@@ -9482,14 +9524,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size437;
-            ::apache::thrift::protocol::TType _etype440;
-            iprot->readListBegin(_etype440, _size437);
-            this->success.resize(_size437);
-            uint32_t _i441;
-            for (_i441 = 0; _i441 < _size437; ++_i441)
+            uint32_t _size444;
+            ::apache::thrift::protocol::TType _etype447;
+            iprot->readListBegin(_etype447, _size444);
+            this->success.resize(_size444);
+            uint32_t _i448;
+            for (_i448 = 0; _i448 < _size444; ++_i448)
            {
-              xfer += iprot->readString(this->success[_i441]);
+              xfer += iprot->readString(this->success[_i448]);
            }
            iprot->readListEnd();
          }
@@ -9528,10 +9570,10 @@
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter442;
-      for (_iter442 = this->success.begin(); _iter442 != this->success.end(); ++_iter442)
+      std::vector<std::string> ::const_iterator _iter449;
+      for (_iter449 = this->success.begin(); _iter449 != this->success.end(); ++_iter449)
      {
-        xfer += oprot->writeString((*_iter442));
+        xfer += oprot->writeString((*_iter449));
      }
      xfer += oprot->writeListEnd();
    }
@@ -9570,14 +9612,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size443;
-            ::apache::thrift::protocol::TType _etype446;
-            iprot->readListBegin(_etype446, _size443);
-            (*(this->success)).resize(_size443);
-            uint32_t _i447;
-            for (_i447 = 0; _i447 < _size443; ++_i447)
+            uint32_t _size450;
+            ::apache::thrift::protocol::TType _etype453;
+            iprot->readListBegin(_etype453, _size450);
+            (*(this->success)).resize(_size450);
+            uint32_t _i454;
+            for (_i454 = 0; _i454 < _size450; ++_i454)
            {
-              xfer += iprot->readString((*(this->success))[_i447]);
+              xfer += iprot->readString((*(this->success))[_i454]);
            }
            iprot->readListEnd();
          }
@@ -9692,17 +9734,17 @@
         if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->success.clear();
-            uint32_t _size448;
-            ::apache::thrift::protocol::TType _ktype449;
-            ::apache::thrift::protocol::TType _vtype450;
-            iprot->readMapBegin(_ktype449, _vtype450, _size448);
-            uint32_t _i452;
-            for (_i452 = 0; _i452 < _size448; ++_i452)
+            uint32_t _size455;
+            ::apache::thrift::protocol::TType _ktype456;
+            ::apache::thrift::protocol::TType _vtype457;
+            iprot->readMapBegin(_ktype456, _vtype457, _size455);
+            uint32_t _i459;
+            for (_i459 = 0; _i459 < _size455; ++_i459)
            {
-              std::string _key453;
-              xfer += iprot->readString(_key453);
-              std::string& _val454 = this->success[_key453];
-              xfer += iprot->readString(_val454);
+              std::string _key460;
+              xfer += iprot->readString(_key460);
+              std::string& _val461 = this->success[_key460];
+              xfer += iprot->readString(_val461);
            }
            iprot->readMapEnd();
          }
@@ -9741,11 +9783,11 @@
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
    {
      xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::map<std::string, std::string> ::const_iterator _iter455;
-      for (_iter455 = this->success.begin(); _iter455 != this->success.end(); ++_iter455)
+      std::map<std::string, std::string> ::const_iterator _iter462;
+      for (_iter462 = this->success.begin(); _iter462 != this->success.end(); ++_iter462)
      {
-        xfer += oprot->writeString(_iter455->first);
-        xfer += oprot->writeString(_iter455->second);
+        xfer += oprot->writeString(_iter462->first);
+        xfer += oprot->writeString(_iter462->second);
      }
      xfer += oprot->writeMapEnd();
    }
@@ -9784,17 +9826,17 @@
         if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            (*(this->success)).clear();
-            uint32_t _size456;
-            ::apache::thrift::protocol::TType _ktype457;
-            ::apache::thrift::protocol::TType _vtype458;
-            iprot->readMapBegin(_ktype457, _vtype458, _size456);
-            uint32_t _i460;
-            for (_i460 = 0; _i460 < _size456; ++_i460)
+            uint32_t _size463;
+            ::apache::thrift::protocol::TType _ktype464;
+            ::apache::thrift::protocol::TType _vtype465;
+            iprot->readMapBegin(_ktype464, _vtype465, _size463);
+            uint32_t _i467;
+            for (_i467 = 0; _i467 < _size463; ++_i467)
            {
-              std::string _key461;
-              xfer += iprot->readString(_key461);
-              std::string& _val462 = (*(this->success))[_key461];
-              xfer += iprot->readString(_val462);
+              std::string _key468;
+              xfer += iprot->readString(_key468);
+              std::string& _val469 = (*(this->success))[_key468];
+              xfer += iprot->readString(_val469);
            }
            iprot->readMapEnd();
          }
@@ -9863,17 +9905,17 @@
         if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->part_vals.clear();
-            uint32_t _size463;
-            ::apache::thrift::protocol::TType _ktype464;
-            ::apache::thrift::protocol::TType _vtype465;
-            iprot->readMapBegin(_ktype464, _vtype465, _size463);
-            uint32_t _i467;
-            for (_i467 = 0; _i467 < _size463; ++_i467)
+            uint32_t _size470;
+            ::apache::thrift::protocol::TType _ktype471;
+            ::apache::thrift::protocol::TType _vtype472;
+            iprot->readMapBegin(_ktype471, _vtype472, _size470);
+            uint32_t _i474;
+            for (_i474 = 0; _i474 < _size470; ++_i474)
            {
-              std::string _key468;
-              xfer += iprot->readString(_key468);
-              std::string& _val469 = this->part_vals[_key468];
-              xfer += iprot->readString(_val469);
+              std::string _key475;
+              xfer += iprot->readString(_key475);
+              std::string& _val476 = this->part_vals[_key475];
+              xfer += iprot->readString(_val476);
            }
            iprot->readMapEnd();
          }
@@ -9884,9 +9926,9 @@
         break;
       case 4:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast470;
-          xfer += iprot->readI32(ecast470);
-          this->eventType = (PartitionEventType::type)ecast470;
+          int32_t ecast477;
+          xfer += iprot->readI32(ecast477);
+          this->eventType = (PartitionEventType::type)ecast477;
          this->__isset.eventType = true;
        } else {
          xfer += iprot->skip(ftype);
@@ -9916,11 +9958,11 @@
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::map<std::string, std::string> ::const_iterator _iter471;
-    for (_iter471 = this->part_vals.begin(); _iter471 != this->part_vals.end(); ++_iter471)
+    std::map<std::string, std::string> ::const_iterator _iter478;
+    for (_iter478 = this->part_vals.begin(); _iter478 != this->part_vals.end(); ++_iter478)
    {
-      xfer += oprot->writeString(_iter471->first);
-      xfer += oprot->writeString(_iter471->second);
+      xfer += oprot->writeString(_iter478->first);
+      xfer += oprot->writeString(_iter478->second);
    }
    xfer += oprot->writeMapEnd();
  }
@@ -9945,11 +9987,11 @@
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter472;
-    for (_iter472 = (*(this->part_vals)).begin(); _iter472 != (*(this->part_vals)).end(); ++_iter472)
+    std::map<std::string, std::string> ::const_iterator _iter479;
+    for (_iter479 = (*(this->part_vals)).begin(); _iter479 != (*(this->part_vals)).end(); ++_iter479)
    {
-      xfer += oprot->writeString(_iter472->first);
-      xfer += oprot->writeString(_iter472->second);
+      xfer += oprot->writeString(_iter479->first);
+      xfer += oprot->writeString(_iter479->second);
    }
    xfer += oprot->writeMapEnd();
  }
@@ -10198,17 +10240,17 @@
         if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->part_vals.clear();
-            uint32_t _size473;
-            ::apache::thrift::protocol::TType _ktype474;
-            ::apache::thrift::protocol::TType _vtype475;
-            iprot->readMapBegin(_ktype474, _vtype475, _size473);
-            uint32_t _i477;
-            for (_i477 = 0; _i477 < _size473; ++_i477)
+            uint32_t _size480;
+            ::apache::thrift::protocol::TType _ktype481;
+            ::apache::thrift::protocol::TType _vtype482;
+            iprot->readMapBegin(_ktype481, _vtype482, _size480);
+            uint32_t _i484;
+            for (_i484 = 0; _i484 < _size480; ++_i484)
            {
-              std::string _key478;
-              xfer += iprot->readString(_key478);
-              std::string& _val479 = this->part_vals[_key478];
-              xfer += iprot->readString(_val479);
+              std::string _key485;
+              xfer += iprot->readString(_key485);
+              std::string& _val486 = this->part_vals[_key485];
+              xfer += iprot->readString(_val486);
            }
            iprot->readMapEnd();
          }
@@ -10219,9 +10261,9 @@
         break;
       case 4:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast480;
-          xfer += iprot->readI32(ecast480);
-          this->eventType = (PartitionEventType::type)ecast480;
+          int32_t ecast487;
+          xfer += iprot->readI32(ecast487);
+          this->eventType = (PartitionEventType::type)ecast487;
          this->__isset.eventType = true;
        } else {
          xfer += iprot->skip(ftype);
@@ -10251,11 +10293,11 @@
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::map<std::string, std::string> ::const_iterator _iter481;
-    for (_iter481 = this->part_vals.begin(); _iter481 != this->part_vals.end(); ++_iter481)
+    std::map<std::string, std::string> ::const_iterator _iter488;
+    for (_iter488 = this->part_vals.begin(); _iter488 != this->part_vals.end(); ++_iter488)
    {
-      xfer += oprot->writeString(_iter481->first);
-      xfer += oprot->writeString(_iter481->second);
+      xfer += oprot->writeString(_iter488->first);
+      xfer += oprot->writeString(_iter488->second);
    }
    xfer += oprot->writeMapEnd();
  }
@@ -10280,11 +10322,11 @@
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter482;
-    for (_iter482 = (*(this->part_vals)).begin(); _iter482 != (*(this->part_vals)).end(); ++_iter482)
+    std::map<std::string, std::string> ::const_iterator _iter489;
+    for (_iter489 = (*(this->part_vals)).begin(); _iter489 != (*(this->part_vals)).end(); ++_iter489)
    {
-      xfer += oprot->writeString(_iter482->first);
-      xfer += oprot->writeString(_iter482->second);
+      xfer += oprot->writeString(_iter489->first);
+      xfer += oprot->writeString(_iter489->second);
    }
    xfer += oprot->writeMapEnd();
  }
@@ -11545,14 +11587,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size483;
-            ::apache::thrift::protocol::TType _etype486;
-            iprot->readListBegin(_etype486, _size483);
-            this->success.resize(_size483);
-            uint32_t _i487;
-            for (_i487 = 0; _i487 < _size483; ++_i487)
+            uint32_t _size490;
+            ::apache::thrift::protocol::TType _etype493;
+            iprot->readListBegin(_etype493, _size490);
+            this->success.resize(_size490);
+            uint32_t _i494;
+            for (_i494 = 0; _i494 < _size490; ++_i494)
            {
-              xfer += this->success[_i487].read(iprot);
+              xfer += this->success[_i494].read(iprot);
            }
            iprot->readListEnd();
          }
@@ -11599,10 +11641,10 @@
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Index> ::const_iterator _iter488;
-      for (_iter488 = this->success.begin(); _iter488 != this->success.end(); ++_iter488)
+      std::vector<Index> ::const_iterator _iter495;
+      for (_iter495 = this->success.begin(); _iter495 != this->success.end(); ++_iter495)
      {
-        xfer += (*_iter488).write(oprot);
+        xfer += (*_iter495).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@ -11645,14 +11687,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size489;
-            ::apache::thrift::protocol::TType _etype492;
-            iprot->readListBegin(_etype492, _size489);
-            (*(this->success)).resize(_size489);
-            uint32_t _i493;
-            for (_i493 = 0; _i493 < _size489; ++_i493)
+            uint32_t _size496;
+            ::apache::thrift::protocol::TType _etype499;
+            iprot->readListBegin(_etype499, _size496);
+            (*(this->success)).resize(_size496);
+            uint32_t _i500;
+            for (_i500 = 0; _i500 < _size496; ++_i500)
            {
-              xfer += (*(this->success))[_i493].read(iprot);
+              xfer += (*(this->success))[_i500].read(iprot);
            }
            iprot->readListEnd();
          }
@@ -11803,14 +11845,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size494;
-            ::apache::thrift::protocol::TType _etype497;
-            iprot->readListBegin(_etype497, _size494);
-            this->success.resize(_size494);
-            uint32_t _i498;
-            for (_i498 = 0; _i498 < _size494; ++_i498)
+            uint32_t _size501;
+            ::apache::thrift::protocol::TType _etype504;
+            iprot->readListBegin(_etype504, _size501);
+            this->success.resize(_size501);
+            uint32_t _i505;
+            for (_i505 = 0; _i505 < _size501; ++_i505)
            {
-              xfer += iprot->readString(this->success[_i498]);
+              xfer += iprot->readString(this->success[_i505]);
            }
            iprot->readListEnd();
          }
@@ -11849,10 +11891,10 @@
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter499;
-      for (_iter499 = this->success.begin(); _iter499 != this->success.end(); ++_iter499)
+      std::vector<std::string> ::const_iterator _iter506;
+      for (_iter506 = this->success.begin(); _iter506 != this->success.end(); ++_iter506)
      {
-        xfer += oprot->writeString((*_iter499));
+        xfer += oprot->writeString((*_iter506));
      }
      xfer += oprot->writeListEnd();
    }
@@ -11891,14 +11933,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size500;
-            ::apache::thrift::protocol::TType _etype503;
-            iprot->readListBegin(_etype503, _size500);
-            (*(this->success)).resize(_size500);
-            uint32_t _i504;
-            for (_i504 = 0; _i504 < _size500; ++_i504)
+            uint32_t _size507;
+            ::apache::thrift::protocol::TType _etype510;
+            iprot->readListBegin(_etype510, _size507);
+            (*(this->success)).resize(_size507);
+            uint32_t _i511;
+            for (_i511 = 0; _i511 < _size507; ++_i511)
            {
-              xfer += iprot->readString((*(this->success))[_i504]);
+              xfer += iprot->readString((*(this->success))[_i511]);
            }
            iprot->readListEnd();
          }
@@ -12355,14 +12397,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size505;
-            ::apache::thrift::protocol::TType _etype508;
-            iprot->readListBegin(_etype508, _size505);
-            this->success.resize(_size505);
-            uint32_t _i509;
-            for (_i509 = 0; _i509 < _size505; ++_i509)
+            uint32_t _size512;
+            ::apache::thrift::protocol::TType _etype515;
+            iprot->readListBegin(_etype515, _size512);
+            this->success.resize(_size512);
+            uint32_t _i516;
+            for (_i516 = 0; _i516 < _size512; ++_i516)
            {
-              xfer += iprot->readString(this->success[_i509]);
+              xfer += iprot->readString(this->success[_i516]);
            }
            iprot->readListEnd();
          }
@@ -12401,10 +12443,10 @@
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter510;
-      for (_iter510 = this->success.begin(); _iter510 != this->success.end(); ++_iter510)
+      std::vector<std::string> ::const_iterator _iter517;
+      for (_iter517 = this->success.begin(); _iter517 != this->success.end(); ++_iter517)
      {
-        xfer += oprot->writeString((*_iter510));
+        xfer += oprot->writeString((*_iter517));
      }
      xfer += oprot->writeListEnd();
    }
@@ -12443,14 +12485,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size511;
-            ::apache::thrift::protocol::TType _etype514;
-            iprot->readListBegin(_etype514, _size511);
-            (*(this->success)).resize(_size511);
-            uint32_t _i515;
-            for (_i515 = 0; _i515 < _size511; ++_i515)
+            uint32_t _size518;
+            ::apache::thrift::protocol::TType _etype521;
+            iprot->readListBegin(_etype521, _size518);
+            (*(this->success)).resize(_size518);
+            uint32_t _i522;
+            for (_i522 = 0; _i522 < _size518; ++_i522)
            {
-              xfer += iprot->readString((*(this->success))[_i515]);
+              xfer += iprot->readString((*(this->success))[_i522]);
            }
            iprot->readListEnd();
          }
@@ -12517,9 +12559,9 @@
         break;
       case 3:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast516;
-          xfer += iprot->readI32(ecast516);
-          this->principal_type = (PrincipalType::type)ecast516;
+          int32_t ecast523;
+          xfer += iprot->readI32(ecast523);
+          this->principal_type = (PrincipalType::type)ecast523;
          this->__isset.principal_type = true;
        } else {
          xfer += iprot->skip(ftype);
@@ -12535,9 +12577,9 @@
         break;
       case 5:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast517;
-          xfer += iprot->readI32(ecast517);
-          this->grantorType = (PrincipalType::type)ecast517;
+          int32_t ecast524;
+          xfer += iprot->readI32(ecast524);
+          this->grantorType = (PrincipalType::type)ecast524;
          this->__isset.grantorType = true;
        } else {
          xfer += iprot->skip(ftype);
@@ -12769,9 +12811,9 @@
         break;
       case 3:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast518;
-          xfer += iprot->readI32(ecast518);
-          this->principal_type = (PrincipalType::type)ecast518;
+          int32_t ecast525;
+          xfer += iprot->readI32(ecast525);
+          this->principal_type = (PrincipalType::type)ecast525;
          this->__isset.principal_type = true;
        } else {
          xfer += iprot->skip(ftype);
@@ -12969,9 +13011,9 @@
         break;
       case 2:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast519;
-          xfer += iprot->readI32(ecast519);
-          this->principal_type = (PrincipalType::type)ecast519;
+          int32_t ecast526;
+          xfer += iprot->readI32(ecast526);
+          this->principal_type = (PrincipalType::type)ecast526;
          this->__isset.principal_type = true;
        } else {
          xfer += iprot->skip(ftype);
@@ -13041,14 +13083,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size520;
-            ::apache::thrift::protocol::TType _etype523;
-            iprot->readListBegin(_etype523, _size520);
-            this->success.resize(_size520);
-            uint32_t _i524;
-            for (_i524 = 0; _i524 < _size520; ++_i524)
+            uint32_t _size527;
+            ::apache::thrift::protocol::TType _etype530;
+            iprot->readListBegin(_etype530, _size527);
+            this->success.resize(_size527);
+            uint32_t _i531;
+            for (_i531 = 0; _i531 < _size527; ++_i531)
            {
-              xfer += this->success[_i524].read(iprot);
+              xfer += this->success[_i531].read(iprot);
            }
            iprot->readListEnd();
          }
@@ -13087,10 +13129,10 @@
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Role> ::const_iterator _iter525;
-      for (_iter525 = this->success.begin(); _iter525 != this->success.end(); ++_iter525)
+      std::vector<Role> ::const_iterator _iter532;
+      for (_iter532 = this->success.begin(); _iter532 != this->success.end(); ++_iter532)
      {
-        xfer += (*_iter525).write(oprot);
+        xfer += (*_iter532).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@ -13129,14 +13171,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size526;
-            ::apache::thrift::protocol::TType _etype529;
-            iprot->readListBegin(_etype529, _size526);
-            (*(this->success)).resize(_size526);
-            uint32_t _i530;
-            for (_i530 = 0; _i530 < _size526; ++_i530)
+            uint32_t _size533;
+            ::apache::thrift::protocol::TType _etype536;
+            iprot->readListBegin(_etype536, _size533);
+            (*(this->success)).resize(_size533);
+            uint32_t _i537;
+            for (_i537 = 0; _i537 < _size533; ++_i537)
            {
-              xfer += (*(this->success))[_i530].read(iprot);
+              xfer += (*(this->success))[_i537].read(iprot);
            }
            iprot->readListEnd();
          }
@@ -13205,14 +13247,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->group_names.clear();
-            uint32_t _size531;
-            ::apache::thrift::protocol::TType _etype534;
-            iprot->readListBegin(_etype534, _size531);
-            this->group_names.resize(_size531);
-            uint32_t _i535;
-            for (_i535 = 0; _i535 < _size531; ++_i535)
+            uint32_t _size538;
+            ::apache::thrift::protocol::TType _etype541;
+            iprot->readListBegin(_etype541, _size538);
+            this->group_names.resize(_size538);
+            uint32_t _i542;
+            for (_i542 = 0; _i542 < _size538; ++_i542)
            {
-              xfer += iprot->readString(this->group_names[_i535]);
+              xfer += iprot->readString(this->group_names[_i542]);
            }
            iprot->readListEnd();
          }
@@ -13245,10 +13287,10 @@
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter536;
-    for (_iter536 = this->group_names.begin(); _iter536 != this->group_names.end(); ++_iter536)
+    std::vector<std::string> ::const_iterator _iter543;
+    for (_iter543 = this->group_names.begin(); _iter543 != this->group_names.end(); ++_iter543)
    {
-      xfer += oprot->writeString((*_iter536));
+      xfer += oprot->writeString((*_iter543));
    }
    xfer += oprot->writeListEnd();
  }
@@ -13270,10 +13312,10 @@
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter537;
-    for (_iter537 = (*(this->group_names)).begin(); _iter537 != (*(this->group_names)).end(); ++_iter537)
+    std::vector<std::string> ::const_iterator _iter544;
+    for (_iter544 = (*(this->group_names)).begin(); _iter544 != (*(this->group_names)).end(); ++_iter544)
    {
-      xfer += oprot->writeString((*_iter537));
+      xfer += oprot->writeString((*_iter544));
    }
    xfer += oprot->writeListEnd();
  }
@@ -13429,9 +13471,9 @@
         break;
       case 2:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast538;
-          xfer += iprot->readI32(ecast538);
-          this->principal_type = (PrincipalType::type)ecast538;
+          int32_t ecast545;
+          xfer += iprot->readI32(ecast545);
+          this->principal_type = (PrincipalType::type)ecast545;
          this->__isset.principal_type = true;
        } else {
          xfer += iprot->skip(ftype);
@@ -13515,14 +13557,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->success.clear();
-            uint32_t _size539;
-            ::apache::thrift::protocol::TType _etype542;
-            iprot->readListBegin(_etype542, _size539);
-            this->success.resize(_size539);
-            uint32_t _i543;
-            for (_i543 = 0; _i543 < _size539; ++_i543)
+            uint32_t _size546;
+            ::apache::thrift::protocol::TType _etype549;
+            iprot->readListBegin(_etype549, _size546);
+            this->success.resize(_size546);
+            uint32_t _i550;
+            for (_i550 = 0; _i550 < _size546; ++_i550)
            {
-              xfer += this->success[_i543].read(iprot);
+              xfer += this->success[_i550].read(iprot);
            }
            iprot->readListEnd();
          }
@@ -13561,10 +13603,10 @@
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<HiveObjectPrivilege> ::const_iterator _iter544;
-      for (_iter544 = this->success.begin(); _iter544 != this->success.end(); ++_iter544)
+      std::vector<HiveObjectPrivilege> ::const_iterator _iter551;
+      for (_iter551 = this->success.begin(); _iter551 != this->success.end(); ++_iter551)
      {
-        xfer += (*_iter544).write(oprot);
+        xfer += (*_iter551).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@ -13603,14 +13645,14 @@
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            (*(this->success)).clear();
-            uint32_t _size545;
-            ::apache::thrift::protocol::TType _etype548;
-            iprot->readListBegin(_etype548, _size545);
-            (*(this->success)).resize(_size545);
-            uint32_t _i549;
-            for (_i549 = 0; _i549 < _size545; ++_i549)
+            uint32_t _size552;
+            ::apache::thrift::protocol::TType _etype555;
+            iprot->readListBegin(_etype555, _size552);
+            (*(this->success)).resize(_size552);
+            uint32_t _i556;
+            for (_i556 = 0; _i556 < _size552; ++_i556)
            {
-              xfer += (*(this->success))[_i549].read(iprot);
+              xfer += (*(this->success))[_i556].read(iprot);
            }
            iprot->readListEnd();
          }
= &tbl_name; + args.part_vals = &part_vals; args.new_part = &new_part; args.write(oprot_); @@ -20809,7 +20852,7 @@ ThriftHiveMetastore_alter_partition_result result; try { - iface_->alter_partition(args.db_name, args.tbl_name, args.new_part); + iface_->alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part); } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true;
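[Editor's note -- illustrative sketch, not part of the patch] For readers tracing the wire-level change above: a minimal Java sketch of what every regenerated client's send_alter_partition() now does, using the generated alter_partition_args from this patch. It assumes an open Thrift protocol (oprot below) to a metastore and an already-populated Partition newPart; the database, table, and partition values are illustrative only.

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.alter_partition_args;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TProtocol;

public class AlterPartitionWireSketch {
  // part_vals is marshalled as a string list at field id 3, which pushes
  // new_part from field id 3 to field id 4 (see the renumbered structs below).
  static void sendAlterPartition(TProtocol oprot, Partition newPart) throws TException {
    alter_partition_args args = new alter_partition_args();
    args.setDb_name("compdb1");                          // illustrative names
    args.setTbl_name("comptbl1");
    args.setPart_vals(Arrays.asList("2011-07-11", "8")); // identifies the partition to change
    args.setNew_part(newPart);                           // carries the new values and metadata
    args.write(oprot);                                   // writes part_vals as TType.LIST, id 3
  }
}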
Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h =================================================================== --- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h (revision 1186991) +++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h (working copy) @@ -52,7 +52,7 @@ virtual void get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) = 0; virtual void get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) = 0; virtual void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) = 0; - virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0; + virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) = 0; virtual void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) = 0; virtual void partition_name_to_vals(std::vector & _return, const std::string& part_name) = 0; virtual void partition_name_to_spec(std::map & _return, const std::string& part_name) = 0; @@ -198,7 +198,7 @@ void get_partitions_by_names(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* names */) { return; } - void alter_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */) { + void alter_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* part_vals */, const Partition& /* new_part */) { return; } void get_config_value(std::string& /* _return */, const std::string& /* name */, const std::string& /* defaultValue */) { @@ -5501,9 +5501,10 @@ }; typedef struct _ThriftHiveMetastore_alter_partition_args__isset { - _ThriftHiveMetastore_alter_partition_args__isset() : db_name(false), tbl_name(false), new_part(false) {} + _ThriftHiveMetastore_alter_partition_args__isset() : db_name(false), tbl_name(false), part_vals(false), new_part(false) {} bool db_name; bool tbl_name; + bool part_vals; bool new_part; } _ThriftHiveMetastore_alter_partition_args__isset; @@ -5517,6 +5518,7 @@ std::string db_name; std::string tbl_name; + std::vector part_vals; Partition new_part; _ThriftHiveMetastore_alter_partition_args__isset __isset; @@ -5529,6 +5531,10 @@ tbl_name = val; } + void __set_part_vals(const std::vector & val) { + part_vals = val; + } + void __set_new_part(const Partition& val) { new_part = val; } @@ -5539,6 +5545,8 @@ return false; if (!(tbl_name == rhs.tbl_name)) return false; + if (!(part_vals == rhs.part_vals)) + return false; if (!(new_part == rhs.new_part)) return false; return true; @@ -5563,6 +5571,7 @@ const std::string* db_name; const std::string* tbl_name; + const std::vector * part_vals; const Partition* new_part; uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; @@ -9004,8 +9013,8 @@ void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names); void send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names); void recv_get_partitions_by_names(std::vector & _return); - void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); - void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); + void alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part); + void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part); void recv_alter_partition(); void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue); void send_get_config_value(const std::string& name, const std::string& defaultValue); @@ -9647,10 +9656,10 @@ } } - void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { + void alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { size_t sz = ifaces_.size(); for (size_t i = 0; i < sz; ++i) { - ifaces_[i]->alter_partition(db_name, tbl_name, new_part); + ifaces_[i]->alter_partition(db_name, tbl_name, part_vals, new_part); } } Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp =================================================================== --- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (revision 1186991) +++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (working copy) @@ -207,7 +207,7 @@ printf("get_partitions_by_names\n"); } - void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { + void alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { // Your implementation goes here printf("alter_partition\n"); } Index: metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb =================================================================== --- metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (revision 1186991) +++ metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (working copy) @@ -640,13 +640,13 @@ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_names failed: unknown result') end - def alter_partition(db_name, tbl_name, new_part) - send_alter_partition(db_name, tbl_name, new_part) + def alter_partition(db_name, tbl_name, part_vals, new_part) + send_alter_partition(db_name, tbl_name, part_vals, new_part) recv_alter_partition() end - def send_alter_partition(db_name, tbl_name, new_part) - send_message('alter_partition', Alter_partition_args, :db_name => db_name, :tbl_name => tbl_name, :new_part => new_part) + def send_alter_partition(db_name, tbl_name, part_vals, new_part) + send_message('alter_partition', Alter_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :new_part => new_part) end def recv_alter_partition() @@ -1553,7 +1553,7 @@ args = read_args(iprot, Alter_partition_args) result = Alter_partition_result.new() begin - 
@handler.alter_partition(args.db_name, args.tbl_name, args.new_part) + @handler.alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part) rescue InvalidOperationException => o1 result.o1 = o1 rescue MetaException => o2 @@ -3307,11 +3307,13 @@ include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 TBL_NAME = 2 - NEW_PART = 3 + PART_VALS = 3 + NEW_PART = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, NEW_PART => {:type => ::Thrift::Types::STRUCT, :name => 'new_part', :class => Partition} } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (revision 1186991) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (working copy) @@ -101,7 +101,7 @@ public List get_partitions_by_names(String db_name, String tbl_name, List names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public void alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException; public String get_config_value(String name, String defaultValue) throws ConfigValSecurityException, org.apache.thrift.TException; @@ -229,7 +229,7 @@ public void get_partitions_by_names(String db_name, String tbl_name, List names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void alter_partition(String db_name, String tbl_name, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_config_value(String name, String defaultValue, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -1426,17 +1426,18 @@ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result"); } - public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException + public void alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException { - send_alter_partition(db_name, tbl_name, new_part); + send_alter_partition(db_name, tbl_name, part_vals, new_part); recv_alter_partition(); } - public void send_alter_partition(String db_name, String tbl_name, Partition new_part) throws org.apache.thrift.TException + public void send_alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part) throws org.apache.thrift.TException { alter_partition_args args = new alter_partition_args(); args.setDb_name(db_name); 
args.setTbl_name(tbl_name); + args.setPart_vals(part_vals); args.setNew_part(new_part); sendBase("alter_partition", args); } @@ -3516,9 +3517,9 @@ } } - public void alter_partition(String db_name, String tbl_name, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - alter_partition_call method_call = new alter_partition_call(db_name, tbl_name, new_part, resultHandler, this, ___protocolFactory, ___transport); + alter_partition_call method_call = new alter_partition_call(db_name, tbl_name, part_vals, new_part, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -3526,11 +3527,13 @@ public static class alter_partition_call extends org.apache.thrift.async.TAsyncMethodCall { private String db_name; private String tbl_name; + private List part_vals; private Partition new_part; - public alter_partition_call(String db_name, String tbl_name, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + public alter_partition_call(String db_name, String tbl_name, List part_vals, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; + this.part_vals = part_vals; this.new_part = new_part; } @@ -3539,6 +3542,7 @@ alter_partition_args args = new alter_partition_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); + args.setPart_vals(part_vals); args.setNew_part(new_part); args.write(prot); prot.writeMessageEnd(); @@ -5337,7 +5341,7 @@ protected alter_partition_result getResult(I iface, alter_partition_args args) throws org.apache.thrift.TException { alter_partition_result result = new alter_partition_result(); try { - iface.alter_partition(args.db_name, args.tbl_name, args.new_part); + iface.alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part); } catch (InvalidOperationException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -40013,17 +40017,20 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new org.apache.thrift.protocol.TField("new_part", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new 
org.apache.thrift.protocol.TField("new_part", org.apache.thrift.protocol.TType.STRUCT, (short)4); private String db_name; // required private String tbl_name; // required + private List part_vals; // required private Partition new_part; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - NEW_PART((short)3, "new_part"); + PART_VALS((short)3, "part_vals"), + NEW_PART((short)4, "new_part"); private static final Map byName = new HashMap(); @@ -40042,7 +40049,9 @@ return DB_NAME; case 2: // TBL_NAME return TBL_NAME; - case 3: // NEW_PART + case 3: // PART_VALS + return PART_VALS; + case 4: // NEW_PART return NEW_PART; default: return null; @@ -40092,6 +40101,9 @@ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.NEW_PART, new org.apache.thrift.meta_data.FieldMetaData("new_part", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); @@ -40104,11 +40116,13 @@ public alter_partition_args( String db_name, String tbl_name, + List part_vals, Partition new_part) { this(); this.db_name = db_name; this.tbl_name = tbl_name; + this.part_vals = part_vals; this.new_part = new_part; } @@ -40122,6 +40136,13 @@ if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } + if (other.isSetPart_vals()) { + List __this__part_vals = new ArrayList(); + for (String other_element : other.part_vals) { + __this__part_vals.add(other_element); + } + this.part_vals = __this__part_vals; + } if (other.isSetNew_part()) { this.new_part = new Partition(other.new_part); } @@ -40135,6 +40156,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; + this.part_vals = null; this.new_part = null; } @@ -40184,6 +40206,44 @@ } } + public int getPart_valsSize() { + return (this.part_vals == null) ? 0 : this.part_vals.size(); + } + + public java.util.Iterator getPart_valsIterator() { + return (this.part_vals == null) ? 
null : this.part_vals.iterator(); + } + + public void addToPart_vals(String elem) { + if (this.part_vals == null) { + this.part_vals = new ArrayList(); + } + this.part_vals.add(elem); + } + + public List getPart_vals() { + return this.part_vals; + } + + public void setPart_vals(List part_vals) { + this.part_vals = part_vals; + } + + public void unsetPart_vals() { + this.part_vals = null; + } + + /** Returns true if field part_vals is set (has been assigned a value) and false otherwise */ + public boolean isSetPart_vals() { + return this.part_vals != null; + } + + public void setPart_valsIsSet(boolean value) { + if (!value) { + this.part_vals = null; + } + } + public Partition getNew_part() { return this.new_part; } @@ -40225,6 +40285,14 @@ } break; + case PART_VALS: + if (value == null) { + unsetPart_vals(); + } else { + setPart_vals((List)value); + } + break; + case NEW_PART: if (value == null) { unsetNew_part(); @@ -40244,6 +40312,9 @@ case TBL_NAME: return getTbl_name(); + case PART_VALS: + return getPart_vals(); + case NEW_PART: return getNew_part(); @@ -40262,6 +40333,8 @@ return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); + case PART_VALS: + return isSetPart_vals(); case NEW_PART: return isSetNew_part(); } @@ -40299,6 +40372,15 @@ return false; } + boolean this_present_part_vals = true && this.isSetPart_vals(); + boolean that_present_part_vals = true && that.isSetPart_vals(); + if (this_present_part_vals || that_present_part_vals) { + if (!(this_present_part_vals && that_present_part_vals)) + return false; + if (!this.part_vals.equals(that.part_vals)) + return false; + } + boolean this_present_new_part = true && this.isSetNew_part(); boolean that_present_new_part = true && that.isSetNew_part(); if (this_present_new_part || that_present_new_part) { @@ -40344,6 +40426,16 @@ return lastComparison; } } + lastComparison = Boolean.valueOf(isSetPart_vals()).compareTo(typedOther.isSetPart_vals()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPart_vals()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.part_vals, typedOther.part_vals); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetNew_part()).compareTo(typedOther.isSetNew_part()); if (lastComparison != 0) { return lastComparison; @@ -40385,7 +40477,24 @@ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; - case 3: // NEW_PART + case 3: // PART_VALS + if (field.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list219 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list219.size); + for (int _i220 = 0; _i220 < _list219.size; ++_i220) + { + String _elem221; // required + _elem221 = iprot.readString(); + this.part_vals.add(_elem221); + } + iprot.readListEnd(); + } + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 4: // NEW_PART if (field.type == org.apache.thrift.protocol.TType.STRUCT) { this.new_part = new Partition(); this.new_part.read(iprot); @@ -40416,6 +40525,18 @@ oprot.writeString(this.tbl_name); oprot.writeFieldEnd(); } + if (this.part_vals != null) { + oprot.writeFieldBegin(PART_VALS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); + for (String _iter222 : this.part_vals) + { + oprot.writeString(_iter222); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } if (this.new_part != null) { 
oprot.writeFieldBegin(NEW_PART_FIELD_DESC); this.new_part.write(oprot); @@ -40446,6 +40567,14 @@ } first = false; if (!first) sb.append(", "); + sb.append("part_vals:"); + if (this.part_vals == null) { + sb.append("null"); + } else { + sb.append(this.part_vals); + } + first = false; + if (!first) sb.append(", "); sb.append("new_part:"); if (this.new_part == null) { sb.append("null"); @@ -42236,13 +42365,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list219 = iprot.readListBegin(); - this.success = new ArrayList(_list219.size); - for (int _i220 = 0; _i220 < _list219.size; ++_i220) + org.apache.thrift.protocol.TList _list223 = iprot.readListBegin(); + this.success = new ArrayList(_list223.size); + for (int _i224 = 0; _i224 < _list223.size; ++_i224) { - String _elem221; // required - _elem221 = iprot.readString(); - this.success.add(_elem221); + String _elem225; // required + _elem225 = iprot.readString(); + this.success.add(_elem225); } iprot.readListEnd(); } @@ -42274,9 +42403,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter222 : this.success) + for (String _iter226 : this.success) { - oprot.writeString(_iter222); + oprot.writeString(_iter226); } oprot.writeListEnd(); } @@ -42953,15 +43082,15 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map223 = iprot.readMapBegin(); - this.success = new HashMap(2*_map223.size); - for (int _i224 = 0; _i224 < _map223.size; ++_i224) + org.apache.thrift.protocol.TMap _map227 = iprot.readMapBegin(); + this.success = new HashMap(2*_map227.size); + for (int _i228 = 0; _i228 < _map227.size; ++_i228) { - String _key225; // required - String _val226; // required - _key225 = iprot.readString(); - _val226 = iprot.readString(); - this.success.put(_key225, _val226); + String _key229; // required + String _val230; // required + _key229 = iprot.readString(); + _val230 = iprot.readString(); + this.success.put(_key229, _val230); } iprot.readMapEnd(); } @@ -42993,10 +43122,10 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (Map.Entry _iter227 : this.success.entrySet()) + for (Map.Entry _iter231 : this.success.entrySet()) { - oprot.writeString(_iter227.getKey()); - oprot.writeString(_iter227.getValue()); + oprot.writeString(_iter231.getKey()); + oprot.writeString(_iter231.getValue()); } oprot.writeMapEnd(); } @@ -43541,15 +43670,15 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map228 = iprot.readMapBegin(); - this.part_vals = new HashMap(2*_map228.size); - for (int _i229 = 0; _i229 < _map228.size; ++_i229) + org.apache.thrift.protocol.TMap _map232 = iprot.readMapBegin(); + this.part_vals = new HashMap(2*_map232.size); + for (int _i233 = 0; _i233 < _map232.size; ++_i233) { - String _key230; // required - String _val231; // required - _key230 = iprot.readString(); - _val231 = iprot.readString(); - this.part_vals.put(_key230, _val231); + String _key234; // required + String _val235; // required + _key234 = iprot.readString(); + _val235 = iprot.readString(); + this.part_vals.put(_key234, _val235); } iprot.readMapEnd(); } @@ -43591,10 +43720,10 @@ 
oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (Map.Entry _iter232 : this.part_vals.entrySet()) + for (Map.Entry _iter236 : this.part_vals.entrySet()) { - oprot.writeString(_iter232.getKey()); - oprot.writeString(_iter232.getValue()); + oprot.writeString(_iter236.getKey()); + oprot.writeString(_iter236.getValue()); } oprot.writeMapEnd(); } @@ -44890,15 +45019,15 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map233 = iprot.readMapBegin(); - this.part_vals = new HashMap(2*_map233.size); - for (int _i234 = 0; _i234 < _map233.size; ++_i234) + org.apache.thrift.protocol.TMap _map237 = iprot.readMapBegin(); + this.part_vals = new HashMap(2*_map237.size); + for (int _i238 = 0; _i238 < _map237.size; ++_i238) { - String _key235; // required - String _val236; // required - _key235 = iprot.readString(); - _val236 = iprot.readString(); - this.part_vals.put(_key235, _val236); + String _key239; // required + String _val240; // required + _key239 = iprot.readString(); + _val240 = iprot.readString(); + this.part_vals.put(_key239, _val240); } iprot.readMapEnd(); } @@ -44940,10 +45069,10 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (Map.Entry _iter237 : this.part_vals.entrySet()) + for (Map.Entry _iter241 : this.part_vals.entrySet()) { - oprot.writeString(_iter237.getKey()); - oprot.writeString(_iter237.getValue()); + oprot.writeString(_iter241.getKey()); + oprot.writeString(_iter241.getValue()); } oprot.writeMapEnd(); } @@ -50549,14 +50678,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list238 = iprot.readListBegin(); - this.success = new ArrayList(_list238.size); - for (int _i239 = 0; _i239 < _list238.size; ++_i239) + org.apache.thrift.protocol.TList _list242 = iprot.readListBegin(); + this.success = new ArrayList(_list242.size); + for (int _i243 = 0; _i243 < _list242.size; ++_i243) { - Index _elem240; // required - _elem240 = new Index(); - _elem240.read(iprot); - this.success.add(_elem240); + Index _elem244; // required + _elem244 = new Index(); + _elem244.read(iprot); + this.success.add(_elem244); } iprot.readListEnd(); } @@ -50596,9 +50725,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Index _iter241 : this.success) + for (Index _iter245 : this.success) { - _iter241.write(oprot); + _iter245.write(oprot); } oprot.writeListEnd(); } @@ -51460,13 +51589,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list242 = iprot.readListBegin(); - this.success = new ArrayList(_list242.size); - for (int _i243 = 0; _i243 < _list242.size; ++_i243) + org.apache.thrift.protocol.TList _list246 = iprot.readListBegin(); + this.success = new ArrayList(_list246.size); + for (int _i247 = 0; _i247 < _list246.size; ++_i247) { - String _elem244; // required - _elem244 = iprot.readString(); - this.success.add(_elem244); + String _elem248; // required + _elem248 = iprot.readString(); + this.success.add(_elem248); } iprot.readListEnd(); } @@ -51498,9 
+51627,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter245 : this.success) + for (String _iter249 : this.success) { - oprot.writeString(_iter245); + oprot.writeString(_iter249); } oprot.writeListEnd(); } @@ -53429,13 +53558,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list246 = iprot.readListBegin(); - this.success = new ArrayList(_list246.size); - for (int _i247 = 0; _i247 < _list246.size; ++_i247) + org.apache.thrift.protocol.TList _list250 = iprot.readListBegin(); + this.success = new ArrayList(_list250.size); + for (int _i251 = 0; _i251 < _list250.size; ++_i251) { - String _elem248; // required - _elem248 = iprot.readString(); - this.success.add(_elem248); + String _elem252; // required + _elem252 = iprot.readString(); + this.success.add(_elem252); } iprot.readListEnd(); } @@ -53467,9 +53596,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter249 : this.success) + for (String _iter253 : this.success) { - oprot.writeString(_iter249); + oprot.writeString(_iter253); } oprot.writeListEnd(); } @@ -56243,14 +56372,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list250 = iprot.readListBegin(); - this.success = new ArrayList(_list250.size); - for (int _i251 = 0; _i251 < _list250.size; ++_i251) + org.apache.thrift.protocol.TList _list254 = iprot.readListBegin(); + this.success = new ArrayList(_list254.size); + for (int _i255 = 0; _i255 < _list254.size; ++_i255) { - Role _elem252; // required - _elem252 = new Role(); - _elem252.read(iprot); - this.success.add(_elem252); + Role _elem256; // required + _elem256 = new Role(); + _elem256.read(iprot); + this.success.add(_elem256); } iprot.readListEnd(); } @@ -56282,9 +56411,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Role _iter253 : this.success) + for (Role _iter257 : this.success) { - _iter253.write(oprot); + _iter257.write(oprot); } oprot.writeListEnd(); } @@ -56745,13 +56874,13 @@ case 3: // GROUP_NAMES if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list254 = iprot.readListBegin(); - this.group_names = new ArrayList(_list254.size); - for (int _i255 = 0; _i255 < _list254.size; ++_i255) + org.apache.thrift.protocol.TList _list258 = iprot.readListBegin(); + this.group_names = new ArrayList(_list258.size); + for (int _i259 = 0; _i259 < _list258.size; ++_i259) { - String _elem256; // required - _elem256 = iprot.readString(); - this.group_names.add(_elem256); + String _elem260; // required + _elem260 = iprot.readString(); + this.group_names.add(_elem260); } iprot.readListEnd(); } @@ -56786,9 +56915,9 @@ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.group_names.size())); - for (String _iter257 : this.group_names) + for (String _iter261 : this.group_names) { - oprot.writeString(_iter257); + oprot.writeString(_iter261); } oprot.writeListEnd(); } @@ -58035,14 +58164,14 @@ case 0: // SUCCESS if (field.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list258 = iprot.readListBegin(); - this.success = new ArrayList(_list258.size); - for (int _i259 = 0; _i259 < _list258.size; ++_i259) + org.apache.thrift.protocol.TList _list262 = iprot.readListBegin(); + this.success = new ArrayList(_list262.size); + for (int _i263 = 0; _i263 < _list262.size; ++_i263) { - HiveObjectPrivilege _elem260; // required - _elem260 = new HiveObjectPrivilege(); - _elem260.read(iprot); - this.success.add(_elem260); + HiveObjectPrivilege _elem264; // required + _elem264 = new HiveObjectPrivilege(); + _elem264.read(iprot); + this.success.add(_elem264); } iprot.readListEnd(); } @@ -58074,9 +58203,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (HiveObjectPrivilege _iter261 : this.success) + for (HiveObjectPrivilege _iter265 : this.success) { - _iter261.write(oprot); + _iter265.write(oprot); } oprot.writeListEnd(); } Index: metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php =================================================================== --- metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (revision 1186991) +++ metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (working copy) @@ -47,7 +47,7 @@ public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts); public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts); public function get_partitions_by_names($db_name, $tbl_name, $names); - public function alter_partition($db_name, $tbl_name, $new_part); + public function alter_partition($db_name, $tbl_name, $part_vals, $new_part); public function get_config_value($name, $defaultValue); public function partition_name_to_vals($part_name); public function partition_name_to_spec($part_name); @@ -2240,17 +2240,18 @@ throw new Exception("get_partitions_by_names failed: unknown result"); } - public function alter_partition($db_name, $tbl_name, $new_part) + public function alter_partition($db_name, $tbl_name, $part_vals, $new_part) { - $this->send_alter_partition($db_name, $tbl_name, $new_part); + $this->send_alter_partition($db_name, $tbl_name, $part_vals, $new_part); $this->recv_alter_partition(); } - public function send_alter_partition($db_name, $tbl_name, $new_part) + public function send_alter_partition($db_name, $tbl_name, $part_vals, $new_part) { $args = new ThriftHiveMetastore_alter_partition_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; + $args->part_vals = $part_vals; $args->new_part = $new_part; $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) @@ -12601,6 +12602,7 @@ public $db_name = null; public $tbl_name = null; + public $part_vals = null; public $new_part = null; public function __construct($vals=null) { @@ -12615,6 +12617,14 @@ 'type' => TType::STRING, ), 3 => array( + 'var' => 'part_vals', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 4 => array( 'var' => 'new_part', 'type' => TType::STRUCT, 'class' => 'Partition', @@ -12628,6 +12638,9 @@ if (isset($vals['tbl_name'])) { $this->tbl_name = $vals['tbl_name']; } + if (isset($vals['part_vals'])) { + $this->part_vals = $vals['part_vals']; + } if (isset($vals['new_part'])) { $this->new_part = $vals['new_part']; } @@ -12668,6 
+12681,23 @@ } break; case 3: + if ($ftype == TType::LST) { + $this->part_vals = array(); + $_size386 = 0; + $_etype389 = 0; + $xfer += $input->readListBegin($_etype389, $_size386); + for ($_i390 = 0; $_i390 < $_size386; ++$_i390) + { + $elem391 = null; + $xfer += $input->readString($elem391); + $this->part_vals []= $elem391; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: if ($ftype == TType::STRUCT) { $this->new_part = new Partition(); $xfer += $this->new_part->read($input); @@ -12698,11 +12728,28 @@ $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } + if ($this->part_vals !== null) { + if (!is_array($this->part_vals)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); + { + $output->writeListBegin(TType::STRING, count($this->part_vals)); + { + foreach ($this->part_vals as $iter392) + { + $xfer += $output->writeString($iter392); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } if ($this->new_part !== null) { if (!is_object($this->new_part)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('new_part', TType::STRUCT, 3); + $xfer += $output->writeFieldBegin('new_part', TType::STRUCT, 4); $xfer += $this->new_part->write($output); $xfer += $output->writeFieldEnd(); } @@ -13123,14 +13170,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size386 = 0; - $_etype389 = 0; - $xfer += $input->readListBegin($_etype389, $_size386); - for ($_i390 = 0; $_i390 < $_size386; ++$_i390) + $_size393 = 0; + $_etype396 = 0; + $xfer += $input->readListBegin($_etype396, $_size393); + for ($_i397 = 0; $_i397 < $_size393; ++$_i397) { - $elem391 = null; - $xfer += $input->readString($elem391); - $this->success []= $elem391; + $elem398 = null; + $xfer += $input->readString($elem398); + $this->success []= $elem398; } $xfer += $input->readListEnd(); } else { @@ -13166,9 +13213,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter392) + foreach ($this->success as $iter399) { - $xfer += $output->writeString($iter392); + $xfer += $output->writeString($iter399); } } $output->writeListEnd(); @@ -13319,17 +13366,17 @@ case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size393 = 0; - $_ktype394 = 0; - $_vtype395 = 0; - $xfer += $input->readMapBegin($_ktype394, $_vtype395, $_size393); - for ($_i397 = 0; $_i397 < $_size393; ++$_i397) + $_size400 = 0; + $_ktype401 = 0; + $_vtype402 = 0; + $xfer += $input->readMapBegin($_ktype401, $_vtype402, $_size400); + for ($_i404 = 0; $_i404 < $_size400; ++$_i404) { - $key398 = ''; - $val399 = ''; - $xfer += $input->readString($key398); - $xfer += $input->readString($val399); - $this->success[$key398] = $val399; + $key405 = ''; + $val406 = ''; + $xfer += $input->readString($key405); + $xfer += $input->readString($val406); + $this->success[$key405] = $val406; } $xfer += $input->readMapEnd(); } else { @@ -13365,10 +13412,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter400 => $viter401) + foreach ($this->success as $kiter407 => $viter408) { - $xfer += $output->writeString($kiter400); - $xfer += $output->writeString($viter401); + $xfer += $output->writeString($kiter407); + $xfer += $output->writeString($viter408); } } 
$output->writeMapEnd(); @@ -13476,17 +13523,17 @@ case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size402 = 0; - $_ktype403 = 0; - $_vtype404 = 0; - $xfer += $input->readMapBegin($_ktype403, $_vtype404, $_size402); - for ($_i406 = 0; $_i406 < $_size402; ++$_i406) + $_size409 = 0; + $_ktype410 = 0; + $_vtype411 = 0; + $xfer += $input->readMapBegin($_ktype410, $_vtype411, $_size409); + for ($_i413 = 0; $_i413 < $_size409; ++$_i413) { - $key407 = ''; - $val408 = ''; - $xfer += $input->readString($key407); - $xfer += $input->readString($val408); - $this->part_vals[$key407] = $val408; + $key414 = ''; + $val415 = ''; + $xfer += $input->readString($key414); + $xfer += $input->readString($val415); + $this->part_vals[$key414] = $val415; } $xfer += $input->readMapEnd(); } else { @@ -13531,10 +13578,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter409 => $viter410) + foreach ($this->part_vals as $kiter416 => $viter417) { - $xfer += $output->writeString($kiter409); - $xfer += $output->writeString($viter410); + $xfer += $output->writeString($kiter416); + $xfer += $output->writeString($viter417); } } $output->writeMapEnd(); @@ -13826,17 +13873,17 @@ case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size411 = 0; - $_ktype412 = 0; - $_vtype413 = 0; - $xfer += $input->readMapBegin($_ktype412, $_vtype413, $_size411); - for ($_i415 = 0; $_i415 < $_size411; ++$_i415) + $_size418 = 0; + $_ktype419 = 0; + $_vtype420 = 0; + $xfer += $input->readMapBegin($_ktype419, $_vtype420, $_size418); + for ($_i422 = 0; $_i422 < $_size418; ++$_i422) { - $key416 = ''; - $val417 = ''; - $xfer += $input->readString($key416); - $xfer += $input->readString($val417); - $this->part_vals[$key416] = $val417; + $key423 = ''; + $val424 = ''; + $xfer += $input->readString($key423); + $xfer += $input->readString($val424); + $this->part_vals[$key423] = $val424; } $xfer += $input->readMapEnd(); } else { @@ -13881,10 +13928,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter418 => $viter419) + foreach ($this->part_vals as $kiter425 => $viter426) { - $xfer += $output->writeString($kiter418); - $xfer += $output->writeString($viter419); + $xfer += $output->writeString($kiter425); + $xfer += $output->writeString($viter426); } } $output->writeMapEnd(); @@ -15244,15 +15291,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size420 = 0; - $_etype423 = 0; - $xfer += $input->readListBegin($_etype423, $_size420); - for ($_i424 = 0; $_i424 < $_size420; ++$_i424) + $_size427 = 0; + $_etype430 = 0; + $xfer += $input->readListBegin($_etype430, $_size427); + for ($_i431 = 0; $_i431 < $_size427; ++$_i431) { - $elem425 = null; - $elem425 = new Index(); - $xfer += $elem425->read($input); - $this->success []= $elem425; + $elem432 = null; + $elem432 = new Index(); + $xfer += $elem432->read($input); + $this->success []= $elem432; } $xfer += $input->readListEnd(); } else { @@ -15296,9 +15343,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter426) + foreach ($this->success as $iter433) { - $xfer += $iter426->write($output); + $xfer += $iter433->write($output); } } $output->writeListEnd(); @@ -15490,14 +15537,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size427 = 0; - $_etype430 = 0; - $xfer += $input->readListBegin($_etype430, $_size427); - for ($_i431 = 0; $_i431 < 
$_size427; ++$_i431) + $_size434 = 0; + $_etype437 = 0; + $xfer += $input->readListBegin($_etype437, $_size434); + for ($_i438 = 0; $_i438 < $_size434; ++$_i438) { - $elem432 = null; - $xfer += $input->readString($elem432); - $this->success []= $elem432; + $elem439 = null; + $xfer += $input->readString($elem439); + $this->success []= $elem439; } $xfer += $input->readListEnd(); } else { @@ -15533,9 +15580,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter433) + foreach ($this->success as $iter440) { - $xfer += $output->writeString($iter433); + $xfer += $output->writeString($iter440); } } $output->writeListEnd(); @@ -15997,14 +16044,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size434 = 0; - $_etype437 = 0; - $xfer += $input->readListBegin($_etype437, $_size434); - for ($_i438 = 0; $_i438 < $_size434; ++$_i438) + $_size441 = 0; + $_etype444 = 0; + $xfer += $input->readListBegin($_etype444, $_size441); + for ($_i445 = 0; $_i445 < $_size441; ++$_i445) { - $elem439 = null; - $xfer += $input->readString($elem439); - $this->success []= $elem439; + $elem446 = null; + $xfer += $input->readString($elem446); + $this->success []= $elem446; } $xfer += $input->readListEnd(); } else { @@ -16040,9 +16087,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter440) + foreach ($this->success as $iter447) { - $xfer += $output->writeString($iter440); + $xfer += $output->writeString($iter447); } } $output->writeListEnd(); @@ -16682,15 +16729,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size441 = 0; - $_etype444 = 0; - $xfer += $input->readListBegin($_etype444, $_size441); - for ($_i445 = 0; $_i445 < $_size441; ++$_i445) + $_size448 = 0; + $_etype451 = 0; + $xfer += $input->readListBegin($_etype451, $_size448); + for ($_i452 = 0; $_i452 < $_size448; ++$_i452) { - $elem446 = null; - $elem446 = new Role(); - $xfer += $elem446->read($input); - $this->success []= $elem446; + $elem453 = null; + $elem453 = new Role(); + $xfer += $elem453->read($input); + $this->success []= $elem453; } $xfer += $input->readListEnd(); } else { @@ -16726,9 +16773,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter447) + foreach ($this->success as $iter454) { - $xfer += $iter447->write($output); + $xfer += $iter454->write($output); } } $output->writeListEnd(); @@ -16826,14 +16873,14 @@ case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size448 = 0; - $_etype451 = 0; - $xfer += $input->readListBegin($_etype451, $_size448); - for ($_i452 = 0; $_i452 < $_size448; ++$_i452) + $_size455 = 0; + $_etype458 = 0; + $xfer += $input->readListBegin($_etype458, $_size455); + for ($_i459 = 0; $_i459 < $_size455; ++$_i459) { - $elem453 = null; - $xfer += $input->readString($elem453); - $this->group_names []= $elem453; + $elem460 = null; + $xfer += $input->readString($elem460); + $this->group_names []= $elem460; } $xfer += $input->readListEnd(); } else { @@ -16874,9 +16921,9 @@ { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter454) + foreach ($this->group_names as $iter461) { - $xfer += $output->writeString($iter454); + $xfer += $output->writeString($iter461); } } $output->writeListEnd(); @@ -17163,15 +17210,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size455 = 0; - $_etype458 = 0; - $xfer += $input->readListBegin($_etype458, $_size455); 
- for ($_i459 = 0; $_i459 < $_size455; ++$_i459) + $_size462 = 0; + $_etype465 = 0; + $xfer += $input->readListBegin($_etype465, $_size462); + for ($_i466 = 0; $_i466 < $_size462; ++$_i466) { - $elem460 = null; - $elem460 = new HiveObjectPrivilege(); - $xfer += $elem460->read($input); - $this->success []= $elem460; + $elem467 = null; + $elem467 = new HiveObjectPrivilege(); + $xfer += $elem467->read($input); + $this->success []= $elem467; } $xfer += $input->readListEnd(); } else { @@ -17207,9 +17254,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter461) + foreach ($this->success as $iter468) { - $xfer += $iter461->write($output); + $xfer += $iter468->write($output); } } $output->writeListEnd(); Index: metastore/if/hive_metastore.thrift =================================================================== --- metastore/if/hive_metastore.thrift (revision 1186991) +++ metastore/if/hive_metastore.thrift (working copy) @@ -366,10 +366,10 @@ throws(1:MetaException o1, 2:NoSuchObjectException o2) // changes the partition to the new partition object. partition is identified from the part values - // in the new_part + // in the new_part if part_vals is null; otherwise, the partition is identified from part_vals // * See notes on DDL_TIME - void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part) - throws(1:InvalidOperationException o1, 2:MetaException o2) + void alter_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:Partition new_part) + throws (1:InvalidOperationException o1, 2:MetaException o2) // gets the value of the configuration key in the metastore server. returns // defaultValue if the key does not exist. if the configuration key does not
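[Editor's note -- illustrative sketch, not part of the patch] The IDL comment above defines the new contract: a non-null part_vals selects the partition to change, so the Partition object may carry different values (a rename); a null part_vals preserves the old behavior. A minimal Java sketch against the regenerated client, assuming an open Thrift protocol (prot below) to a running metastore and the ds=2011-07-11/hr=8 partition from the test case; all names are illustrative.

import java.util.Arrays;
import java.util.List;
import org.apache.thrift.protocol.TProtocol;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

public class AlterPartitionSemanticsSketch {
  static void demo(TProtocol prot) throws Exception {
    ThriftHiveMetastore.Client client = new ThriftHiveMetastore.Client(prot);
    List<String> oldVals = Arrays.asList("2011-07-11", "8");
    Partition part = client.get_partition("compdb1", "comptbl1", oldVals);

    // part_vals supplied: the target partition is looked up from oldVals, so the
    // values inside 'part' may differ -- this is what makes
    // ALTER TABLE ... PARTITION ... RENAME TO PARTITION ... possible.
    part.setValues(Arrays.asList("2011-07-12", "9"));
    client.alter_partition("compdb1", "comptbl1", oldVals, part);

    // part_vals null: the pre-patch behavior. The partition is identified from the
    // values already inside 'part'; only its metadata is updated in place.
    part.getParameters().put("retention", "30");
    client.alter_partition("compdb1", "comptbl1", null, part);
  }
}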
Index: ql/src/test/results/clientnegative/alter_rename_partition_failure3.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_rename_partition_failure3.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_rename_partition_failure3.q.out (revision 0) @@ -0,0 +1,31 @@ +PREHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition +PREHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='old_part1:', pcol2='old_part2:', pcol3='old_part3:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@alter_rename_partition +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +FAILED: Error in metadata: Unable to rename partition. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out (revision 0) @@ -0,0 +1,31 @@ +PREHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition +PREHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='old_part1:', pcol2='old_part2:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@alter_rename_partition +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +FAILED: 
Error in metadata: Unable to rename partition. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out (revision 0) @@ -0,0 +1,30 @@ +PREHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition +PREHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: alter table alter_rename_partition partition (pCol1='nonexist_part1:', pcol2='nonexist_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@alter_rename_partition +Failed with exception null +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientpositive/alter_rename_partition.q.out =================================================================== --- ql/src/test/results/clientpositive/alter_rename_partition.q.out (revision 0) +++ ql/src/test/results/clientpositive/alter_rename_partition.q.out (revision 0) @@ -0,0 +1,241 @@ +PREHOOK: query: -- Cleanup +DROP TABLE alter_rename_partition_src +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Cleanup +DROP TABLE alter_rename_partition_src +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE alter_rename_partition +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE alter_rename_partition +POSTHOOK: type: DROPTABLE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json 
+src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition +PREHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:' +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +PREHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-37_355_2637910598610864510/-mr-10000 +POSTHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-37_355_2637910598610864510/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +1 old_part1: old_part2: +2 old_part1: old_part2: +3 old_part1: old_part2: +4 old_part1: old_part2: +5 old_part1: old_part2: +6 old_part1: old_part2: +PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@alter_rename_partition +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:') +POSTHOOK: type: ALTERTABLE_RENAMEPART +POSTHOOK: Input: default@alter_rename_partition +POSTHOOK: Input: 
default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Output: default@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW PARTITIONS alter_rename_partition +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter_rename_partition +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +pcol1=new_part1%3A/pcol2=new_part2%3A +PREHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:' +PREHOOK: type: QUERY +PREHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-38_182_2669936000084272225/-mr-10000 +POSTHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-38_182_2669936000084272225/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: select * from alter_rename_partition where pcol1='new_part1:' and pcol2='new_part2:' +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +PREHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-38_238_7418880406333524317/-mr-10000 +POSTHOOK: query: select * from alter_rename_partition where pcol1='new_part1:' and pcol2='new_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-38_238_7418880406333524317/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +1 new_part1: new_part2: +2 new_part1: new_part2: +3 new_part1: new_part2: +4 new_part1: new_part2: +5 new_part1: new_part2: +6 new_part1: new_part2: +PREHOOK: query: -- Cleanup +DROP TABLE alter_rename_partition_src +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: -- Cleanup +DROP TABLE alter_rename_partition_src +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition_src +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: DROP TABLE alter_rename_partition +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter_rename_partition +PREHOOK: Output: default@alter_rename_partition +POSTHOOK: query: DROP TABLE alter_rename_partition +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter_rename_partition +POSTHOOK: Output: default@alter_rename_partition +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 
SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- With non-default Database + +CREATE DATABASE alter_rename_partition_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- With non-default Database + +CREATE DATABASE alter_rename_partition_db +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: USE alter_rename_partition_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE alter_rename_partition_db +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter_rename_partition_src (col1 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter_rename_partition_src (col1 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition_src +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: alter_rename_partition_db@alter_rename_partition_src +POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition_src +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter_rename_partition (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter_rename_partition (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: INSERT OVERWRITE TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') SELECT col1 FROM 
alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: alter_rename_partition_db@alter_rename_partition_src +PREHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: INSERT OVERWRITE TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') SELECT col1 FROM alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition_src +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' AND pcol2='old_part2:' +PREHOOK: type: QUERY +PREHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +PREHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-44_652_1943699924506676382/-mr-10000 +POSTHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' AND pcol2='old_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-44_652_1943699924506676382/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +1 old_part1: old_part2: +2 old_part1: old_part2: +3 old_part1: old_part2: +4 old_part1: old_part2: +5 old_part1: old_part2: +6 old_part1: old_part2: +PREHOOK: query: ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: alter_rename_partition_db@alter_rename_partition +PREHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:') +POSTHOOK: type: ALTERTABLE_RENAMEPART +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE 
[(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW PARTITIONS alter_rename_partition +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter_rename_partition +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +pcol1=new_part1%3A/pcol2=new_part2%3A +PREHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' and pcol2='old_part2:' +PREHOOK: type: QUERY +PREHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-45_358_4731213338640541471/-mr-10000 +POSTHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' and pcol2='old_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-45_358_4731213338640541471/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='new_part1:' and pcol2='new_part2:' +PREHOOK: type: QUERY +PREHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +PREHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-45_423_4488965959309624920/-mr-10000 +POSTHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='new_part1:' and pcol2='new_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-08-31_19-12-45_423_4488965959309624920/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +1 new_part1: new_part2: +2 new_part1: new_part2: +3 new_part1: new_part2: +4 new_part1: new_part2: +5 new_part1: new_part2: +6 new_part1: new_part2: Index: ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out =================================================================== --- ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out (revision 0) +++ ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out (revision 0) @@ -0,0 +1,144 @@ +PREHOOK: query: create table src_auth_tmp as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table src_auth_tmp as select * from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_auth_tmp +PREHOOK: query: create table authorization_part (key int, value string) 
partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part +POSTHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part +PREHOOK: query: grant select on table src_auth_tmp to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@src_auth_tmp +POSTHOOK: query: grant select on table src_auth_tmp to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@src_auth_tmp +PREHOOK: query: -- column grant to user +grant Create on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@authorization_part +POSTHOOK: query: -- column grant to user +grant Create on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: grant Update on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@authorization_part +POSTHOOK: query: grant Update on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: grant Drop on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@authorization_part +POSTHOOK: query: grant Drop on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Create +grantTime Wed Aug 31 19:13:54 PDT 2011 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Update +grantTime Wed Aug 31 19:13:54 PDT 2011 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Drop +grantTime Wed Aug 31 19:13:54 PDT 2011 +grantor hive_test_user +PREHOOK: query: grant select(key) on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@authorization_part +POSTHOOK: query: grant select(key) on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp +PREHOOK: type: QUERY +PREHOOK: Input: default@src_auth_tmp +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_auth_tmp +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: 
authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] + +database default +table authorization_part +partition ds=2010 +columnName key +principalName hive_test_user +principalType USER +privilege Select +grantTime Wed Aug 31 19:14:01 PDT 2011 +grantor hive_test_user +PREHOOK: query: alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp') +POSTHOOK: type: ALTERTABLE_RENAMEPART +POSTHOOK: Input: default@authorization_part +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Output: default@authorization_part@ds=2010_tmp +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010_tmp') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010_tmp') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] + +database default +table authorization_part +partition ds=2010_tmp +columnName key +principalName hive_test_user +principalType USER +privilege Select +grantTime Wed Aug 31 19:14:01 PDT 2011 +grantor hive_test_user +PREHOOK: query: drop table authorization_part +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part +POSTHOOK: query: drop table authorization_part +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] Index: ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q =================================================================== --- 
ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q (revision 0) @@ -0,0 +1,6 @@ +create table alter_rename_partition_src ( col1 string ) stored as textfile ; +load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ; +create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile; +insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ; + +alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='old_part1:', pcol2='old_part2:'); Index: ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q =================================================================== --- ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q (revision 0) @@ -0,0 +1,6 @@ +create table alter_rename_partition_src ( col1 string ) stored as textfile ; +load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ; +create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile; +insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ; + +alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='old_part1:', pcol2='old_part2:', pcol3='old_part3:'); \ No newline at end of file Index: ql/src/test/queries/clientnegative/alter_rename_partition_failure.q =================================================================== --- ql/src/test/queries/clientnegative/alter_rename_partition_failure.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_rename_partition_failure.q (revision 0) @@ -0,0 +1,6 @@ +create table alter_rename_partition_src ( col1 string ) stored as textfile ; +load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ; +create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile; +insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ; + +alter table alter_rename_partition partition (pCol1='nonexist_part1:', pcol2='nonexist_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:'); Index: ql/src/test/queries/clientpositive/alter_rename_partition.q =================================================================== --- ql/src/test/queries/clientpositive/alter_rename_partition.q (revision 0) +++ ql/src/test/queries/clientpositive/alter_rename_partition.q (revision 0) @@ -0,0 +1,41 @@ +-- Cleanup +DROP TABLE alter_rename_partition_src; +DROP TABLE alter_rename_partition; +SHOW TABLES; + +create table alter_rename_partition_src ( col1 string ) stored as textfile ; +load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ; + +create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile; + +insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ; +select * 
from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:'; + +alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:'); +SHOW PARTITIONS alter_rename_partition; +select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:'; +select * from alter_rename_partition where pcol1='new_part1:' and pcol2='new_part2:'; + +-- Cleanup +DROP TABLE alter_rename_partition_src; +DROP TABLE alter_rename_partition; +SHOW TABLES; + +-- With non-default Database + +CREATE DATABASE alter_rename_partition_db; +USE alter_rename_partition_db; +SHOW TABLES; + +CREATE TABLE alter_rename_partition_src (col1 STRING) STORED AS TEXTFILE ; +LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter_rename_partition_src ; + +CREATE TABLE alter_rename_partition (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE; + +INSERT OVERWRITE TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') SELECT col1 FROM alter_rename_partition_src ; +SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' AND pcol2='old_part2:'; + +ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:'); +SHOW PARTITIONS alter_rename_partition; +SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' and pcol2='old_part2:'; +SELECT * FROM alter_rename_partition WHERE pcol1='new_part1:' and pcol2='new_part2:'; Index: ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q =================================================================== --- ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q (revision 0) +++ ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q (revision 0) @@ -0,0 +1,20 @@ +create table src_auth_tmp as select * from src; + +create table authorization_part (key int, value string) partitioned by (ds string); +ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE"); +set hive.security.authorization.enabled=true; +grant select on table src_auth_tmp to user hive_test_user; + +-- column grant to user +grant Create on table authorization_part to user hive_test_user; +grant Update on table authorization_part to user hive_test_user; +grant Drop on table authorization_part to user hive_test_user; + +show grant user hive_test_user on table authorization_part; +grant select(key) on table authorization_part to user hive_test_user; +insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp; +show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); +alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp'); +show grant user hive_test_user on table authorization_part(key) partition (ds='2010_tmp'); + +drop table authorization_part; Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (revision 1186991) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (working copy) @@ -518,6 +518,28 @@ } /** + * Set this partition's values from a partition spec. + * + * @param partSpec + * Partition spec, mapping each partition column name to a value. + * @throws HiveException + * Thrown if the spec does not supply a value for every partition + * column of the table. + */ + public void setValues(Map partSpec) + throws HiveException { + List pvals = new ArrayList(); + for (FieldSchema field : table.getPartCols()) { + String val = partSpec.get(field.getName()); + if (val == null) { + throw new HiveException("Partition spec is invalid: no value for key " + + field.getName()); + } + pvals.add(val); + } + tPartition.setValues(pvals); + }
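A caller-side sketch of the Partition.setValues(...) contract above; the Partition instance and the pcol1/pcol2 column names are assumptions borrowed from the tests in this patch, not part of the API:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;

    public class SetValuesSketch {
      // 'part' is assumed to belong to a table partitioned by (pcol1, pcol2).
      static void retarget(Partition part) throws HiveException {
        Map<String, String> newSpec = new LinkedHashMap<String, String>();
        newSpec.put("pcol1", "new_part1:");
        newSpec.put("pcol2", "new_part2:");
        // setValues checks the spec against the table's partition columns and
        // stores the values in partition-column order.
        part.setValues(newSpec);
      }
    }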
+ + /** * @param protectMode */ public void setProtectMode(ProtectMode protectMode){ Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (revision 1186991) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (working copy) @@ -61,6 +61,7 @@ import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.HiveObjectType; import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -426,6 +427,53 @@ } } + /** + * Rename an existing partition to match a new partition spec. + * + * @param tbl + * existing table + * @param oldPartSpec + * spec of the partition to be renamed + * @param newPart + * partition object carrying the new spec + * @throws HiveException + * if the old and new specs are inconsistent with the table's + * partition columns, or if the metastore rename fails + */ + public void renamePartition(Table tbl, Map oldPartSpec, Partition newPart) + throws HiveException { + try { + Map newPartSpec = newPart.getSpec(); + if (oldPartSpec.keySet().size() != tbl.getPartCols().size() + || newPartSpec.keySet().size() != tbl.getPartCols().size()) { + throw new HiveException("Unable to rename partition: the number of partition columns does not match the table."); + } + if (!oldPartSpec.keySet().equals(newPartSpec.keySet())){ + throw new HiveException("Unable to rename partition: the old and new specs name different partition columns."); + } + List pvals = new ArrayList(); + + for (FieldSchema field : tbl.getPartCols()) { + String val = oldPartSpec.get(field.getName()); + if (val == null || val.length() == 0) { + throw new HiveException("renamePartition: value for partition key " + + field.getName() + " is null or empty"); + } + pvals.add(val); + } + getMSC().renamePartition(tbl.getDbName(), tbl.getTableName(), pvals, + newPart.getTPartition()); + + } catch (InvalidOperationException e){ + throw new HiveException("Unable to rename partition.", e); + } catch (MetaException e) { + throw new HiveException("Unable to rename partition.", e); + } catch (TException e) { + throw new HiveException("Unable to rename partition.", e); + } + } + public void alterDatabase(String dbName, Database db) throws HiveException { try {
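How the ql layer is expected to drive Hive.renamePartition(...) above; a minimal sketch, with the database, table, and spec values taken from alter_rename_partition.q purely for illustration:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class RenamePartitionSketch {
      static void rename(Hive db) throws HiveException {
        Table tbl = db.getTable("default", "alter_rename_partition");
        Map<String, String> oldSpec = new LinkedHashMap<String, String>();
        oldSpec.put("pcol1", "old_part1:");
        oldSpec.put("pcol2", "old_part2:");
        // Fetch the existing partition and point it at the new spec.
        Partition part = db.getPartition(tbl, oldSpec, false);
        Map<String, String> newSpec = new LinkedHashMap<String, String>();
        newSpec.put("pcol1", "new_part1:");
        newSpec.put("pcol2", "new_part2:");
        part.setValues(newSpec);
        // The metastore applies the rename to the partition metadata.
        db.renamePartition(tbl, oldSpec, part);
      }
    }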
"); + } + List pvals = new ArrayList(); + + for (FieldSchema field : tbl.getPartCols()) { + String val = oldPartSpec.get(field.getName()); + if (val == null || val.length() == 0) { + throw new HiveException("get partition: Value for key " + + field.getName() + " is null or empty"); + } else if (val != null){ + pvals.add(val); + } + } + getMSC().renamePartition(tbl.getDbName(), tbl.getTableName(), pvals, + newPart.getTPartition()); + + } catch (InvalidOperationException e){ + throw new HiveException("Unable to rename partition.", e); + } catch (MetaException e) { + throw new HiveException("Unable to rename partition.", e); + } catch (TException e) { + throw new HiveException("Unable to rename partition.", e); + } + } + public void alterDatabase(String dbName, Database db) throws HiveException { try { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1186991) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy) @@ -117,6 +117,7 @@ import org.apache.hadoop.hive.ql.plan.PrincipalDesc; import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; import org.apache.hadoop.hive.ql.plan.RevokeDesc; import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; @@ -263,6 +264,11 @@ return addPartition(db, addPartitionDesc); } + RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc(); + if (renamePartitionDesc != null) { + return renamePartition(db, renamePartitionDesc); + } + AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc(); if (simpleDesc != null) { if (simpleDesc.getType() == AlterTableTypes.TOUCH) { @@ -969,6 +975,34 @@ } /** + * Rename a partition in a table + * + * @param db + * Database to rename the partition. + * @param renamePartitionDesc + * rename old Partition to new one. + * @return Returns 0 when execution succeeds and above 0 if it fails. + * @throws HiveException + */ + private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) throws HiveException { + + Table tbl = db.getTable(renamePartitionDesc.getDbName(), renamePartitionDesc.getTableName()); + + validateAlterTableType( + tbl, AlterTableDesc.AlterTableTypes.RENAMEPARTITION, + false); + Partition oldPart = db.getPartition(tbl, renamePartitionDesc.getOldPartSpec(), false); + Partition part = db.getPartition(tbl, renamePartitionDesc.getOldPartSpec(), false); + part.setValues(renamePartitionDesc.getNewPartSpec()); + db.renamePartition(tbl, renamePartitionDesc.getOldPartSpec(), part); + Partition newPart = db + .getPartition(tbl, renamePartitionDesc.getNewPartSpec(), false); + work.getInputs().add(new ReadEntity(oldPart)); + work.getOutputs().add(new WriteEntity(newPart)); + return 0; + } + + /** * Rewrite the partition's metadata and force the pre/post execute hooks to * be fired. 
* @@ -1507,6 +1541,7 @@ switch (alterType) { case ADDPARTITION: case DROPPARTITION: + case RENAMEPARTITION: case ADDPROPS: case RENAME: // allow this form Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (revision 1186991) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (working copy) @@ -53,6 +53,7 @@ private ShowPartitionsDesc showPartsDesc; private DescTableDesc descTblDesc; private AddPartitionDesc addPartitionDesc; + private RenamePartitionDesc renamePartitionDesc; private AlterTableSimpleDesc alterTblSimpleDesc; private MsckDesc msckDesc; private ShowTableStatusDesc showTblStatusDesc; @@ -67,7 +68,7 @@ private GrantRevokeRoleDDL grantRevokeRoleDDL; boolean needLock = false; - + /** * ReadEntitites that are passed to the hooks. */ @@ -310,6 +311,17 @@ } /** + * @param renamePartitionDesc + * information about the partitions we want to rename. + */ + public DDLWork(HashSet inputs, HashSet outputs, + RenamePartitionDesc renamePartitionDesc) { + this(inputs, outputs); + + this.renamePartitionDesc = renamePartitionDesc; + } + + /** * @param touchDesc * information about the table/partitions that we want to touch */ @@ -713,6 +725,21 @@ } /** + * @return information about the partitions we want to rename. + */ + public RenamePartitionDesc getRenamePartitionDesc() { + return renamePartitionDesc; + } + + /** + * @param renamePartitionDesc + * information about the partitions we want to rename. + */ + public void setRenamePartitionDesc(RenamePartitionDesc renamePartitionDesc) { + this.renamePartitionDesc = renamePartitionDesc; + } + + /** * @return information about the table/partitions we want to alter. 
*/ public AlterTableSimpleDesc getAlterTblSimpleDesc() { @@ -806,7 +833,7 @@ public void setRoleDDLDesc(RoleDDLDesc roleDDLDesc) { this.roleDDLDesc = roleDDLDesc; } - + /** * @return grant desc */ @@ -820,7 +847,7 @@ public void setGrantDesc(GrantDesc grantDesc) { this.grantDesc = grantDesc; } - + /** * @return show grant desc */ @@ -842,7 +869,7 @@ public void setRevokeDesc(RevokeDesc revokeDesc) { this.revokeDesc = revokeDesc; } - + /** * @return */ @@ -856,7 +883,7 @@ public void setGrantRevokeRoleDDL(GrantRevokeRoleDDL grantRevokeRoleDDL) { this.grantRevokeRoleDDL = grantRevokeRoleDDL; } - + public void setAlterDatabaseDesc(AlterDatabaseDesc alterDbDesc) { this.alterDbDesc = alterDbDesc; } @@ -864,7 +891,7 @@ public AlterDatabaseDesc getAlterDatabaseDesc() { return this.alterDbDesc; } - + /** * @return descriptor for merging files */ Index: ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (revision 1186991) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (working copy) @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.security.authorization.Privilege; public enum HiveOperation { - + EXPLAIN("EXPLAIN", null, null), LOAD("LOAD", null, new Privilege[]{Privilege.ALTER_DATA}), EXPORT("EXPORT", new Privilege[]{Privilege.SELECT}, null), @@ -36,6 +36,7 @@ ALTERTABLE_ADDCOLS("ALTERTABLE_ADDCOLS", new Privilege[]{Privilege.ALTER_METADATA}, null), ALTERTABLE_REPLACECOLS("ALTERTABLE_REPLACECOLS", new Privilege[]{Privilege.ALTER_METADATA}, null), ALTERTABLE_RENAMECOL("ALTERTABLE_RENAMECOL", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_RENAMEPART("ALTERTABLE_RENAMEPART", new Privilege[]{Privilege.DROP}, new Privilege[]{Privilege.CREATE}), ALTERTABLE_RENAME("ALTERTABLE_RENAME", new Privilege[]{Privilege.ALTER_METADATA}, null), ALTERTABLE_DROPPARTS("ALTERTABLE_DROPPARTS", new Privilege[]{Privilege.DROP}, null), ALTERTABLE_ADDPARTS("ALTERTABLE_ADDPARTS", new Privilege[]{Privilege.CREATE}, null), @@ -81,20 +82,20 @@ ALTERPARTITION_LOCATION("ALTERPARTITION_LOCATION", new Privilege[]{Privilege.ALTER_DATA}, null), CREATETABLE("CREATETABLE", null, new Privilege[]{Privilege.CREATE}), CREATETABLE_AS_SELECT("CREATETABLE_AS_SELECT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.CREATE}), - QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}), - ALTERINDEX_PROPS("ALTERINDEX_PROPS",null, null), - ALTERDATABASE("ALTERDATABASE", null, null), - DESCDATABASE("DESCDATABASE", null, null), + QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}), + ALTERINDEX_PROPS("ALTERINDEX_PROPS",null, null), + ALTERDATABASE("ALTERDATABASE", null, null), + DESCDATABASE("DESCDATABASE", null, null), ALTERTABLE_MERGEFILES("ALTER_TABLE_MERGE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }), ALTERPARTITION_MERGEFILES("ALTER_PARTITION_MERGE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }), ; private String operationName; - + private Privilege[] inputRequiredPrivileges; - + private Privilege[] outputRequiredPrivileges; - + public Privilege[] getInputRequiredPrivileges() { return inputRequiredPrivileges; } @@ -113,9 +114,9 @@ this.inputRequiredPrivileges = inputRequiredPrivileges; this.outputRequiredPrivileges = outputRequiredPrivileges; } - + public static class 
PrivilegeAgreement { - + private Privilege[] inputUserLevelRequiredPriv; private Privilege[] inputDBLevelRequiredPriv; private Privilege[] inputTableLevelRequiredPriv; @@ -124,7 +125,7 @@ private Privilege[] outputDBLevelRequiredPriv; private Privilege[] outputTableLevelRequiredPriv; private Privilege[] outputColumnLevelRequiredPriv; - + public PrivilegeAgreement putUserLevelRequiredPriv( Privilege[] inputUserLevelRequiredPriv, Privilege[] outputUserLevelRequiredPriv) { @@ -140,7 +141,7 @@ this.outputDBLevelRequiredPriv = outputDBLevelRequiredPriv; return this; } - + public PrivilegeAgreement putTableLevelRequiredPriv( Privilege[] inputTableLevelRequiredPriv, Privilege[] outputTableLevelRequiredPriv) { Index: ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java (revision 0) @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * Contains the information needed to rename a partition. + */ +public class RenamePartitionDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + + String tableName; + String dbName; + String location; + LinkedHashMap oldPartSpec; + LinkedHashMap newPartSpec; + + /** + * For serialization only. + */ + public RenamePartitionDesc() { + } + + /** + * @param dbName + * database containing the table. + * @param tableName + * table whose partition is being renamed. + * @param oldPartSpec + * old partition specification. + * @param newPartSpec + * new partition specification. + */ + public RenamePartitionDesc(String dbName, String tableName, + Map oldPartSpec, Map newPartSpec) { + super(); + this.dbName = dbName; + this.tableName = tableName; + this.oldPartSpec = new LinkedHashMap(oldPartSpec); + this.newPartSpec = new LinkedHashMap(newPartSpec); + } + + /** + * @return database name + */ + public String getDbName() { + return dbName; + } + + /** + * @param dbName + * database name + */ + public void setDbName(String dbName) { + this.dbName = dbName; + } + + /** + * @return the table whose partition we are renaming. + */ + public String getTableName() { + return tableName; + } + + /** + * @param tableName + * the table whose partition we are renaming. 
+ */ + public void setTableName(String tableName) { + this.tableName = tableName; + } + + /** + * @return location of partition in relation to table + */ + public String getLocation() { + return location; + } + + /** + * @param location + * location of partition in relation to table + */ + public void setLocation(String location) { + this.location = location; + } + + /** + * @return old partition specification. + */ + public LinkedHashMap getOldPartSpec() { + return oldPartSpec; + } + + /** + * @param partSpec + * old partition specification + */ + public void setOldPartSpec(LinkedHashMap partSpec) { + this.oldPartSpec = partSpec; + } + + /** + * @return new partition specification. + */ + public LinkedHashMap getNewPartSpec() { + return newPartSpec; + } + + /** + * @param partSpec + * new partition specification + */ + public void setNewPartSpec(LinkedHashMap partSpec) { + this.newPartSpec = partSpec; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (revision 1186991) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (working copy) @@ -43,7 +43,7 @@ RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS, ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION, TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, - ALTERLOCATION, DROPPARTITION + ALTERLOCATION, DROPPARTITION, RENAMEPARTITION }; public static enum ProtectModeType { Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (revision 1186991) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (working copy) @@ -121,6 +121,7 @@ TOK_ALTERTABLE_RENAME; TOK_ALTERTABLE_ADDCOLS; TOK_ALTERTABLE_RENAMECOL; +TOK_ALTERTABLE_RENAMEPART; TOK_ALTERTABLE_REPLACECOLS; TOK_ALTERTABLE_ADDPARTS; TOK_ALTERTABLE_DROPPARTS; @@ -638,6 +639,7 @@ : Identifier KW_CHANGE KW_COLUMN? oldName=Identifier newName=Identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition? ->^(TOK_ALTERTABLE_RENAMECOL Identifier $oldName $newName colType $comment? alterStatementChangeColPosition?) ; + alterStatementChangeColPosition : first=KW_FIRST|KW_AFTER afterCol=Identifier @@ -732,6 +734,7 @@ | alterStatementSuffixProtectMode | alterStatementSuffixMergeFiles | alterStatementSuffixSerdeProperties + | alterStatementSuffixRenamePart ; alterStatementSuffixFileFormat @@ -755,6 +758,13 @@ -> ^(TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE alterProtectMode) ; +alterStatementSuffixRenamePart +@init { msgs.push("alter table rename partition statement"); } +@after { msgs.pop(); } + : KW_RENAME KW_TO partitionSpec + ->^(TOK_ALTERTABLE_RENAMEPART partitionSpec) + ; + alterStatementSuffixMergeFiles @init { msgs.push(""); } @after { msgs.pop(); }
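With the grammar rule above in place, the new clause parses to TOK_ALTERTABLE_RENAMEPART. A sketch of exercising the syntax end to end through the Driver; the HiveConf setup and table names are assumptions mirroring alter_rename_partition.q:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Driver;

    public class RenamePartitionSyntaxSketch {
      public static void main(String[] args) throws Exception {
        // Assumes a usable Hive/metastore configuration on the classpath.
        Driver driver = new Driver(new HiveConf(RenamePartitionSyntaxSketch.class));
        driver.run("ALTER TABLE alter_rename_partition"
            + " PARTITION (pcol1='old_part1:', pcol2='old_part2:')"
            + " RENAME TO PARTITION (pcol1='new_part1:', pcol2='new_part2:')");
      }
    }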
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (revision 1186991) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (working copy) @@ -110,6 +110,8 @@ tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, new HiveOperation[] {HiveOperation.ALTERTABLE_SERDEPROPERTIES, HiveOperation.ALTERPARTITION_SERDEPROPERTIES }); + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_RENAMEPART, + new HiveOperation[] {null, HiveOperation.ALTERTABLE_RENAMEPART}); } public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 1186991) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy) @@ -93,6 +93,7 @@ import org.apache.hadoop.hive.ql.plan.PrincipalDesc; import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; import org.apache.hadoop.hive.ql.plan.RevokeDesc; import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; @@ -199,6 +200,8 @@ analyzeAlterTableSerde(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES) { analyzeAlterTableSerdeProps(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { + analyzeAlterTableRenamePart(ast, tableName, partSpec); } break; } @@ -1685,6 +1688,32 @@ alterTblDesc), conf)); } + private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, + HashMap oldPartSpec) throws SemanticException { + Map newPartSpec = extractPartitionSpecs((ASTNode)ast.getChild(0)); + if (newPartSpec == null) { + throw new SemanticException("RENAME PARTITION: missing destination partition spec in " + ast); + } + try { + Table tab = db.getTable(db.getCurrentDatabase(), tblName, false); + if (tab != null) { + inputs.add(new ReadEntity(tab)); + } else { + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); + } + } catch (HiveException e) { + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); + } + List> partSpecs = new ArrayList>(); + partSpecs.add(oldPartSpec); + partSpecs.add(newPartSpec); + addTablePartsOutputs(tblName, partSpecs); + RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc( + db.getCurrentDatabase(), tblName, oldPartSpec, newPartSpec); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + renamePartitionDesc), conf)); + } + private void 
analyzeAlterTableModifyCols(ASTNode ast, AlterTableTypes alterType) throws SemanticException { String tblName = getUnescapedName((ASTNode)ast.getChild(0));
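After the analyzer builds the RenamePartitionDesc and DDLTask executes it, the rename can be verified through the same metadata API; a minimal sketch, assuming Hive.getPartition(..., false) returns null for a spec that no longer exists:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class VerifyRenameSketch {
      static boolean renamed(Hive db) throws HiveException {
        Table tbl = db.getTable("default", "alter_rename_partition");
        Map<String, String> oldSpec = new LinkedHashMap<String, String>();
        oldSpec.put("pcol1", "old_part1:");
        oldSpec.put("pcol2", "old_part2:");
        Map<String, String> newSpec = new LinkedHashMap<String, String>();
        newSpec.put("pcol1", "new_part1:");
        newSpec.put("pcol2", "new_part2:");
        // The old spec should be gone and the new one resolvable.
        return db.getPartition(tbl, oldSpec, false) == null
            && db.getPartition(tbl, newSpec, false) != null;
      }
    }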