Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (revision 1145366)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (working copy)
@@ -459,6 +459,136 @@
     }
   }

+  public void testRenamePartition() throws Throwable {
+
+    try {
+      String dbName = "compdb1";
+      String tblName = "comptbl1";
+      List<String> vals = new ArrayList<String>(2);
+      vals.add("2011-07-11");
+      vals.add("8");
+      String part_path = "/ds=2011-07-11/hr=8";
+      List<String> tmp_vals = new ArrayList<String>(2);
+      tmp_vals.add("tmp_2011-07-11");
+      tmp_vals.add("-8");
+      String part2_path = "/ds=tmp_2011-07-11/hr=-8";
+
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      db.setDescription("Rename Partition Test database");
+      client.createDatabase(db);
+
+      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+      cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(cols);
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.getParameters().put("test_param_1", "Use this for comments etc");
+      sd.setBucketCols(new ArrayList<String>(2));
+      sd.getBucketCols().add("name");
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters()
+          .put(Constants.SERIALIZATION_FORMAT, "1");
+      sd.setSortCols(new ArrayList<Order>());
+
+      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+
+      client.createTable(tbl);
+
+      if (isThriftClient) {
+        // the createTable() above does not update the location in the 'tbl'
+        // object when the client is a thrift client and the code below relies
+        // on the location being present in the 'tbl' object - so get the table
+        // from the metastore
+        tbl = client.getTable(dbName, tblName);
+      }
+
+      Partition part = new Partition();
+      part.setDbName(dbName);
+      part.setTableName(tblName);
+      part.setValues(vals);
+      part.setParameters(new HashMap<String, String>());
+      part.setSd(tbl.getSd().deepCopy());
+      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+      part.getParameters().put("retention", "10");
+      part.getSd().setNumBuckets(12);
+      part.getSd().getSerdeInfo().getParameters().put("abc", "1");
+
+      client.add_partition(part);
+
+      part.setValues(tmp_vals);
+      client.renamePartition(dbName, tblName, vals, part);
+
+      boolean exceptionThrown = false;
+      try {
+        Partition p = client.getPartition(dbName, tblName, vals);
+      } catch(Exception e) {
+        assertEquals("partition should not have existed",
+            NoSuchObjectException.class, e.getClass());
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+
+      Partition part3 = client.getPartition(dbName, tblName, tmp_vals);
+      assertEquals("couldn't rename partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+          12);
+      assertEquals("new partition sd matches", part3.getSd().getLocation(),
+          tbl.getSd().getLocation() + part2_path);
+
+      part.setValues(vals);
+      client.renamePartition(dbName, tblName, tmp_vals, part);
+
+      exceptionThrown = false;
+      try {
+        Partition p = client.getPartition(dbName, tblName, tmp_vals);
+      } catch(Exception e) {
+        assertEquals("partition should not have existed",
+            NoSuchObjectException.class, e.getClass());
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+
+      part3 = client.getPartition(dbName, tblName, vals);
+      assertEquals("couldn't rename partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+          12);
+      assertEquals("new partition sd matches", part3.getSd().getLocation(),
+          tbl.getSd().getLocation() + part_path);
+
+      client.dropTable(dbName, tblName);
+
+      client.dropDatabase(dbName);
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testRenamePartition() failed.");
+      throw e;
+    }
+  }
+
   public void testDatabase() throws Throwable {
     try {
       // clear up any existing databases

Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (revision 1145366)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (working copy)
@@ -141,7 +141,7 @@
   public abstract List<String> listPartitionNamesByFilter(String db_name,
       String tbl_name, String filter, short max_parts) throws MetaException;

-  public abstract void alterPartition(String db_name, String tbl_name,
+  public abstract void alterPartition(String db_name, String tbl_name, List<String> part_vals,
       Partition new_part) throws InvalidObjectException, MetaException;

   public abstract boolean addIndex(Index index)
@@ -286,6 +286,6 @@
   public abstract List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name,
       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
       throws MetaException, InvalidObjectException;
-
+
   public abstract long cleanupEvents();
 }

Index: metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (revision 1145366)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (working copy)
@@ -495,6 +495,27 @@
       throws InvalidOperationException, MetaException, TException;

   /**
+   * rename a partition to a new partition
+   *
+   * @param dbname
+   *          database of the old partition
+   * @param name
+   *          table name of the old partition
+   * @param part_vals
+   *          values of the old partition
+   * @param newPart
+   *          new partition
+   * @throws InvalidOperationException
+   *           if srcFs and destFs are different
+   * @throws MetaException
+   *           if error in updating metadata
+   * @throws TException
+   *           if error in communicating with metastore server
+   */
+  public void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart)
+      throws InvalidOperationException, MetaException, TException;
+
+  /**
    * @param db
    * @param tableName
    * @throws UnknownTableException
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (revision 1145366)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (working copy)
@@ -168,6 +168,22 @@
     client.alter_table(dbname, tbl_name, new_tbl);
   }

+  /**
+   * @param dbname
+   * @param name
+   * @param part_vals
+   * @param newPart
+   * @throws InvalidOperationException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition(
+   *      java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition)
+   */
+  public void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partition(dbname, name, part_vals, newPart);
+  }
+
   private void open() throws MetaException {
     for (URI store : metastoreUris) {
       LOG.info("Trying to connect to metastore with URI " + store);
@@ -759,7 +775,7 @@

   public void alter_partition(String dbName, String tblName, Partition newPart)
       throws InvalidOperationException, MetaException, TException {
-    client.alter_partition(dbName, tblName, newPart);
+    client.alter_partition(dbName, tblName, null, newPart);
   }

   public void alterDatabase(String dbName, Database db)
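
Both client entry points map onto the same extended Thrift method: renamePartition passes the old partition's values, while the legacy alter_partition overload passes null for part_vals, which the server treats as a plain in-place alter. A sketch of the resulting contract; the helper name is illustrative, not part of the patch:

  // null/empty part_vals => alter in place; non-empty => rename.
  static void alterOrRename(HiveMetaStoreClient client, String db, String tbl,
      List<String> oldVals, Partition newPart) throws Exception {
    if (oldVals == null || oldVals.isEmpty()) {
      client.alter_partition(db, tbl, newPart);          // old behavior
    } else {
      client.renamePartition(db, tbl, oldVals, newPart); // key change + directory move
    }
  }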
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 1145366)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -37,6 +37,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
@@ -1730,46 +1731,28 @@
       return ret;
     }

-    private void alter_partition_core(final RawStore ms, final String db_name,
-        final String tbl_name, final Partition new_part)
-        throws InvalidOperationException, MetaException, TException {
-      try {
-        // Set DDL time to now if not specified
-        if (new_part.getParameters() == null ||
-            new_part.getParameters().get(Constants.DDL_TIME) == null ||
-            Integer.parseInt(new_part.getParameters().get(Constants.DDL_TIME)) == 0) {
-          new_part.putToParameters(Constants.DDL_TIME, Long.toString(System
-              .currentTimeMillis() / 1000));
-        }
-        Partition oldPart = ms.getPartition(db_name, tbl_name, new_part.getValues());
-        ms.alterPartition(db_name, tbl_name, new_part);
-        for (MetaStoreEventListener listener : listeners) {
-          listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this));
-        }
-      } catch (InvalidObjectException e) {
-        throw new InvalidOperationException("alter is not possible");
-      } catch (NoSuchObjectException e){
-        //old partition does not exist
-        throw new InvalidOperationException("alter is not possible");
-      }
-    }
-
-    public void alter_partition(final String db_name, final String tbl_name,
-        final Partition new_part) throws InvalidOperationException, MetaException,
+    public void alter_partition(final String db_name, final String tbl_name,
+        final List<String> part_vals, final Partition new_part)
+        throws InvalidOperationException, MetaException,
         TException {
       startTableFunction("alter_partition", db_name, tbl_name);
-      LOG.info("Partition values:" + new_part.getValues());
-
+      LOG.info("New partition values:" + new_part.getValues());
+      if (part_vals != null && part_vals.size() > 0) {
+        LOG.info("Old Partition values:" + part_vals);
+      }
+
       try {
         executeWithRetry(new Command<Boolean>() {
           @Override
           public Boolean run(RawStore ms) throws Exception {
-            alter_partition_core(ms, db_name, tbl_name, new_part);
+            alter_partition_core(ms, db_name, tbl_name, part_vals, new_part);
             return Boolean.TRUE;
           }
         });
-      } catch (InvalidOperationException e) {
-        throw e;
+      } catch (InvalidObjectException e) {
+        throw new InvalidOperationException(e.getMessage());
+      } catch (AlreadyExistsException e) {
+        throw new InvalidOperationException(e.getMessage());
       } catch (MetaException e) {
         throw e;
       } catch (TException e) {
@@ -1783,6 +1766,149 @@
       return;
     }

+    private void alter_partition_core(final RawStore ms, final String dbname, final String name, final List<String> part_vals, final Partition new_part)
+        throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+      boolean success = false;
+
+      Path srcPath = null;
+      Path destPath = null;
+      FileSystem srcFs = null;
+      FileSystem destFs = null;
+      Table tbl = null;
+      Partition oldPart = null;
+      String oldPartLoc = null;
+      String newPartLoc = null;
+      // Set DDL time to now if not specified
+      if (new_part.getParameters() == null ||
+          new_part.getParameters().get(Constants.DDL_TIME) == null ||
+          Integer.parseInt(new_part.getParameters().get(Constants.DDL_TIME)) == 0) {
+        new_part.putToParameters(Constants.DDL_TIME, Long.toString(System
+            .currentTimeMillis() / 1000));
+      }
+      //alter partition
+      if (part_vals == null || part_vals.size() == 0) {
+        try {
+          oldPart = ms.getPartition(dbname, name, new_part.getValues());
+          ms.alterPartition(dbname, name, new_part.getValues(), new_part);
+          for (MetaStoreEventListener listener : listeners) {
+            listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this));
+          }
+        } catch (InvalidObjectException e) {
+          throw new InvalidOperationException("alter is not possible");
+        } catch (NoSuchObjectException e){
+          //old partition does not exist
+          throw new InvalidOperationException("alter is not possible");
+        }
+        return;
+      }
+      //rename partition
+      try {
+        ms.openTransaction();
+        try {
+          oldPart = ms.getPartition(dbname, name, part_vals);
+        } catch (NoSuchObjectException e) {
+          // this means there is no existing partition
+          throw new InvalidObjectException(
+              "Unable to rename partition because old partition does not exist");
+        }
+        Partition check_part = null;
+        try {
+          check_part = ms.getPartition(dbname, name, new_part.getValues());
+        } catch(NoSuchObjectException e) {
+          // this means there is no existing partition
+          check_part = null;
+        }
+        if (check_part != null) {
+          throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." + new_part.getValues());
+        }
+        tbl = ms.getTable(dbname, name);
+        if (tbl == null) {
+          throw new InvalidObjectException(
+              "Unable to rename partition because table or database do not exist");
+        }
+        try {
+          destPath = new Path(wh.getTablePath(ms.getDatabase(dbname), name), Warehouse.makePartName(tbl.getPartitionKeys(),
+              new_part.getValues()));
+        } catch (NoSuchObjectException e) {
+          LOG.debug(e);
+          throw new InvalidOperationException(
+              "Unable to change partition or table. Database " + dbname + " does not exist"
+              + " Check metastore logs for detailed stack." + e.getMessage());
+        }
+        if (destPath != null) {
+          newPartLoc = destPath.toString();
+          oldPartLoc = oldPart.getSd().getLocation();

+          srcPath = new Path(oldPartLoc);

+          LOG.info("srcPath:" + oldPartLoc);
+          LOG.info("destPath:" + newPartLoc);
+          srcFs = wh.getFs(srcPath);
+          destFs = wh.getFs(destPath);
+          // check that src and dest are on the same file system
+          if (srcFs != destFs) {
+            throw new InvalidOperationException("table new location " + destPath
+                + " is on a different file system than the old location "
+                + srcPath + ". This operation is not supported");
+          }
+          try {
+            srcFs.exists(srcPath); // check that src exists and also checks permissions necessary
+            if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
+              throw new InvalidOperationException("New location for this table "
+                  + tbl.getDbName() + "." + tbl.getTableName()
+                  + " already exists : " + destPath);
+            }
+          } catch (IOException e) {
+            Warehouse.closeFs(srcFs);
+            Warehouse.closeFs(destFs);
+            throw new InvalidOperationException("Unable to access new location "
+                + destPath + " for partition " + tbl.getDbName() + "."
+                + tbl.getTableName() + " " + new_part.getValues());
+          }
+          new_part.getSd().setLocation(newPartLoc);
+          ms.alterPartition(dbname, name, part_vals, new_part);
+        }

+        success = ms.commitTransaction();
+      } finally {
+        if (!success) {
+          ms.rollbackTransaction();
+        }
+        if (success && newPartLoc.compareTo(oldPartLoc) != 0) {
+          //rename the data directory
+          try{
+            if (srcFs.exists(srcPath)) {
+              //if destPath's parent path doesn't exist, we should mkdir it
+              Path destParentPath = destPath.getParent();
+              if (!wh.mkdirs(destParentPath)) {
+                throw new IOException("Unable to create path " + destParentPath);
+              }
+              srcFs.rename(srcPath, destPath);
+              LOG.info("rename done!");
+            }
+          } catch (IOException e) {
+            boolean revertMetaDataTransaction = false;
+            try {
+              ms.openTransaction();
+              ms.alterPartition(dbname, name, new_part.getValues(), oldPart);
+              revertMetaDataTransaction = ms.commitTransaction();
+            } catch (Exception e1) {
+              LOG.error("Reverting metadata operation failed during HDFS operation failure", e1);
+              if (!revertMetaDataTransaction) {
+                ms.rollbackTransaction();
+              }
+            }
+            throw new InvalidOperationException("Unable to access old location "
+                + srcPath + " for partition " + tbl.getDbName() + "."
+                + tbl.getTableName() + " " + part_vals);
+          }
+        }
+        for (MetaStoreEventListener listener : listeners) {
+          listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this));
+        }
+      }
+    }
+
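The rename path above orders its work as metadata first, filesystem second: the partition row is rewritten inside a metastore transaction, and only after a successful commit is the directory moved; if the move then fails, the old partition is re-applied as a best-effort revert. A condensed sketch of that ordering, with variable names following the method above and error handling simplified:

  boolean committed = false;
  ms.openTransaction();
  try {
    ms.alterPartition(dbname, name, part_vals, new_part); // metadata change
    committed = ms.commitTransaction();
  } finally {
    if (!committed) {
      ms.rollbackTransaction();
    }
  }
  try {
    srcFs.rename(srcPath, destPath);                      // data move, only after commit
  } catch (IOException e) {
    ms.openTransaction();                                 // best-effort metadata revert
    ms.alterPartition(dbname, name, new_part.getValues(), oldPart);
    ms.commitTransaction();
    throw e;
  }
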
     public boolean create_index(Index index_def)
         throws IndexAlreadyExistsException, MetaException {
       endFunction(startFunction("create_index"));

Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (revision 1145366)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (working copy)
@@ -30,9 +30,9 @@
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

 /**
  * Hive specific implementation of alter
@@ -164,7 +164,7 @@
                 oldUri.getAuthority(), newPath);
             part.getSd().setLocation(newPartLocPath.toString());

-            msdb.alterPartition(dbname, name, part);
+            msdb.alterPartition(dbname, name, part.getValues(), part);
           }
         }
       }

Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 1145366)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy)
@@ -1704,18 +1704,20 @@
     }
   }

-  public void alterPartition(String dbname, String name, Partition newPart)
+  public void alterPartition(String dbname, String name, List<String> part_vals, Partition newPart)
       throws InvalidObjectException, MetaException {
     boolean success = false;
     try {
       openTransaction();
       name = name.toLowerCase();
       dbname = dbname.toLowerCase();
-      MPartition oldp = getMPartition(dbname, name, newPart.getValues());
+      MPartition oldp = getMPartition(dbname, name, part_vals);
       MPartition newp = convertToMPart(newPart);
       if (oldp == null || newp == null) {
         throw new InvalidObjectException("partition does not exist.");
       }
+      oldp.setValues(newp.getValues());
+      oldp.setPartitionName(newp.getPartitionName());
       oldp.setParameters(newPart.getParameters());
       copyMSD(newp.getSd(), oldp.getSd());
       if (newp.getCreateTime() != oldp.getCreateTime()) {

Index: metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
===================================================================
--- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (revision 1145366)
+++ metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (working copy)
@@ -315,11 +315,12 @@
     """
     pass

-  def alter_partition(self, db_name, tbl_name, new_part):
+  def alter_partition(self, db_name, tbl_name, part_vals, new_part):
     """
     Parameters:
     - db_name
     - tbl_name
+    - part_vals
     - new_part
     """
     pass

@@ -1816,21 +1817,23 @@
       raise result.o2
     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result");

-  def alter_partition(self, db_name, tbl_name, new_part):
+  def alter_partition(self, db_name, tbl_name, part_vals, new_part):
     """
     Parameters:
     - db_name
     - tbl_name
+    - part_vals
     - new_part
     """
-    self.send_alter_partition(db_name, tbl_name, new_part)
+
self.send_alter_partition(db_name, tbl_name, part_vals, new_part) self.recv_alter_partition() - def send_alter_partition(self, db_name, tbl_name, new_part): + def send_alter_partition(self, db_name, tbl_name, part_vals, new_part): self._oprot.writeMessageBegin('alter_partition', TMessageType.CALL, self._seqid) args = alter_partition_args() args.db_name = db_name args.tbl_name = tbl_name + args.part_vals = part_vals args.new_part = new_part args.write(self._oprot) self._oprot.writeMessageEnd() @@ -3359,7 +3362,7 @@ iprot.readMessageEnd() result = alter_partition_result() try: - self._handler.alter_partition(args.db_name, args.tbl_name, args.new_part) + self._handler.alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part) except InvalidOperationException, o1: result.o1 = o1 except MetaException, o2: @@ -9604,6 +9607,7 @@ Attributes: - db_name - tbl_name + - part_vals - new_part """ @@ -9611,12 +9615,14 @@ None, # 0 (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 - (3, TType.STRUCT, 'new_part', (Partition, Partition.thrift_spec), None, ), # 3 + (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 + (4, TType.STRUCT, 'new_part', (Partition, Partition.thrift_spec), None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, new_part=None,): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, new_part=None,): self.db_name = db_name self.tbl_name = tbl_name + self.part_vals = part_vals self.new_part = new_part def read(self, iprot): @@ -9639,6 +9645,16 @@ else: iprot.skip(ftype) elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype375, _size372) = iprot.readListBegin() + for _i376 in xrange(_size372): + _elem377 = iprot.readString(); + self.part_vals.append(_elem377) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: if ftype == TType.STRUCT: self.new_part = Partition() self.new_part.read(iprot) @@ -9662,8 +9678,15 @@ oprot.writeFieldBegin('tbl_name', TType.STRING, 2) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() + if self.part_vals != None: + oprot.writeFieldBegin('part_vals', TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter378 in self.part_vals: + oprot.writeString(iter378) + oprot.writeListEnd() + oprot.writeFieldEnd() if self.new_part != None: - oprot.writeFieldBegin('new_part', TType.STRUCT, 3) + oprot.writeFieldBegin('new_part', TType.STRUCT, 4) self.new_part.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9985,10 +10008,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype375, _size372) = iprot.readListBegin() - for _i376 in xrange(_size372): - _elem377 = iprot.readString(); - self.success.append(_elem377) + (_etype382, _size379) = iprot.readListBegin() + for _i383 in xrange(_size379): + _elem384 = iprot.readString(); + self.success.append(_elem384) iprot.readListEnd() else: iprot.skip(ftype) @@ -10011,8 +10034,8 @@ if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter378 in self.success: - oprot.writeString(iter378) + for iter385 in self.success: + oprot.writeString(iter385) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -10123,11 +10146,11 @@ if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype380, _vtype381, _size379 ) = iprot.readMapBegin() - for _i383 in xrange(_size379): - _key384 = iprot.readString(); - _val385 = iprot.readString(); - self.success[_key384] = _val385 
+ (_ktype387, _vtype388, _size386 ) = iprot.readMapBegin() + for _i390 in xrange(_size386): + _key391 = iprot.readString(); + _val392 = iprot.readString(); + self.success[_key391] = _val392 iprot.readMapEnd() else: iprot.skip(ftype) @@ -10150,9 +10173,9 @@ if self.success != None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter386,viter387 in self.success.items(): - oprot.writeString(kiter386) - oprot.writeString(viter387) + for kiter393,viter394 in self.success.items(): + oprot.writeString(kiter393) + oprot.writeString(viter394) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -10221,11 +10244,11 @@ elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype389, _vtype390, _size388 ) = iprot.readMapBegin() - for _i392 in xrange(_size388): - _key393 = iprot.readString(); - _val394 = iprot.readString(); - self.part_vals[_key393] = _val394 + (_ktype396, _vtype397, _size395 ) = iprot.readMapBegin() + for _i399 in xrange(_size395): + _key400 = iprot.readString(); + _val401 = iprot.readString(); + self.part_vals[_key400] = _val401 iprot.readMapEnd() else: iprot.skip(ftype) @@ -10255,9 +10278,9 @@ if self.part_vals != None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter395,viter396 in self.part_vals.items(): - oprot.writeString(kiter395) - oprot.writeString(viter396) + for kiter402,viter403 in self.part_vals.items(): + oprot.writeString(kiter402) + oprot.writeString(viter403) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType != None: @@ -10451,11 +10474,11 @@ elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype398, _vtype399, _size397 ) = iprot.readMapBegin() - for _i401 in xrange(_size397): - _key402 = iprot.readString(); - _val403 = iprot.readString(); - self.part_vals[_key402] = _val403 + (_ktype405, _vtype406, _size404 ) = iprot.readMapBegin() + for _i408 in xrange(_size404): + _key409 = iprot.readString(); + _val410 = iprot.readString(); + self.part_vals[_key409] = _val410 iprot.readMapEnd() else: iprot.skip(ftype) @@ -10485,9 +10508,9 @@ if self.part_vals != None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter404,viter405 in self.part_vals.items(): - oprot.writeString(kiter404) - oprot.writeString(viter405) + for kiter411,viter412 in self.part_vals.items(): + oprot.writeString(kiter411) + oprot.writeString(viter412) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType != None: @@ -11448,11 +11471,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype409, _size406) = iprot.readListBegin() - for _i410 in xrange(_size406): - _elem411 = Index() - _elem411.read(iprot) - self.success.append(_elem411) + (_etype416, _size413) = iprot.readListBegin() + for _i417 in xrange(_size413): + _elem418 = Index() + _elem418.read(iprot) + self.success.append(_elem418) iprot.readListEnd() else: iprot.skip(ftype) @@ -11481,8 +11504,8 @@ if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter412 in self.success: - iter412.write(oprot) + for iter419 in self.success: + iter419.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -11621,10 +11644,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype416, _size413) = iprot.readListBegin() - for _i417 in xrange(_size413): - _elem418 = 
iprot.readString(); - self.success.append(_elem418) + (_etype423, _size420) = iprot.readListBegin() + for _i424 in xrange(_size420): + _elem425 = iprot.readString(); + self.success.append(_elem425) iprot.readListEnd() else: iprot.skip(ftype) @@ -11647,8 +11670,8 @@ if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter419 in self.success: - oprot.writeString(iter419) + for iter426 in self.success: + oprot.writeString(iter426) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 != None: @@ -12002,10 +12025,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype423, _size420) = iprot.readListBegin() - for _i424 in xrange(_size420): - _elem425 = iprot.readString(); - self.success.append(_elem425) + (_etype430, _size427) = iprot.readListBegin() + for _i431 in xrange(_size427): + _elem432 = iprot.readString(); + self.success.append(_elem432) iprot.readListEnd() else: iprot.skip(ftype) @@ -12028,8 +12051,8 @@ if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter426 in self.success: - oprot.writeString(iter426) + for iter433 in self.success: + oprot.writeString(iter433) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -12496,11 +12519,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype430, _size427) = iprot.readListBegin() - for _i431 in xrange(_size427): - _elem432 = Role() - _elem432.read(iprot) - self.success.append(_elem432) + (_etype437, _size434) = iprot.readListBegin() + for _i438 in xrange(_size434): + _elem439 = Role() + _elem439.read(iprot) + self.success.append(_elem439) iprot.readListEnd() else: iprot.skip(ftype) @@ -12523,8 +12546,8 @@ if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter433 in self.success: - iter433.write(oprot) + for iter440 in self.success: + iter440.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -12591,10 +12614,10 @@ elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype437, _size434) = iprot.readListBegin() - for _i438 in xrange(_size434): - _elem439 = iprot.readString(); - self.group_names.append(_elem439) + (_etype444, _size441) = iprot.readListBegin() + for _i445 in xrange(_size441): + _elem446 = iprot.readString(); + self.group_names.append(_elem446) iprot.readListEnd() else: iprot.skip(ftype) @@ -12619,8 +12642,8 @@ if self.group_names != None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter440 in self.group_names: - oprot.writeString(iter440) + for iter447 in self.group_names: + oprot.writeString(iter447) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12824,11 +12847,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype444, _size441) = iprot.readListBegin() - for _i445 in xrange(_size441): - _elem446 = HiveObjectPrivilege() - _elem446.read(iprot) - self.success.append(_elem446) + (_etype451, _size448) = iprot.readListBegin() + for _i452 in xrange(_size448): + _elem453 = HiveObjectPrivilege() + _elem453.read(iprot) + self.success.append(_elem453) iprot.readListEnd() else: iprot.skip(ftype) @@ -12851,8 +12874,8 @@ if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter447 in self.success: - iter447.write(oprot) + for 
iter454 in self.success: + iter454.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: Index: metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote =================================================================== --- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote (revision 1145366) +++ metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote (working copy) @@ -56,7 +56,7 @@ print ' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)' print ' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)' print ' get_partitions_by_names(string db_name, string tbl_name, names)' - print ' void alter_partition(string db_name, string tbl_name, Partition new_part)' + print ' void alter_partition(string db_name, string tbl_name, part_vals, Partition new_part)' print ' string get_config_value(string name, string defaultValue)' print ' partition_name_to_vals(string part_name)' print ' partition_name_to_spec(string part_name)' @@ -342,10 +342,10 @@ pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),)) elif cmd == 'alter_partition': - if len(args) != 3: - print 'alter_partition requires 3 args' + if len(args) != 4: + print 'alter_partition requires 4 args' sys.exit(1) - pp.pprint(client.alter_partition(args[0],args[1],eval(args[2]),)) + pp.pprint(client.alter_partition(args[0],args[1],eval(args[2]),eval(args[3]),)) elif cmd == 'get_config_value': if len(args) != 2: Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp =================================================================== --- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp (revision 1145366) +++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp (working copy) @@ -8511,6 +8511,26 @@ } break; case 3: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->part_vals.clear(); + uint32_t _size419; + ::apache::thrift::protocol::TType _etype422; + iprot->readListBegin(_etype422, _size419); + this->part_vals.resize(_size419); + uint32_t _i423; + for (_i423 = 0; _i423 < _size419; ++_i423) + { + xfer += iprot->readString(this->part_vals[_i423]); + } + iprot->readListEnd(); + } + this->__isset.part_vals = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->new_part.read(iprot); this->__isset.new_part = true; @@ -8539,7 +8559,18 @@ xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->part_vals.size()); + std::vector ::const_iterator _iter424; + for (_iter424 = this->part_vals.begin(); _iter424 != this->part_vals.end(); ++_iter424) + { + xfer += oprot->writeString((*_iter424)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 4); xfer += this->new_part.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -8556,7 +8587,18 @@ xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += 
oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, (*(this->part_vals)).size()); + std::vector ::const_iterator _iter425; + for (_iter425 = (*(this->part_vals)).begin(); _iter425 != (*(this->part_vals)).end(); ++_iter425) + { + xfer += oprot->writeString((*_iter425)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 4); xfer += (*(this->new_part)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -8958,14 +9000,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size419; - ::apache::thrift::protocol::TType _etype422; - iprot->readListBegin(_etype422, _size419); - this->success.resize(_size419); - uint32_t _i423; - for (_i423 = 0; _i423 < _size419; ++_i423) + uint32_t _size426; + ::apache::thrift::protocol::TType _etype429; + iprot->readListBegin(_etype429, _size426); + this->success.resize(_size426); + uint32_t _i430; + for (_i430 = 0; _i430 < _size426; ++_i430) { - xfer += iprot->readString(this->success[_i423]); + xfer += iprot->readString(this->success[_i430]); } iprot->readListEnd(); } @@ -9004,10 +9046,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->success.size()); - std::vector ::const_iterator _iter424; - for (_iter424 = this->success.begin(); _iter424 != this->success.end(); ++_iter424) + std::vector ::const_iterator _iter431; + for (_iter431 = this->success.begin(); _iter431 != this->success.end(); ++_iter431) { - xfer += oprot->writeString((*_iter424)); + xfer += oprot->writeString((*_iter431)); } xfer += oprot->writeListEnd(); } @@ -9046,14 +9088,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size425; - ::apache::thrift::protocol::TType _etype428; - iprot->readListBegin(_etype428, _size425); - (*(this->success)).resize(_size425); - uint32_t _i429; - for (_i429 = 0; _i429 < _size425; ++_i429) + uint32_t _size432; + ::apache::thrift::protocol::TType _etype435; + iprot->readListBegin(_etype435, _size432); + (*(this->success)).resize(_size432); + uint32_t _i436; + for (_i436 = 0; _i436 < _size432; ++_i436) { - xfer += iprot->readString((*(this->success))[_i429]); + xfer += iprot->readString((*(this->success))[_i436]); } iprot->readListEnd(); } @@ -9168,17 +9210,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size430; - ::apache::thrift::protocol::TType _ktype431; - ::apache::thrift::protocol::TType _vtype432; - iprot->readMapBegin(_ktype431, _vtype432, _size430); - uint32_t _i434; - for (_i434 = 0; _i434 < _size430; ++_i434) + uint32_t _size437; + ::apache::thrift::protocol::TType _ktype438; + ::apache::thrift::protocol::TType _vtype439; + iprot->readMapBegin(_ktype438, _vtype439, _size437); + uint32_t _i441; + for (_i441 = 0; _i441 < _size437; ++_i441) { - std::string _key435; - xfer += iprot->readString(_key435); - std::string& _val436 = this->success[_key435]; - xfer += iprot->readString(_val436); + std::string _key442; + xfer += iprot->readString(_key442); + std::string& _val443 = this->success[_key442]; + xfer += iprot->readString(_val443); } iprot->readMapEnd(); } @@ 
-9217,11 +9259,11 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, this->success.size()); - std::map ::const_iterator _iter437; - for (_iter437 = this->success.begin(); _iter437 != this->success.end(); ++_iter437) + std::map ::const_iterator _iter444; + for (_iter444 = this->success.begin(); _iter444 != this->success.end(); ++_iter444) { - xfer += oprot->writeString(_iter437->first); - xfer += oprot->writeString(_iter437->second); + xfer += oprot->writeString(_iter444->first); + xfer += oprot->writeString(_iter444->second); } xfer += oprot->writeMapEnd(); } @@ -9260,17 +9302,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size438; - ::apache::thrift::protocol::TType _ktype439; - ::apache::thrift::protocol::TType _vtype440; - iprot->readMapBegin(_ktype439, _vtype440, _size438); - uint32_t _i442; - for (_i442 = 0; _i442 < _size438; ++_i442) + uint32_t _size445; + ::apache::thrift::protocol::TType _ktype446; + ::apache::thrift::protocol::TType _vtype447; + iprot->readMapBegin(_ktype446, _vtype447, _size445); + uint32_t _i449; + for (_i449 = 0; _i449 < _size445; ++_i449) { - std::string _key443; - xfer += iprot->readString(_key443); - std::string& _val444 = (*(this->success))[_key443]; - xfer += iprot->readString(_val444); + std::string _key450; + xfer += iprot->readString(_key450); + std::string& _val451 = (*(this->success))[_key450]; + xfer += iprot->readString(_val451); } iprot->readMapEnd(); } @@ -9339,17 +9381,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size445; - ::apache::thrift::protocol::TType _ktype446; - ::apache::thrift::protocol::TType _vtype447; - iprot->readMapBegin(_ktype446, _vtype447, _size445); - uint32_t _i449; - for (_i449 = 0; _i449 < _size445; ++_i449) + uint32_t _size452; + ::apache::thrift::protocol::TType _ktype453; + ::apache::thrift::protocol::TType _vtype454; + iprot->readMapBegin(_ktype453, _vtype454, _size452); + uint32_t _i456; + for (_i456 = 0; _i456 < _size452; ++_i456) { - std::string _key450; - xfer += iprot->readString(_key450); - std::string& _val451 = this->part_vals[_key450]; - xfer += iprot->readString(_val451); + std::string _key457; + xfer += iprot->readString(_key457); + std::string& _val458 = this->part_vals[_key457]; + xfer += iprot->readString(_val458); } iprot->readMapEnd(); } @@ -9360,9 +9402,9 @@ break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast452; - xfer += iprot->readI32(ecast452); - this->eventType = (PartitionEventType::type)ecast452; + int32_t ecast459; + xfer += iprot->readI32(ecast459); + this->eventType = (PartitionEventType::type)ecast459; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -9392,11 +9434,11 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, this->part_vals.size()); - std::map ::const_iterator _iter453; - for (_iter453 = this->part_vals.begin(); _iter453 != this->part_vals.end(); ++_iter453) + std::map ::const_iterator _iter460; + for (_iter460 = this->part_vals.begin(); _iter460 != this->part_vals.end(); ++_iter460) { - xfer += oprot->writeString(_iter453->first); - xfer += oprot->writeString(_iter453->second); + xfer += oprot->writeString(_iter460->first); + xfer += 
oprot->writeString(_iter460->second); } xfer += oprot->writeMapEnd(); } @@ -9421,11 +9463,11 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, (*(this->part_vals)).size()); - std::map ::const_iterator _iter454; - for (_iter454 = (*(this->part_vals)).begin(); _iter454 != (*(this->part_vals)).end(); ++_iter454) + std::map ::const_iterator _iter461; + for (_iter461 = (*(this->part_vals)).begin(); _iter461 != (*(this->part_vals)).end(); ++_iter461) { - xfer += oprot->writeString(_iter454->first); - xfer += oprot->writeString(_iter454->second); + xfer += oprot->writeString(_iter461->first); + xfer += oprot->writeString(_iter461->second); } xfer += oprot->writeMapEnd(); } @@ -9674,17 +9716,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size455; - ::apache::thrift::protocol::TType _ktype456; - ::apache::thrift::protocol::TType _vtype457; - iprot->readMapBegin(_ktype456, _vtype457, _size455); - uint32_t _i459; - for (_i459 = 0; _i459 < _size455; ++_i459) + uint32_t _size462; + ::apache::thrift::protocol::TType _ktype463; + ::apache::thrift::protocol::TType _vtype464; + iprot->readMapBegin(_ktype463, _vtype464, _size462); + uint32_t _i466; + for (_i466 = 0; _i466 < _size462; ++_i466) { - std::string _key460; - xfer += iprot->readString(_key460); - std::string& _val461 = this->part_vals[_key460]; - xfer += iprot->readString(_val461); + std::string _key467; + xfer += iprot->readString(_key467); + std::string& _val468 = this->part_vals[_key467]; + xfer += iprot->readString(_val468); } iprot->readMapEnd(); } @@ -9695,9 +9737,9 @@ break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast462; - xfer += iprot->readI32(ecast462); - this->eventType = (PartitionEventType::type)ecast462; + int32_t ecast469; + xfer += iprot->readI32(ecast469); + this->eventType = (PartitionEventType::type)ecast469; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -9727,11 +9769,11 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, this->part_vals.size()); - std::map ::const_iterator _iter463; - for (_iter463 = this->part_vals.begin(); _iter463 != this->part_vals.end(); ++_iter463) + std::map ::const_iterator _iter470; + for (_iter470 = this->part_vals.begin(); _iter470 != this->part_vals.end(); ++_iter470) { - xfer += oprot->writeString(_iter463->first); - xfer += oprot->writeString(_iter463->second); + xfer += oprot->writeString(_iter470->first); + xfer += oprot->writeString(_iter470->second); } xfer += oprot->writeMapEnd(); } @@ -9756,11 +9798,11 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, (*(this->part_vals)).size()); - std::map ::const_iterator _iter464; - for (_iter464 = (*(this->part_vals)).begin(); _iter464 != (*(this->part_vals)).end(); ++_iter464) + std::map ::const_iterator _iter471; + for (_iter471 = (*(this->part_vals)).begin(); _iter471 != (*(this->part_vals)).end(); ++_iter471) { - xfer += oprot->writeString(_iter464->first); - xfer += oprot->writeString(_iter464->second); + xfer += oprot->writeString(_iter471->first); + xfer += oprot->writeString(_iter471->second); } xfer += 
oprot->writeMapEnd(); } @@ -11021,14 +11063,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size465; - ::apache::thrift::protocol::TType _etype468; - iprot->readListBegin(_etype468, _size465); - this->success.resize(_size465); - uint32_t _i469; - for (_i469 = 0; _i469 < _size465; ++_i469) + uint32_t _size472; + ::apache::thrift::protocol::TType _etype475; + iprot->readListBegin(_etype475, _size472); + this->success.resize(_size472); + uint32_t _i476; + for (_i476 = 0; _i476 < _size472; ++_i476) { - xfer += this->success[_i469].read(iprot); + xfer += this->success[_i476].read(iprot); } iprot->readListEnd(); } @@ -11075,10 +11117,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, this->success.size()); - std::vector ::const_iterator _iter470; - for (_iter470 = this->success.begin(); _iter470 != this->success.end(); ++_iter470) + std::vector ::const_iterator _iter477; + for (_iter477 = this->success.begin(); _iter477 != this->success.end(); ++_iter477) { - xfer += (*_iter470).write(oprot); + xfer += (*_iter477).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11121,14 +11163,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size471; - ::apache::thrift::protocol::TType _etype474; - iprot->readListBegin(_etype474, _size471); - (*(this->success)).resize(_size471); - uint32_t _i475; - for (_i475 = 0; _i475 < _size471; ++_i475) + uint32_t _size478; + ::apache::thrift::protocol::TType _etype481; + iprot->readListBegin(_etype481, _size478); + (*(this->success)).resize(_size478); + uint32_t _i482; + for (_i482 = 0; _i482 < _size478; ++_i482) { - xfer += (*(this->success))[_i475].read(iprot); + xfer += (*(this->success))[_i482].read(iprot); } iprot->readListEnd(); } @@ -11279,14 +11321,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size476; - ::apache::thrift::protocol::TType _etype479; - iprot->readListBegin(_etype479, _size476); - this->success.resize(_size476); - uint32_t _i480; - for (_i480 = 0; _i480 < _size476; ++_i480) + uint32_t _size483; + ::apache::thrift::protocol::TType _etype486; + iprot->readListBegin(_etype486, _size483); + this->success.resize(_size483); + uint32_t _i487; + for (_i487 = 0; _i487 < _size483; ++_i487) { - xfer += iprot->readString(this->success[_i480]); + xfer += iprot->readString(this->success[_i487]); } iprot->readListEnd(); } @@ -11325,10 +11367,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->success.size()); - std::vector ::const_iterator _iter481; - for (_iter481 = this->success.begin(); _iter481 != this->success.end(); ++_iter481) + std::vector ::const_iterator _iter488; + for (_iter488 = this->success.begin(); _iter488 != this->success.end(); ++_iter488) { - xfer += oprot->writeString((*_iter481)); + xfer += oprot->writeString((*_iter488)); } xfer += oprot->writeListEnd(); } @@ -11367,14 +11409,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size482; - ::apache::thrift::protocol::TType _etype485; - iprot->readListBegin(_etype485, _size482); - (*(this->success)).resize(_size482); - uint32_t _i486; - for (_i486 = 0; _i486 < _size482; ++_i486) + uint32_t _size489; + ::apache::thrift::protocol::TType _etype492; + iprot->readListBegin(_etype492, _size489); 
+ (*(this->success)).resize(_size489); + uint32_t _i493; + for (_i493 = 0; _i493 < _size489; ++_i493) { - xfer += iprot->readString((*(this->success))[_i486]); + xfer += iprot->readString((*(this->success))[_i493]); } iprot->readListEnd(); } @@ -11831,14 +11873,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size487; - ::apache::thrift::protocol::TType _etype490; - iprot->readListBegin(_etype490, _size487); - this->success.resize(_size487); - uint32_t _i491; - for (_i491 = 0; _i491 < _size487; ++_i491) + uint32_t _size494; + ::apache::thrift::protocol::TType _etype497; + iprot->readListBegin(_etype497, _size494); + this->success.resize(_size494); + uint32_t _i498; + for (_i498 = 0; _i498 < _size494; ++_i498) { - xfer += iprot->readString(this->success[_i491]); + xfer += iprot->readString(this->success[_i498]); } iprot->readListEnd(); } @@ -11877,10 +11919,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->success.size()); - std::vector ::const_iterator _iter492; - for (_iter492 = this->success.begin(); _iter492 != this->success.end(); ++_iter492) + std::vector ::const_iterator _iter499; + for (_iter499 = this->success.begin(); _iter499 != this->success.end(); ++_iter499) { - xfer += oprot->writeString((*_iter492)); + xfer += oprot->writeString((*_iter499)); } xfer += oprot->writeListEnd(); } @@ -11919,14 +11961,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size493; - ::apache::thrift::protocol::TType _etype496; - iprot->readListBegin(_etype496, _size493); - (*(this->success)).resize(_size493); - uint32_t _i497; - for (_i497 = 0; _i497 < _size493; ++_i497) + uint32_t _size500; + ::apache::thrift::protocol::TType _etype503; + iprot->readListBegin(_etype503, _size500); + (*(this->success)).resize(_size500); + uint32_t _i504; + for (_i504 = 0; _i504 < _size500; ++_i504) { - xfer += iprot->readString((*(this->success))[_i497]); + xfer += iprot->readString((*(this->success))[_i504]); } iprot->readListEnd(); } @@ -11993,9 +12035,9 @@ break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast498; - xfer += iprot->readI32(ecast498); - this->principal_type = (PrincipalType::type)ecast498; + int32_t ecast505; + xfer += iprot->readI32(ecast505); + this->principal_type = (PrincipalType::type)ecast505; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -12011,9 +12053,9 @@ break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast499; - xfer += iprot->readI32(ecast499); - this->grantorType = (PrincipalType::type)ecast499; + int32_t ecast506; + xfer += iprot->readI32(ecast506); + this->grantorType = (PrincipalType::type)ecast506; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -12245,9 +12287,9 @@ break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast500; - xfer += iprot->readI32(ecast500); - this->principal_type = (PrincipalType::type)ecast500; + int32_t ecast507; + xfer += iprot->readI32(ecast507); + this->principal_type = (PrincipalType::type)ecast507; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -12445,9 +12487,9 @@ break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast501; - xfer += iprot->readI32(ecast501); - this->principal_type = (PrincipalType::type)ecast501; + int32_t ecast508; + xfer += 
iprot->readI32(ecast508); + this->principal_type = (PrincipalType::type)ecast508; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -12517,14 +12559,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size502; - ::apache::thrift::protocol::TType _etype505; - iprot->readListBegin(_etype505, _size502); - this->success.resize(_size502); - uint32_t _i506; - for (_i506 = 0; _i506 < _size502; ++_i506) + uint32_t _size509; + ::apache::thrift::protocol::TType _etype512; + iprot->readListBegin(_etype512, _size509); + this->success.resize(_size509); + uint32_t _i513; + for (_i513 = 0; _i513 < _size509; ++_i513) { - xfer += this->success[_i506].read(iprot); + xfer += this->success[_i513].read(iprot); } iprot->readListEnd(); } @@ -12563,10 +12605,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, this->success.size()); - std::vector ::const_iterator _iter507; - for (_iter507 = this->success.begin(); _iter507 != this->success.end(); ++_iter507) + std::vector ::const_iterator _iter514; + for (_iter514 = this->success.begin(); _iter514 != this->success.end(); ++_iter514) { - xfer += (*_iter507).write(oprot); + xfer += (*_iter514).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12605,14 +12647,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size508; - ::apache::thrift::protocol::TType _etype511; - iprot->readListBegin(_etype511, _size508); - (*(this->success)).resize(_size508); - uint32_t _i512; - for (_i512 = 0; _i512 < _size508; ++_i512) + uint32_t _size515; + ::apache::thrift::protocol::TType _etype518; + iprot->readListBegin(_etype518, _size515); + (*(this->success)).resize(_size515); + uint32_t _i519; + for (_i519 = 0; _i519 < _size515; ++_i519) { - xfer += (*(this->success))[_i512].read(iprot); + xfer += (*(this->success))[_i519].read(iprot); } iprot->readListEnd(); } @@ -12681,14 +12723,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size513; - ::apache::thrift::protocol::TType _etype516; - iprot->readListBegin(_etype516, _size513); - this->group_names.resize(_size513); - uint32_t _i517; - for (_i517 = 0; _i517 < _size513; ++_i517) + uint32_t _size520; + ::apache::thrift::protocol::TType _etype523; + iprot->readListBegin(_etype523, _size520); + this->group_names.resize(_size520); + uint32_t _i524; + for (_i524 = 0; _i524 < _size520; ++_i524) { - xfer += iprot->readString(this->group_names[_i517]); + xfer += iprot->readString(this->group_names[_i524]); } iprot->readListEnd(); } @@ -12721,10 +12763,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, this->group_names.size()); - std::vector ::const_iterator _iter518; - for (_iter518 = this->group_names.begin(); _iter518 != this->group_names.end(); ++_iter518) + std::vector ::const_iterator _iter525; + for (_iter525 = this->group_names.begin(); _iter525 != this->group_names.end(); ++_iter525) { - xfer += oprot->writeString((*_iter518)); + xfer += oprot->writeString((*_iter525)); } xfer += oprot->writeListEnd(); } @@ -12746,10 +12788,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, (*(this->group_names)).size()); - std::vector ::const_iterator _iter519; - 
for (_iter519 = (*(this->group_names)).begin(); _iter519 != (*(this->group_names)).end(); ++_iter519) + std::vector ::const_iterator _iter526; + for (_iter526 = (*(this->group_names)).begin(); _iter526 != (*(this->group_names)).end(); ++_iter526) { - xfer += oprot->writeString((*_iter519)); + xfer += oprot->writeString((*_iter526)); } xfer += oprot->writeListEnd(); } @@ -12905,9 +12947,9 @@ break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast520; - xfer += iprot->readI32(ecast520); - this->principal_type = (PrincipalType::type)ecast520; + int32_t ecast527; + xfer += iprot->readI32(ecast527); + this->principal_type = (PrincipalType::type)ecast527; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -12991,14 +13033,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size521; - ::apache::thrift::protocol::TType _etype524; - iprot->readListBegin(_etype524, _size521); - this->success.resize(_size521); - uint32_t _i525; - for (_i525 = 0; _i525 < _size521; ++_i525) + uint32_t _size528; + ::apache::thrift::protocol::TType _etype531; + iprot->readListBegin(_etype531, _size528); + this->success.resize(_size528); + uint32_t _i532; + for (_i532 = 0; _i532 < _size528; ++_i532) { - xfer += this->success[_i525].read(iprot); + xfer += this->success[_i532].read(iprot); } iprot->readListEnd(); } @@ -13037,10 +13079,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, this->success.size()); - std::vector ::const_iterator _iter526; - for (_iter526 = this->success.begin(); _iter526 != this->success.end(); ++_iter526) + std::vector ::const_iterator _iter533; + for (_iter533 = this->success.begin(); _iter533 != this->success.end(); ++_iter533) { - xfer += (*_iter526).write(oprot); + xfer += (*_iter533).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13079,14 +13121,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size527; - ::apache::thrift::protocol::TType _etype530; - iprot->readListBegin(_etype530, _size527); - (*(this->success)).resize(_size527); - uint32_t _i531; - for (_i531 = 0; _i531 < _size527; ++_i531) + uint32_t _size534; + ::apache::thrift::protocol::TType _etype537; + iprot->readListBegin(_etype537, _size534); + (*(this->success)).resize(_size534); + uint32_t _i538; + for (_i538 = 0; _i538 < _size534; ++_i538) { - xfer += (*(this->success))[_i531].read(iprot); + xfer += (*(this->success))[_i538].read(iprot); } iprot->readListEnd(); } @@ -16341,13 +16383,13 @@ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_names failed: unknown result"); } -void ThriftHiveMetastoreClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) +void ThriftHiveMetastoreClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { - send_alter_partition(db_name, tbl_name, new_part); + send_alter_partition(db_name, tbl_name, part_vals, new_part); recv_alter_partition(); } -void ThriftHiveMetastoreClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) +void ThriftHiveMetastoreClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { 
int32_t cseqid = 0; oprot_->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_CALL, cseqid); @@ -16355,6 +16397,7 @@ ThriftHiveMetastore_alter_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; + args.part_vals = &part_vals; args.new_part = &new_part; args.write(oprot_); @@ -19219,7 +19262,7 @@ ThriftHiveMetastore_alter_partition_result result; try { - iface_->alter_partition(args.db_name, args.tbl_name, args.new_part); + iface_->alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part); } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h =================================================================== --- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h (revision 1145366) +++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h (working copy) @@ -50,7 +50,7 @@ virtual void get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) = 0; virtual void get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) = 0; virtual void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) = 0; - virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0; + virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) = 0; virtual void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) = 0; virtual void partition_name_to_vals(std::vector & _return, const std::string& part_name) = 0; virtual void partition_name_to_spec(std::map & _return, const std::string& part_name) = 0; @@ -189,7 +189,7 @@ void get_partitions_by_names(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* names */) { return; } - void alter_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */) { + void alter_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* part_vals */, const Partition& /* new_part */) { return; } void get_config_value(std::string& /* _return */, const std::string& /* name */, const std::string& /* defaultValue */) { @@ -4438,9 +4438,10 @@ }; typedef struct _ThriftHiveMetastore_alter_partition_args__isset { - _ThriftHiveMetastore_alter_partition_args__isset() : db_name(false), tbl_name(false), new_part(false) {} + _ThriftHiveMetastore_alter_partition_args__isset() : db_name(false), tbl_name(false), part_vals(false), new_part(false) {} bool db_name; bool tbl_name; + bool part_vals; bool new_part; } _ThriftHiveMetastore_alter_partition_args__isset; @@ -4454,6 +4455,7 @@ std::string db_name; std::string tbl_name; + std::vector part_vals; Partition new_part; _ThriftHiveMetastore_alter_partition_args__isset __isset; @@ -4464,6 +4466,8 @@ return false; if (!(tbl_name == rhs.tbl_name)) return false; + if (!(part_vals == rhs.part_vals)) + return false; if (!(new_part == rhs.new_part)) return false; return true; @@ -4488,6 +4492,7 @@ const std::string* db_name; const std::string* tbl_name; + const std::vector * part_vals; const Partition* 
new_part; uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; @@ -7447,8 +7452,8 @@ void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names); void send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names); void recv_get_partitions_by_names(std::vector & _return); - void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); - void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); + void alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part); + void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part); void recv_alter_partition(); void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue); void send_get_config_value(const std::string& name, const std::string& defaultValue); @@ -8063,10 +8068,10 @@ } } - void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { + void alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { uint32_t sz = ifaces_.size(); for (uint32_t i = 0; i < sz; ++i) { - ifaces_[i]->alter_partition(db_name, tbl_name, new_part); + ifaces_[i]->alter_partition(db_name, tbl_name, part_vals, new_part); } } Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp =================================================================== --- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (revision 1145366) +++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (working copy) @@ -197,7 +197,7 @@ printf("get_partitions_by_names\n"); } - void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { + void alter_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { // Your implementation goes here printf("alter_partition\n"); } Index: metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb =================================================================== --- metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (revision 1145366) +++ metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (working copy) @@ -604,13 +604,13 @@ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_names failed: unknown result') end - def alter_partition(db_name, tbl_name, new_part) - send_alter_partition(db_name, tbl_name, new_part) + def alter_partition(db_name, tbl_name, part_vals, new_part) + send_alter_partition(db_name, tbl_name, part_vals, new_part) recv_alter_partition() end - def send_alter_partition(db_name, tbl_name, new_part) - send_message('alter_partition', Alter_partition_args, :db_name => db_name, :tbl_name => tbl_name, :new_part => new_part) + def send_alter_partition(db_name, tbl_name, part_vals, new_part) + send_message('alter_partition', Alter_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :new_part => new_part) end def recv_alter_partition() @@ -1487,7 +1487,7 @@ args = read_args(iprot, Alter_partition_args) result = 
Alter_partition_result.new() begin - @handler.alter_partition(args.db_name, args.tbl_name, args.new_part) + @handler.alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part) rescue InvalidOperationException => o1 result.o1 = o1 rescue MetaException => o2 @@ -3161,11 +3161,13 @@ include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 TBL_NAME = 2 - NEW_PART = 3 + PART_VALS = 3 + NEW_PART = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, NEW_PART => {:type => ::Thrift::Types::STRUCT, :name => 'new_part', :class => Partition} } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (revision 1145366) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (working copy) @@ -103,7 +103,7 @@ public List get_partitions_by_names(String db_name, String tbl_name, List names) throws MetaException, NoSuchObjectException, TException; - public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException; + public void alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidOperationException, MetaException, TException; public String get_config_value(String name, String defaultValue) throws ConfigValSecurityException, TException; @@ -227,7 +227,7 @@ public void get_partitions_by_names(String db_name, String tbl_name, List names, AsyncMethodCallback resultHandler) throws TException; - public void alter_partition(String db_name, String tbl_name, Partition new_part, AsyncMethodCallback resultHandler) throws TException; + public void alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part, AsyncMethodCallback resultHandler) throws TException; public void get_config_value(String name, String defaultValue, AsyncMethodCallback resultHandler) throws TException; @@ -1814,18 +1814,19 @@ throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result"); } - public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException + public void alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidOperationException, MetaException, TException { - send_alter_partition(db_name, tbl_name, new_part); + send_alter_partition(db_name, tbl_name, part_vals, new_part); recv_alter_partition(); } - public void send_alter_partition(String db_name, String tbl_name, Partition new_part) throws TException + public void send_alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part) throws TException { oprot_.writeMessageBegin(new TMessage("alter_partition", TMessageType.CALL, ++seqid_)); alter_partition_args args = new alter_partition_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); + args.setPart_vals(part_vals); args.setNew_part(new_part); args.write(oprot_); oprot_.writeMessageEnd(); @@ -4124,20 +4125,22 @@ } } - public void alter_partition(String db_name, String tbl_name, 
Partition new_part, AsyncMethodCallback resultHandler) throws TException { + public void alter_partition(String db_name, String tbl_name, List part_vals, Partition new_part, AsyncMethodCallback resultHandler) throws TException { checkReady(); - alter_partition_call method_call = new alter_partition_call(db_name, tbl_name, new_part, resultHandler, this, protocolFactory, transport); + alter_partition_call method_call = new alter_partition_call(db_name, tbl_name, part_vals, new_part, resultHandler, this, protocolFactory, transport); manager.call(method_call); } public static class alter_partition_call extends TAsyncMethodCall { private String db_name; private String tbl_name; + private List part_vals; private Partition new_part; - public alter_partition_call(String db_name, String tbl_name, Partition new_part, AsyncMethodCallback resultHandler, TAsyncClient client, TProtocolFactory protocolFactory, TNonblockingTransport transport) throws TException { + public alter_partition_call(String db_name, String tbl_name, List part_vals, Partition new_part, AsyncMethodCallback resultHandler, TAsyncClient client, TProtocolFactory protocolFactory, TNonblockingTransport transport) throws TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; + this.part_vals = part_vals; this.new_part = new_part; } @@ -4146,6 +4149,7 @@ alter_partition_args args = new alter_partition_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); + args.setPart_vals(part_vals); args.setNew_part(new_part); args.write(prot); prot.writeMessageEnd(); @@ -6519,7 +6523,7 @@ iprot.readMessageEnd(); alter_partition_result result = new alter_partition_result(); try { - iface_.alter_partition(args.db_name, args.tbl_name, args.new_part); + iface_.alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part); } catch (InvalidOperationException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -38542,17 +38546,20 @@ private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1); private static final TField TBL_NAME_FIELD_DESC = new TField("tbl_name", TType.STRING, (short)2); - private static final TField NEW_PART_FIELD_DESC = new TField("new_part", TType.STRUCT, (short)3); + private static final TField PART_VALS_FIELD_DESC = new TField("part_vals", TType.LIST, (short)3); + private static final TField NEW_PART_FIELD_DESC = new TField("new_part", TType.STRUCT, (short)4); private String db_name; private String tbl_name; + private List part_vals; private Partition new_part; /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
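This struct hunk is the heart of the wire-format change: part_vals claims field id 3 as a LIST, and new_part moves from id 3 to id 4, so the type carried at id 3 changes from STRUCT to LIST. A minimal sketch of driving the regenerated Java surface, with hypothetical database, table, and value literals (only alter_partition_args and Partition come from the generated code in this patch):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.alter_partition_args;

    public class AlterPartitionArgsSketch {
      public static alter_partition_args build(Partition newPart) {
        // Values identifying the *existing* partition; the target values ride
        // inside newPart. Field ids on the wire: db_name=1, tbl_name=2,
        // part_vals=3 (LIST), new_part=4 (STRUCT, formerly id 3).
        List<String> oldVals = Arrays.asList("2011-07-11", "8"); // hypothetical
        return new alter_partition_args("compdb1", "comptbl1", oldVals, newPart);
      }
    }
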
*/ public enum _Fields implements TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - NEW_PART((short)3, "new_part"); + PART_VALS((short)3, "part_vals"), + NEW_PART((short)4, "new_part"); private static final Map byName = new HashMap(); @@ -38571,7 +38578,9 @@ return DB_NAME; case 2: // TBL_NAME return TBL_NAME; - case 3: // NEW_PART + case 3: // PART_VALS + return PART_VALS; + case 4: // NEW_PART return NEW_PART; default: return null; @@ -38621,6 +38630,9 @@ new FieldValueMetaData(TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new FieldMetaData("tbl_name", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); + tmpMap.put(_Fields.PART_VALS, new FieldMetaData("part_vals", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, + new FieldValueMetaData(TType.STRING)))); tmpMap.put(_Fields.NEW_PART, new FieldMetaData("new_part", TFieldRequirementType.DEFAULT, new StructMetaData(TType.STRUCT, Partition.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); @@ -38633,11 +38645,13 @@ public alter_partition_args( String db_name, String tbl_name, + List part_vals, Partition new_part) { this(); this.db_name = db_name; this.tbl_name = tbl_name; + this.part_vals = part_vals; this.new_part = new_part; } @@ -38651,6 +38665,13 @@ if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } + if (other.isSetPart_vals()) { + List __this__part_vals = new ArrayList(); + for (String other_element : other.part_vals) { + __this__part_vals.add(other_element); + } + this.part_vals = __this__part_vals; + } if (other.isSetNew_part()) { this.new_part = new Partition(other.new_part); } @@ -38664,6 +38685,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; + this.part_vals = null; this.new_part = null; } @@ -38713,6 +38735,44 @@ } } + public int getPart_valsSize() { + return (this.part_vals == null) ? 0 : this.part_vals.size(); + } + + public java.util.Iterator getPart_valsIterator() { + return (this.part_vals == null) ? 
null : this.part_vals.iterator(); + } + + public void addToPart_vals(String elem) { + if (this.part_vals == null) { + this.part_vals = new ArrayList(); + } + this.part_vals.add(elem); + } + + public List getPart_vals() { + return this.part_vals; + } + + public void setPart_vals(List part_vals) { + this.part_vals = part_vals; + } + + public void unsetPart_vals() { + this.part_vals = null; + } + + /** Returns true if field part_vals is set (has been asigned a value) and false otherwise */ + public boolean isSetPart_vals() { + return this.part_vals != null; + } + + public void setPart_valsIsSet(boolean value) { + if (!value) { + this.part_vals = null; + } + } + public Partition getNew_part() { return this.new_part; } @@ -38754,6 +38814,14 @@ } break; + case PART_VALS: + if (value == null) { + unsetPart_vals(); + } else { + setPart_vals((List)value); + } + break; + case NEW_PART: if (value == null) { unsetNew_part(); @@ -38773,6 +38841,9 @@ case TBL_NAME: return getTbl_name(); + case PART_VALS: + return getPart_vals(); + case NEW_PART: return getNew_part(); @@ -38791,6 +38862,8 @@ return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); + case PART_VALS: + return isSetPart_vals(); case NEW_PART: return isSetNew_part(); } @@ -38828,6 +38901,15 @@ return false; } + boolean this_present_part_vals = true && this.isSetPart_vals(); + boolean that_present_part_vals = true && that.isSetPart_vals(); + if (this_present_part_vals || that_present_part_vals) { + if (!(this_present_part_vals && that_present_part_vals)) + return false; + if (!this.part_vals.equals(that.part_vals)) + return false; + } + boolean this_present_new_part = true && this.isSetNew_part(); boolean that_present_new_part = true && that.isSetNew_part(); if (this_present_new_part || that_present_new_part) { @@ -38873,6 +38955,16 @@ return lastComparison; } } + lastComparison = Boolean.valueOf(isSetPart_vals()).compareTo(typedOther.isSetPart_vals()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPart_vals()) { + lastComparison = TBaseHelper.compareTo(this.part_vals, typedOther.part_vals); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetNew_part()).compareTo(typedOther.isSetNew_part()); if (lastComparison != 0) { return lastComparison; @@ -38914,7 +39006,24 @@ TProtocolUtil.skip(iprot, field.type); } break; - case 3: // NEW_PART + case 3: // PART_VALS + if (field.type == TType.LIST) { + { + TList _list211 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list211.size); + for (int _i212 = 0; _i212 < _list211.size; ++_i212) + { + String _elem213; + _elem213 = iprot.readString(); + this.part_vals.add(_elem213); + } + iprot.readListEnd(); + } + } else { + TProtocolUtil.skip(iprot, field.type); + } + break; + case 4: // NEW_PART if (field.type == TType.STRUCT) { this.new_part = new Partition(); this.new_part.read(iprot); @@ -38945,6 +39054,18 @@ oprot.writeString(this.tbl_name); oprot.writeFieldEnd(); } + if (this.part_vals != null) { + oprot.writeFieldBegin(PART_VALS_FIELD_DESC); + { + oprot.writeListBegin(new TList(TType.STRING, this.part_vals.size())); + for (String _iter214 : this.part_vals) + { + oprot.writeString(_iter214); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } if (this.new_part != null) { oprot.writeFieldBegin(NEW_PART_FIELD_DESC); this.new_part.write(oprot); @@ -38975,6 +39096,14 @@ } first = false; if (!first) sb.append(", "); + sb.append("part_vals:"); + if (this.part_vals == null) { + sb.append("null"); + } else { + 
sb.append(this.part_vals); + } + first = false; + if (!first) sb.append(", "); sb.append("new_part:"); if (this.new_part == null) { sb.append("null"); @@ -40685,13 +40814,13 @@ case 0: // SUCCESS if (field.type == TType.LIST) { { - TList _list211 = iprot.readListBegin(); - this.success = new ArrayList(_list211.size); - for (int _i212 = 0; _i212 < _list211.size; ++_i212) + TList _list215 = iprot.readListBegin(); + this.success = new ArrayList(_list215.size); + for (int _i216 = 0; _i216 < _list215.size; ++_i216) { - String _elem213; - _elem213 = iprot.readString(); - this.success.add(_elem213); + String _elem217; + _elem217 = iprot.readString(); + this.success.add(_elem217); } iprot.readListEnd(); } @@ -40723,9 +40852,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.success.size())); - for (String _iter214 : this.success) + for (String _iter218 : this.success) { - oprot.writeString(_iter214); + oprot.writeString(_iter218); } oprot.writeListEnd(); } @@ -41370,15 +41499,15 @@ case 0: // SUCCESS if (field.type == TType.MAP) { { - TMap _map215 = iprot.readMapBegin(); - this.success = new HashMap(2*_map215.size); - for (int _i216 = 0; _i216 < _map215.size; ++_i216) + TMap _map219 = iprot.readMapBegin(); + this.success = new HashMap(2*_map219.size); + for (int _i220 = 0; _i220 < _map219.size; ++_i220) { - String _key217; - String _val218; - _key217 = iprot.readString(); - _val218 = iprot.readString(); - this.success.put(_key217, _val218); + String _key221; + String _val222; + _key221 = iprot.readString(); + _val222 = iprot.readString(); + this.success.put(_key221, _val222); } iprot.readMapEnd(); } @@ -41410,10 +41539,10 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new TMap(TType.STRING, TType.STRING, this.success.size())); - for (Map.Entry _iter219 : this.success.entrySet()) + for (Map.Entry _iter223 : this.success.entrySet()) { - oprot.writeString(_iter219.getKey()); - oprot.writeString(_iter219.getValue()); + oprot.writeString(_iter223.getKey()); + oprot.writeString(_iter223.getValue()); } oprot.writeMapEnd(); } @@ -41942,15 +42071,15 @@ case 3: // PART_VALS if (field.type == TType.MAP) { { - TMap _map220 = iprot.readMapBegin(); - this.part_vals = new HashMap(2*_map220.size); - for (int _i221 = 0; _i221 < _map220.size; ++_i221) + TMap _map224 = iprot.readMapBegin(); + this.part_vals = new HashMap(2*_map224.size); + for (int _i225 = 0; _i225 < _map224.size; ++_i225) { - String _key222; - String _val223; - _key222 = iprot.readString(); - _val223 = iprot.readString(); - this.part_vals.put(_key222, _val223); + String _key226; + String _val227; + _key226 = iprot.readString(); + _val227 = iprot.readString(); + this.part_vals.put(_key226, _val227); } iprot.readMapEnd(); } @@ -41992,10 +42121,10 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new TMap(TType.STRING, TType.STRING, this.part_vals.size())); - for (Map.Entry _iter224 : this.part_vals.entrySet()) + for (Map.Entry _iter228 : this.part_vals.entrySet()) { - oprot.writeString(_iter224.getKey()); - oprot.writeString(_iter224.getValue()); + oprot.writeString(_iter228.getKey()); + oprot.writeString(_iter228.getValue()); } oprot.writeMapEnd(); } @@ -43259,15 +43388,15 @@ case 3: // PART_VALS if (field.type == TType.MAP) { { - TMap _map225 = iprot.readMapBegin(); - this.part_vals = new HashMap(2*_map225.size); - for (int _i226 = 0; _i226 < _map225.size; ++_i226) + TMap _map229 = iprot.readMapBegin(); + this.part_vals = new HashMap(2*_map229.size); + for 
(int _i230 = 0; _i230 < _map229.size; ++_i230) { - String _key227; - String _val228; - _key227 = iprot.readString(); - _val228 = iprot.readString(); - this.part_vals.put(_key227, _val228); + String _key231; + String _val232; + _key231 = iprot.readString(); + _val232 = iprot.readString(); + this.part_vals.put(_key231, _val232); } iprot.readMapEnd(); } @@ -43309,10 +43438,10 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new TMap(TType.STRING, TType.STRING, this.part_vals.size())); - for (Map.Entry _iter229 : this.part_vals.entrySet()) + for (Map.Entry _iter233 : this.part_vals.entrySet()) { - oprot.writeString(_iter229.getKey()); - oprot.writeString(_iter229.getValue()); + oprot.writeString(_iter233.getKey()); + oprot.writeString(_iter233.getValue()); } oprot.writeMapEnd(); } @@ -48738,14 +48867,14 @@ case 0: // SUCCESS if (field.type == TType.LIST) { { - TList _list230 = iprot.readListBegin(); - this.success = new ArrayList(_list230.size); - for (int _i231 = 0; _i231 < _list230.size; ++_i231) + TList _list234 = iprot.readListBegin(); + this.success = new ArrayList(_list234.size); + for (int _i235 = 0; _i235 < _list234.size; ++_i235) { - Index _elem232; - _elem232 = new Index(); - _elem232.read(iprot); - this.success.add(_elem232); + Index _elem236; + _elem236 = new Index(); + _elem236.read(iprot); + this.success.add(_elem236); } iprot.readListEnd(); } @@ -48785,9 +48914,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRUCT, this.success.size())); - for (Index _iter233 : this.success) + for (Index _iter237 : this.success) { - _iter233.write(oprot); + _iter237.write(oprot); } oprot.writeListEnd(); } @@ -49615,13 +49744,13 @@ case 0: // SUCCESS if (field.type == TType.LIST) { { - TList _list234 = iprot.readListBegin(); - this.success = new ArrayList(_list234.size); - for (int _i235 = 0; _i235 < _list234.size; ++_i235) + TList _list238 = iprot.readListBegin(); + this.success = new ArrayList(_list238.size); + for (int _i239 = 0; _i239 < _list238.size; ++_i239) { - String _elem236; - _elem236 = iprot.readString(); - this.success.add(_elem236); + String _elem240; + _elem240 = iprot.readString(); + this.success.add(_elem240); } iprot.readListEnd(); } @@ -49653,9 +49782,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.success.size())); - for (String _iter237 : this.success) + for (String _iter241 : this.success) { - oprot.writeString(_iter237); + oprot.writeString(_iter241); } oprot.writeListEnd(); } @@ -51488,13 +51617,13 @@ case 0: // SUCCESS if (field.type == TType.LIST) { { - TList _list238 = iprot.readListBegin(); - this.success = new ArrayList(_list238.size); - for (int _i239 = 0; _i239 < _list238.size; ++_i239) + TList _list242 = iprot.readListBegin(); + this.success = new ArrayList(_list242.size); + for (int _i243 = 0; _i243 < _list242.size; ++_i243) { - String _elem240; - _elem240 = iprot.readString(); - this.success.add(_elem240); + String _elem244; + _elem244 = iprot.readString(); + this.success.add(_elem244); } iprot.readListEnd(); } @@ -51526,9 +51655,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.success.size())); - for (String _iter241 : this.success) + for (String _iter245 : this.success) { - oprot.writeString(_iter241); + oprot.writeString(_iter245); } oprot.writeListEnd(); } @@ -54204,14 +54333,14 @@ case 0: // SUCCESS if (field.type == TType.LIST) { { - TList _list242 = iprot.readListBegin(); - this.success = new 
ArrayList(_list242.size); - for (int _i243 = 0; _i243 < _list242.size; ++_i243) + TList _list246 = iprot.readListBegin(); + this.success = new ArrayList(_list246.size); + for (int _i247 = 0; _i247 < _list246.size; ++_i247) { - Role _elem244; - _elem244 = new Role(); - _elem244.read(iprot); - this.success.add(_elem244); + Role _elem248; + _elem248 = new Role(); + _elem248.read(iprot); + this.success.add(_elem248); } iprot.readListEnd(); } @@ -54243,9 +54372,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRUCT, this.success.size())); - for (Role _iter245 : this.success) + for (Role _iter249 : this.success) { - _iter245.write(oprot); + _iter249.write(oprot); } oprot.writeListEnd(); } @@ -54690,13 +54819,13 @@ case 3: // GROUP_NAMES if (field.type == TType.LIST) { { - TList _list246 = iprot.readListBegin(); - this.group_names = new ArrayList(_list246.size); - for (int _i247 = 0; _i247 < _list246.size; ++_i247) + TList _list250 = iprot.readListBegin(); + this.group_names = new ArrayList(_list250.size); + for (int _i251 = 0; _i251 < _list250.size; ++_i251) { - String _elem248; - _elem248 = iprot.readString(); - this.group_names.add(_elem248); + String _elem252; + _elem252 = iprot.readString(); + this.group_names.add(_elem252); } iprot.readListEnd(); } @@ -54731,9 +54860,9 @@ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRING, this.group_names.size())); - for (String _iter249 : this.group_names) + for (String _iter253 : this.group_names) { - oprot.writeString(_iter249); + oprot.writeString(_iter253); } oprot.writeListEnd(); } @@ -55932,14 +56061,14 @@ case 0: // SUCCESS if (field.type == TType.LIST) { { - TList _list250 = iprot.readListBegin(); - this.success = new ArrayList(_list250.size); - for (int _i251 = 0; _i251 < _list250.size; ++_i251) + TList _list254 = iprot.readListBegin(); + this.success = new ArrayList(_list254.size); + for (int _i255 = 0; _i255 < _list254.size; ++_i255) { - HiveObjectPrivilege _elem252; - _elem252 = new HiveObjectPrivilege(); - _elem252.read(iprot); - this.success.add(_elem252); + HiveObjectPrivilege _elem256; + _elem256 = new HiveObjectPrivilege(); + _elem256.read(iprot); + this.success.add(_elem256); } iprot.readListEnd(); } @@ -55971,9 +56100,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new TList(TType.STRUCT, this.success.size())); - for (HiveObjectPrivilege _iter253 : this.success) + for (HiveObjectPrivilege _iter257 : this.success) { - _iter253.write(oprot); + _iter257.write(oprot); } oprot.writeListEnd(); } Index: metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php =================================================================== --- metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (revision 1145366) +++ metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (working copy) @@ -45,7 +45,7 @@ public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts); public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts); public function get_partitions_by_names($db_name, $tbl_name, $names); - public function alter_partition($db_name, $tbl_name, $new_part); + public function alter_partition($db_name, $tbl_name, $part_vals, $new_part); public function get_config_value($name, $defaultValue); public function partition_name_to_vals($part_name); public function partition_name_to_spec($part_name); @@ -2116,17 +2116,18 @@ throw new 
Exception("get_partitions_by_names failed: unknown result"); } - public function alter_partition($db_name, $tbl_name, $new_part) + public function alter_partition($db_name, $tbl_name, $part_vals, $new_part) { - $this->send_alter_partition($db_name, $tbl_name, $new_part); + $this->send_alter_partition($db_name, $tbl_name, $part_vals, $new_part); $this->recv_alter_partition(); } - public function send_alter_partition($db_name, $tbl_name, $new_part) + public function send_alter_partition($db_name, $tbl_name, $part_vals, $new_part) { $args = new metastore_ThriftHiveMetastore_alter_partition_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; + $args->part_vals = $part_vals; $args->new_part = $new_part; $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) @@ -11963,6 +11964,7 @@ public $db_name = null; public $tbl_name = null; + public $part_vals = null; public $new_part = null; public function __construct($vals=null) { @@ -11977,6 +11979,14 @@ 'type' => TType::STRING, ), 3 => array( + 'var' => 'part_vals', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 4 => array( 'var' => 'new_part', 'type' => TType::STRUCT, 'class' => 'metastore_Partition', @@ -11990,6 +12000,9 @@ if (isset($vals['tbl_name'])) { $this->tbl_name = $vals['tbl_name']; } + if (isset($vals['part_vals'])) { + $this->part_vals = $vals['part_vals']; + } if (isset($vals['new_part'])) { $this->new_part = $vals['new_part']; } @@ -12030,6 +12043,23 @@ } break; case 3: + if ($ftype == TType::LST) { + $this->part_vals = array(); + $_size372 = 0; + $_etype375 = 0; + $xfer += $input->readListBegin($_etype375, $_size372); + for ($_i376 = 0; $_i376 < $_size372; ++$_i376) + { + $elem377 = null; + $xfer += $input->readString($elem377); + $this->part_vals []= $elem377; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: if ($ftype == TType::STRUCT) { $this->new_part = new metastore_Partition(); $xfer += $this->new_part->read($input); @@ -12060,11 +12090,28 @@ $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } + if ($this->part_vals !== null) { + if (!is_array($this->part_vals)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); + { + $output->writeListBegin(TType::STRING, count($this->part_vals)); + { + foreach ($this->part_vals as $iter378) + { + $xfer += $output->writeString($iter378); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } if ($this->new_part !== null) { if (!is_object($this->new_part)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('new_part', TType::STRUCT, 3); + $xfer += $output->writeFieldBegin('new_part', TType::STRUCT, 4); $xfer += $this->new_part->write($output); $xfer += $output->writeFieldEnd(); } @@ -12485,14 +12532,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size372 = 0; - $_etype375 = 0; - $xfer += $input->readListBegin($_etype375, $_size372); - for ($_i376 = 0; $_i376 < $_size372; ++$_i376) + $_size379 = 0; + $_etype382 = 0; + $xfer += $input->readListBegin($_etype382, $_size379); + for ($_i383 = 0; $_i383 < $_size379; ++$_i383) { - $elem377 = null; - $xfer += $input->readString($elem377); - $this->success []= 
$elem377; + $elem384 = null; + $xfer += $input->readString($elem384); + $this->success []= $elem384; } $xfer += $input->readListEnd(); } else { @@ -12528,9 +12575,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter378) + foreach ($this->success as $iter385) { - $xfer += $output->writeString($iter378); + $xfer += $output->writeString($iter385); } } $output->writeListEnd(); @@ -12681,17 +12728,17 @@ case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size379 = 0; - $_ktype380 = 0; - $_vtype381 = 0; - $xfer += $input->readMapBegin($_ktype380, $_vtype381, $_size379); - for ($_i383 = 0; $_i383 < $_size379; ++$_i383) + $_size386 = 0; + $_ktype387 = 0; + $_vtype388 = 0; + $xfer += $input->readMapBegin($_ktype387, $_vtype388, $_size386); + for ($_i390 = 0; $_i390 < $_size386; ++$_i390) { - $key384 = ''; - $val385 = ''; - $xfer += $input->readString($key384); - $xfer += $input->readString($val385); - $this->success[$key384] = $val385; + $key391 = ''; + $val392 = ''; + $xfer += $input->readString($key391); + $xfer += $input->readString($val392); + $this->success[$key391] = $val392; } $xfer += $input->readMapEnd(); } else { @@ -12727,10 +12774,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter386 => $viter387) + foreach ($this->success as $kiter393 => $viter394) { - $xfer += $output->writeString($kiter386); - $xfer += $output->writeString($viter387); + $xfer += $output->writeString($kiter393); + $xfer += $output->writeString($viter394); } } $output->writeMapEnd(); @@ -12838,17 +12885,17 @@ case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size388 = 0; - $_ktype389 = 0; - $_vtype390 = 0; - $xfer += $input->readMapBegin($_ktype389, $_vtype390, $_size388); - for ($_i392 = 0; $_i392 < $_size388; ++$_i392) + $_size395 = 0; + $_ktype396 = 0; + $_vtype397 = 0; + $xfer += $input->readMapBegin($_ktype396, $_vtype397, $_size395); + for ($_i399 = 0; $_i399 < $_size395; ++$_i399) { - $key393 = ''; - $val394 = ''; - $xfer += $input->readString($key393); - $xfer += $input->readString($val394); - $this->part_vals[$key393] = $val394; + $key400 = ''; + $val401 = ''; + $xfer += $input->readString($key400); + $xfer += $input->readString($val401); + $this->part_vals[$key400] = $val401; } $xfer += $input->readMapEnd(); } else { @@ -12893,10 +12940,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter395 => $viter396) + foreach ($this->part_vals as $kiter402 => $viter403) { - $xfer += $output->writeString($kiter395); - $xfer += $output->writeString($viter396); + $xfer += $output->writeString($kiter402); + $xfer += $output->writeString($viter403); } } $output->writeMapEnd(); @@ -13188,17 +13235,17 @@ case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size397 = 0; - $_ktype398 = 0; - $_vtype399 = 0; - $xfer += $input->readMapBegin($_ktype398, $_vtype399, $_size397); - for ($_i401 = 0; $_i401 < $_size397; ++$_i401) + $_size404 = 0; + $_ktype405 = 0; + $_vtype406 = 0; + $xfer += $input->readMapBegin($_ktype405, $_vtype406, $_size404); + for ($_i408 = 0; $_i408 < $_size404; ++$_i408) { - $key402 = ''; - $val403 = ''; - $xfer += $input->readString($key402); - $xfer += $input->readString($val403); - $this->part_vals[$key402] = $val403; + $key409 = ''; + $val410 = ''; + $xfer += $input->readString($key409); + $xfer += $input->readString($val410); + $this->part_vals[$key409] = $val410; 
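Like the map read above, the newly added part_vals list (the case 3 / case 4 hunks earlier in each binding) is guarded by a wire-type check before decoding, with a skip otherwise. That guard is what makes the id-3 type change (STRUCT before, LIST now) survivable across mixed versions: an old writer's new_part struct arriving at id 3 is skipped and part_vals simply stays unset. A condensed Java sketch of the guard, assuming libthrift's protocol classes as used throughout this patch (names are illustrative; the generated code does the same with numbered temporaries):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TField;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.protocol.TProtocolUtil;
    import org.apache.thrift.protocol.TType;

    final class PartValsReadSketch {
      // Returns the decoded part_vals, or null when the field had to be skipped.
      static List<String> readPartVals(TProtocol iprot, TField field) throws TException {
        if (field.type != TType.LIST) {
          TProtocolUtil.skip(iprot, field.type); // old writers sent a STRUCT here
          return null;                           // part_vals stays unset
        }
        TList list = iprot.readListBegin();
        List<String> vals = new ArrayList<String>(list.size);
        for (int i = 0; i < list.size; ++i) {
          vals.add(iprot.readString());
        }
        iprot.readListEnd();
        return vals;
      }
    }
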
} $xfer += $input->readMapEnd(); } else { @@ -13243,10 +13290,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter404 => $viter405) + foreach ($this->part_vals as $kiter411 => $viter412) { - $xfer += $output->writeString($kiter404); - $xfer += $output->writeString($viter405); + $xfer += $output->writeString($kiter411); + $xfer += $output->writeString($viter412); } } $output->writeMapEnd(); @@ -14606,15 +14653,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size406 = 0; - $_etype409 = 0; - $xfer += $input->readListBegin($_etype409, $_size406); - for ($_i410 = 0; $_i410 < $_size406; ++$_i410) + $_size413 = 0; + $_etype416 = 0; + $xfer += $input->readListBegin($_etype416, $_size413); + for ($_i417 = 0; $_i417 < $_size413; ++$_i417) { - $elem411 = null; - $elem411 = new metastore_Index(); - $xfer += $elem411->read($input); - $this->success []= $elem411; + $elem418 = null; + $elem418 = new metastore_Index(); + $xfer += $elem418->read($input); + $this->success []= $elem418; } $xfer += $input->readListEnd(); } else { @@ -14658,9 +14705,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter412) + foreach ($this->success as $iter419) { - $xfer += $iter412->write($output); + $xfer += $iter419->write($output); } } $output->writeListEnd(); @@ -14852,14 +14899,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size413 = 0; - $_etype416 = 0; - $xfer += $input->readListBegin($_etype416, $_size413); - for ($_i417 = 0; $_i417 < $_size413; ++$_i417) + $_size420 = 0; + $_etype423 = 0; + $xfer += $input->readListBegin($_etype423, $_size420); + for ($_i424 = 0; $_i424 < $_size420; ++$_i424) { - $elem418 = null; - $xfer += $input->readString($elem418); - $this->success []= $elem418; + $elem425 = null; + $xfer += $input->readString($elem425); + $this->success []= $elem425; } $xfer += $input->readListEnd(); } else { @@ -14895,9 +14942,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter419) + foreach ($this->success as $iter426) { - $xfer += $output->writeString($iter419); + $xfer += $output->writeString($iter426); } } $output->writeListEnd(); @@ -15359,14 +15406,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size420 = 0; - $_etype423 = 0; - $xfer += $input->readListBegin($_etype423, $_size420); - for ($_i424 = 0; $_i424 < $_size420; ++$_i424) + $_size427 = 0; + $_etype430 = 0; + $xfer += $input->readListBegin($_etype430, $_size427); + for ($_i431 = 0; $_i431 < $_size427; ++$_i431) { - $elem425 = null; - $xfer += $input->readString($elem425); - $this->success []= $elem425; + $elem432 = null; + $xfer += $input->readString($elem432); + $this->success []= $elem432; } $xfer += $input->readListEnd(); } else { @@ -15402,9 +15449,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter426) + foreach ($this->success as $iter433) { - $xfer += $output->writeString($iter426); + $xfer += $output->writeString($iter433); } } $output->writeListEnd(); @@ -16044,15 +16091,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size427 = 0; - $_etype430 = 0; - $xfer += $input->readListBegin($_etype430, $_size427); - for ($_i431 = 0; $_i431 < $_size427; ++$_i431) + $_size434 = 0; + $_etype437 = 0; + $xfer += $input->readListBegin($_etype437, $_size434); + for ($_i438 = 0; $_i438 < $_size434; ++$_i438) { - $elem432 = null; 
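The hive_metastore.thrift hunk just below spells out the new contract: with part_vals null (or empty), alter_partition behaves as before and identifies the partition from the values already inside new_part; with part_vals supplied, those values name the existing partition while new_part carries the target values and location, which is what makes rename expressible over the same RPC. A minimal sketch of that identification rule, with illustrative names (the real logic lives in the Java alter handler, not in this form):

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;

    final class AlterTargetSketch {
      // Which values identify the partition being altered or renamed?
      static List<String> identifyingValues(List<String> partVals, Partition newPart) {
        if (partVals == null || partVals.isEmpty()) {
          return newPart.getValues(); // classic alter: same partition, new metadata
        }
        return partVals;              // rename: old values here, new values in newPart
      }
    }
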
- $elem432 = new metastore_Role(); - $xfer += $elem432->read($input); - $this->success []= $elem432; + $elem439 = null; + $elem439 = new metastore_Role(); + $xfer += $elem439->read($input); + $this->success []= $elem439; } $xfer += $input->readListEnd(); } else { @@ -16088,9 +16135,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter433) + foreach ($this->success as $iter440) { - $xfer += $iter433->write($output); + $xfer += $iter440->write($output); } } $output->writeListEnd(); @@ -16188,14 +16235,14 @@ case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size434 = 0; - $_etype437 = 0; - $xfer += $input->readListBegin($_etype437, $_size434); - for ($_i438 = 0; $_i438 < $_size434; ++$_i438) + $_size441 = 0; + $_etype444 = 0; + $xfer += $input->readListBegin($_etype444, $_size441); + for ($_i445 = 0; $_i445 < $_size441; ++$_i445) { - $elem439 = null; - $xfer += $input->readString($elem439); - $this->group_names []= $elem439; + $elem446 = null; + $xfer += $input->readString($elem446); + $this->group_names []= $elem446; } $xfer += $input->readListEnd(); } else { @@ -16236,9 +16283,9 @@ { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter440) + foreach ($this->group_names as $iter447) { - $xfer += $output->writeString($iter440); + $xfer += $output->writeString($iter447); } } $output->writeListEnd(); @@ -16525,15 +16572,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size441 = 0; - $_etype444 = 0; - $xfer += $input->readListBegin($_etype444, $_size441); - for ($_i445 = 0; $_i445 < $_size441; ++$_i445) + $_size448 = 0; + $_etype451 = 0; + $xfer += $input->readListBegin($_etype451, $_size448); + for ($_i452 = 0; $_i452 < $_size448; ++$_i452) { - $elem446 = null; - $elem446 = new metastore_HiveObjectPrivilege(); - $xfer += $elem446->read($input); - $this->success []= $elem446; + $elem453 = null; + $elem453 = new metastore_HiveObjectPrivilege(); + $xfer += $elem453->read($input); + $this->success []= $elem453; } $xfer += $input->readListEnd(); } else { @@ -16569,9 +16616,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter447) + foreach ($this->success as $iter454) { - $xfer += $iter447->write($output); + $xfer += $iter454->write($output); } } $output->writeListEnd(); Index: metastore/if/hive_metastore.thrift =================================================================== --- metastore/if/hive_metastore.thrift (revision 1145366) +++ metastore/if/hive_metastore.thrift (working copy) @@ -323,10 +323,10 @@ throws(1:MetaException o1, 2:NoSuchObjectException o2) // changes the partition to the new partition object. partition is identified from the part values - // in the new_part + // in the new_part if part_vals == null, otherwise, partition is identified from part_vals // * See notes on DDL_TIME - void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part) - throws(1:InvalidOperationException o1, 2:MetaException o2) + void alter_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:Partition new_part) + throws (1:InvalidOperationException o1, 2:MetaException o2) // gets the value of the configuration key in the metastore server. returns // defaultValue if the key does not exist. 
if the configuration key does not Index: ql/src/test/results/clientnegative/alter_rename_partition_failure3.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_rename_partition_failure3.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_rename_partition_failure3.q.out (revision 0) @@ -0,0 +1,31 @@ +PREHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition +PREHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='old_part1:', pcol2='old_part2:', pcol3='old_part3:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@alter_rename_partition +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +FAILED: Error in metadata: Unable to rename partition. 
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out (revision 0) @@ -0,0 +1,31 @@ +PREHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition +PREHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='old_part1:', pcol2='old_part2:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@alter_rename_partition +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +FAILED: Error in metadata: Unable to rename partition. 
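Taken together, the three new negative golden files pin down the rename error surface: alter_rename_partition_failure3 (above) renames to a spec with an extra key (pcol3), alter_rename_partition_failure2 (this file) renames a partition onto an identical spec, and alter_rename_partition_failure (below) targets a nonexistent partition; each surfaces as a return-code-1 DDLTask failure. A hedged sketch of the kind of spec validation the first two imply, with the exception message chosen to match the golden output (the actual check lives in the DDL/metastore layer, not in this form):

    import java.util.LinkedHashMap;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    final class RenameSpecCheckSketch {
      // Illustrative only: old and new specs must name the same partition
      // keys, and the rename must actually change at least one value.
      static void validate(LinkedHashMap<String, String> oldSpec,
                           LinkedHashMap<String, String> newSpec) throws HiveException {
        if (!oldSpec.keySet().equals(newSpec.keySet()) || oldSpec.equals(newSpec)) {
          throw new HiveException("Unable to rename partition.");
        }
      }
    }
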
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out (revision 0) @@ -0,0 +1,30 @@ +PREHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition +PREHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: alter table alter_rename_partition partition (pCol1='nonexist_part1:', pcol2='nonexist_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@alter_rename_partition +Failed with exception null +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientpositive/alter_rename_partition.q.out =================================================================== --- ql/src/test/results/clientpositive/alter_rename_partition.q.out (revision 0) +++ ql/src/test/results/clientpositive/alter_rename_partition.q.out (revision 0) @@ -0,0 +1,239 @@ +PREHOOK: query: -- Cleanup +DROP TABLE alter_rename_partition_src +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Cleanup +DROP TABLE alter_rename_partition_src +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE alter_rename_partition +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE alter_rename_partition +POSTHOOK: type: DROPTABLE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 
+srcpart +PREHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition_src ( col1 string ) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: default@alter_rename_partition_src +PREHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_rename_partition +PREHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:' +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +PREHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-10_504_7043162834384097264/-mr-10000 +POSTHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-10_504_7043162834384097264/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +1 old_part1: old_part2: +2 old_part1: old_part2: +3 old_part1: old_part2: +4 old_part1: old_part2: +5 old_part1: old_part2: +6 old_part1: old_part2: +PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@alter_rename_partition +PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:') +POSTHOOK: type: ALTERTABLE_RENAMEPART +POSTHOOK: Input: default@alter_rename_partition +POSTHOOK: Output: 
default@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +POSTHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW PARTITIONS alter_rename_partition +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS alter_rename_partition +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +pcol1=new_part1%3A/pcol2=new_part2%3A +PREHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:' +PREHOOK: type: QUERY +PREHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-11_254_5867032021766375600/-mr-10000 +POSTHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-11_254_5867032021766375600/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: select * from alter_rename_partition where pcol1='new_part1:' and pcol2='new_part2:' +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +PREHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-11_309_4226098800500660937/-mr-10000 +POSTHOOK: query: select * from alter_rename_partition where pcol1='new_part1:' and pcol2='new_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-11_309_4226098800500660937/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +1 new_part1: new_part2: +2 new_part1: new_part2: +3 new_part1: new_part2: +4 new_part1: new_part2: +5 new_part1: new_part2: +6 new_part1: new_part2: +PREHOOK: query: -- Cleanup +DROP TABLE alter_rename_partition_src +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter_rename_partition_src +PREHOOK: Output: default@alter_rename_partition_src +POSTHOOK: query: -- Cleanup +DROP TABLE alter_rename_partition_src +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter_rename_partition_src +POSTHOOK: Output: default@alter_rename_partition_src +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: DROP TABLE alter_rename_partition +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter_rename_partition +PREHOOK: Output: default@alter_rename_partition +POSTHOOK: query: DROP TABLE alter_rename_partition +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter_rename_partition +POSTHOOK: Output: default@alter_rename_partition +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, 
type:string, comment:null), ] +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: -- With non-default Database + +CREATE DATABASE alter_rename_partition_db +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- With non-default Database + +CREATE DATABASE alter_rename_partition_db +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: USE alter_rename_partition_db +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE alter_rename_partition_db +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter_rename_partition_src (col1 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter_rename_partition_src (col1 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition_src +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter_rename_partition_src +PREHOOK: type: LOAD +PREHOOK: Output: alter_rename_partition_db@alter_rename_partition_src +POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter_rename_partition_src +POSTHOOK: type: LOAD +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition_src +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE alter_rename_partition (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE alter_rename_partition (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: INSERT OVERWRITE TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') SELECT col1 FROM alter_rename_partition_src +PREHOOK: type: QUERY +PREHOOK: Input: 
alter_rename_partition_db@alter_rename_partition_src +PREHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: INSERT OVERWRITE TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') SELECT col1 FROM alter_rename_partition_src +POSTHOOK: type: QUERY +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition_src +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' AND pcol2='old_part2:' +PREHOOK: type: QUERY +PREHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +PREHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-17_869_1349534695674823942/-mr-10000 +POSTHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' AND pcol2='old_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-17_869_1349534695674823942/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +1 old_part1: old_part2: +2 old_part1: old_part2: +3 old_part1: old_part2: +4 old_part1: old_part2: +5 old_part1: old_part2: +6 old_part1: old_part2: +PREHOOK: query: ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: alter_rename_partition_db@alter_rename_partition +PREHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:') +POSTHOOK: type: ALTERTABLE_RENAMEPART +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SHOW PARTITIONS alter_rename_partition +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS 
alter_rename_partition +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +pcol1=new_part1%3A/pcol2=new_part2%3A +PREHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' and pcol2='old_part2:' +PREHOOK: type: QUERY +PREHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-18_506_2856444261350669507/-mr-10000 +POSTHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' and pcol2='old_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-18_506_2856444261350669507/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='new_part1:' and pcol2='new_part2:' +PREHOOK: type: QUERY +PREHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +PREHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-18_554_5725601924055799575/-mr-10000 +POSTHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='new_part1:' and pcol2='new_part2:' +POSTHOOK: type: QUERY +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A +POSTHOOK: Output: file:/tmp/weiyan/hive_2011-07-20_17-10-18_554_5725601924055799575/-mr-10000 +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ] +1 new_part1: new_part2: +2 new_part1: new_part2: +3 new_part1: new_part2: +4 new_part1: new_part2: +5 new_part1: new_part2: +6 new_part1: new_part2: Index: ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out =================================================================== --- ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out (revision 0) +++ ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out (revision 0) @@ -0,0 +1,143 @@ +PREHOOK: query: create table src_auth_tmp as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table src_auth_tmp as select * from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_auth_tmp +PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_part 
+PREHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part +POSTHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part +PREHOOK: query: grant select on table src_auth_tmp to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@src_auth_tmp +POSTHOOK: query: grant select on table src_auth_tmp to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@src_auth_tmp +PREHOOK: query: -- column grant to user +grant Create on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@authorization_part +POSTHOOK: query: -- column grant to user +grant Create on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: grant Update on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@authorization_part +POSTHOOK: query: grant Update on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: grant Drop on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@authorization_part +POSTHOOK: query: grant Drop on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Create +grantTime Wed Jul 20 17:39:29 PDT 2011 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Update +grantTime Wed Jul 20 17:39:29 PDT 2011 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Drop +grantTime Wed Jul 20 17:39:30 PDT 2011 +grantor hive_test_user +PREHOOK: query: grant select(key) on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@authorization_part +POSTHOOK: query: grant select(key) on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp +PREHOOK: type: QUERY +PREHOOK: Input: default@src_auth_tmp +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_auth_tmp +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE 
[(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] + +database default +table authorization_part +partition ds=2010 +columnName key +principalName hive_test_user +principalType USER +privilege Select +grantTime Wed Jul 20 17:39:36 PDT 2011 +grantor hive_test_user +PREHOOK: query: alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp') +POSTHOOK: type: ALTERTABLE_RENAMEPART +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Output: default@authorization_part@ds=2010_tmp +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010_tmp') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010_tmp') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] + +database default +table authorization_part +partition ds=2010_tmp +columnName key +principalName hive_test_user +principalType USER +privilege Select +grantTime Wed Jul 20 17:39:36 PDT 2011 +grantor hive_test_user +PREHOOK: query: drop table authorization_part +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part +POSTHOOK: query: drop table authorization_part +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] Index: ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q =================================================================== --- ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q (revision 0) @@ -0,0 +1,6 @@ +create table alter_rename_partition_src ( col1 string ) stored as textfile ; 
+load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ; +create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile; +insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ; + +alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='old_part1:', pcol2='old_part2:'); Index: ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q =================================================================== --- ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q (revision 0) @@ -0,0 +1,6 @@ +create table alter_rename_partition_src ( col1 string ) stored as textfile ; +load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ; +create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile; +insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ; + +alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='old_part1:', pcol2='old_part2:', pcol3='old_part3:'); \ No newline at end of file Index: ql/src/test/queries/clientnegative/alter_rename_partition_failure.q =================================================================== --- ql/src/test/queries/clientnegative/alter_rename_partition_failure.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_rename_partition_failure.q (revision 0) @@ -0,0 +1,6 @@ +create table alter_rename_partition_src ( col1 string ) stored as textfile ; +load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ; +create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile; +insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ; + +alter table alter_rename_partition partition (pCol1='nonexist_part1:', pcol2='nonexist_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:'); Index: ql/src/test/queries/clientpositive/alter_rename_partition.q =================================================================== --- ql/src/test/queries/clientpositive/alter_rename_partition.q (revision 0) +++ ql/src/test/queries/clientpositive/alter_rename_partition.q (revision 0) @@ -0,0 +1,41 @@ +-- Cleanup +DROP TABLE alter_rename_partition_src; +DROP TABLE alter_rename_partition; +SHOW TABLES; + +create table alter_rename_partition_src ( col1 string ) stored as textfile ; +load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ; + +create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile; + +insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ; +select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:'; + +alter table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:'); +SHOW PARTITIONS 
alter_rename_partition; +select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:'; +select * from alter_rename_partition where pcol1='new_part1:' and pcol2='new_part2:'; + +-- Cleanup +DROP TABLE alter_rename_partition_src; +DROP TABLE alter_rename_partition; +SHOW TABLES; + +-- With non-default Database + +CREATE DATABASE alter_rename_partition_db; +USE alter_rename_partition_db; +SHOW TABLES; + +CREATE TABLE alter_rename_partition_src (col1 STRING) STORED AS TEXTFILE ; +LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter_rename_partition_src ; + +CREATE TABLE alter_rename_partition (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE; + +INSERT OVERWRITE TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') SELECT col1 FROM alter_rename_partition_src ; +SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' AND pcol2='old_part2:'; + +ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:'); +SHOW PARTITIONS alter_rename_partition; +SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' and pcol2='old_part2:'; +SELECT * FROM alter_rename_partition WHERE pcol1='new_part1:' and pcol2='new_part2:'; Index: ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q =================================================================== --- ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q (revision 0) +++ ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q (revision 0) @@ -0,0 +1,20 @@ +create table src_auth_tmp as select * from src; + +create table authorization_part (key int, value string) partitioned by (ds string); +ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE"); +set hive.security.authorization.enabled=true; +grant select on table src_auth_tmp to user hive_test_user; + +-- column grant to user +grant Create on table authorization_part to user hive_test_user; +grant Update on table authorization_part to user hive_test_user; +grant Drop on table authorization_part to user hive_test_user; + +show grant user hive_test_user on table authorization_part; +grant select(key) on table authorization_part to user hive_test_user; +insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp; +show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); +alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp'); +show grant user hive_test_user on table authorization_part(key) partition (ds='2010_tmp'); + +drop table authorization_part; Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (revision 1145366) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (working copy) @@ -518,6 +518,28 @@ } /** + * Set the partition's values. + * + * @param partSpec + * Partition specifications. + * @throws HiveException + * Thrown if the partition spec does not match the table's partition columns. + */ + public void setValues(Map partSpec) + throws HiveException { + List pvals = new ArrayList(); + for (FieldSchema field : table.getPartCols()) { + String val = partSpec.get(field.getName()); + if (val == null) { + throw new HiveException( + "partition spec is invalid. 
" + field.getName() + " does not exist in input."); + pvals.add(val); + } + tPartition.setValues(pvals); + } + + /** * @param protectMode */ public void setProtectMode(ProtectMode protectMode){ Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (revision 1145366) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (working copy) @@ -61,6 +61,7 @@ import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.HiveObjectType; import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -425,6 +426,54 @@ } } + /** + * Rename an old partition to a new partition. + * + * @param tbl + * existing table + * @param oldPartSpec + * spec of the old partition + * @param newPart + * the new partition + * @throws HiveException + * if the partition specs are invalid or the metastore + * rename fails + */ + public void renamePartition(Table tbl, Map oldPartSpec, Partition newPart) + throws HiveException { + try { + Map newPartSpec = newPart.getSpec(); + if (oldPartSpec.keySet().size() != tbl.getPartCols().size() + || newPartSpec.keySet().size() != tbl.getPartCols().size()) { + throw new HiveException("Unable to rename partition: partition specs must cover all partition columns."); + } + if (!oldPartSpec.keySet().equals(newPartSpec.keySet())) { + throw new HiveException("Unable to rename partition: old and new partition specs must use the same partition columns."); + } + List pvals = new ArrayList(); + + for (FieldSchema field : tbl.getPartCols()) { + String val = oldPartSpec.get(field.getName()); + if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) + || (val != null && val.length() == 0)) { + throw new HiveException("renamePartition: value for key " + + field.getName() + " is null or empty"); + } else if (val != null) { + pvals.add(val); + } + } + getMSC().renamePartition(tbl.getDbName(), tbl.getTableName(), pvals, + newPart.getTPartition()); + + } catch (InvalidOperationException e) { + throw new HiveException("Unable to rename partition.", e); + } catch (MetaException e) { + throw new HiveException("Unable to rename partition.", e); + } catch (TException e) { + throw new HiveException("Unable to rename partition.", e); + } + } + public void alterDatabase(String dbName, Database db) throws HiveException { try { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1145366) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy) @@ -118,6 +118,7 @@ import org.apache.hadoop.hive.ql.plan.PrincipalDesc; import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; import org.apache.hadoop.hive.ql.plan.RevokeDesc; import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; @@ -262,6 +263,11 @@ return addPartition(db, addPartitionDesc); } + RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc(); + if (renamePartitionDesc != null) { + return renamePartition(db, renamePartitionDesc); + } + AlterTableSimpleDesc simpleDesc = 
work.getAlterTblSimpleDesc(); if (simpleDesc != null) { if (simpleDesc.getType() == AlterTableTypes.TOUCH) { @@ -919,6 +925,32 @@ } /** + * Rename a partition in a table. + * + * @param db + * database in which the rename takes place. + * @param renamePartitionDesc + * describes the old partition and the new partition to rename it to. + * @return Returns 0 when execution succeeds and above 0 if it fails. + * @throws HiveException + */ + private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) throws HiveException { + + Table tbl = db.getTable(renamePartitionDesc.getDbName(), renamePartitionDesc.getTableName()); + + validateAlterTableType( + tbl, AlterTableDesc.AlterTableTypes.RENAMEPARTITION, + false); + Partition newPart = db.getPartition(tbl, renamePartitionDesc.getOldPartSpec(), false); + newPart.setValues(renamePartitionDesc.getNewPartSpec()); + db.renamePartition(tbl, renamePartitionDesc.getOldPartSpec(), newPart); + Partition part = db + .getPartition(tbl, renamePartitionDesc.getNewPartSpec(), false); + work.getOutputs().add(new WriteEntity(part)); + return 0; + } + + /** * Rewrite the partition's metadata and force the pre/post execute hooks to * be fired. * @@ -1457,6 +1489,7 @@ switch (alterType) { case ADDPARTITION: case DROPPARTITION: + case RENAMEPARTITION: case ADDPROPS: case RENAME: // allow this form Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (revision 1145366) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (working copy) @@ -53,6 +53,7 @@ private ShowPartitionsDesc showPartsDesc; private DescTableDesc descTblDesc; private AddPartitionDesc addPartitionDesc; + private RenamePartitionDesc renamePartitionDesc; private AlterTableSimpleDesc alterTblSimpleDesc; private MsckDesc msckDesc; private ShowTableStatusDesc showTblStatusDesc; @@ -67,7 +68,7 @@ private GrantRevokeRoleDDL grantRevokeRoleDDL; boolean needLock = false; - + /** * ReadEntitites that are passed to the hooks. */ @@ -310,6 +311,17 @@ } /** + * @param renamePartitionDesc + * information about the partition we want to rename. + */ + public DDLWork(HashSet inputs, HashSet outputs, + RenamePartitionDesc renamePartitionDesc) { + this(inputs, outputs); + + this.renamePartitionDesc = renamePartitionDesc; + } + + /** * @param touchDesc * information about the table/partitions that we want to touch */ @@ -713,6 +725,21 @@ } /** + * @return information about the partition we want to rename. + */ + public RenamePartitionDesc getRenamePartitionDesc() { + return renamePartitionDesc; + } + + /** + * @param renamePartitionDesc + * information about the partition we want to rename. + */ + public void setRenamePartitionDesc(RenamePartitionDesc renamePartitionDesc) { + this.renamePartitionDesc = renamePartitionDesc; + } + + /** * @return information about the table/partitions we want to alter. 
*/ public AlterTableSimpleDesc getAlterTblSimpleDesc() { @@ -806,7 +833,7 @@ public void setRoleDDLDesc(RoleDDLDesc roleDDLDesc) { this.roleDDLDesc = roleDDLDesc; } - + /** * @return grant desc */ @@ -820,7 +847,7 @@ public void setGrantDesc(GrantDesc grantDesc) { this.grantDesc = grantDesc; } - + /** * @return show grant desc */ @@ -842,7 +869,7 @@ public void setRevokeDesc(RevokeDesc revokeDesc) { this.revokeDesc = revokeDesc; } - + /** * @return */ @@ -856,7 +883,7 @@ public void setGrantRevokeRoleDDL(GrantRevokeRoleDDL grantRevokeRoleDDL) { this.grantRevokeRoleDDL = grantRevokeRoleDDL; } - + public void setAlterDatabaseDesc(AlterDatabaseDesc alterDbDesc) { this.alterDbDesc = alterDbDesc; } @@ -864,7 +891,7 @@ public AlterDatabaseDesc getAlterDatabaseDesc() { return this.alterDbDesc; } - + /** * @return descriptor for merging files */ Index: ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (revision 1145366) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (working copy) @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.security.authorization.Privilege; public enum HiveOperation { - + EXPLAIN("EXPLAIN", null, null), LOAD("LOAD", null, new Privilege[]{Privilege.ALTER_DATA}), EXPORT("EXPORT", new Privilege[]{Privilege.SELECT}, null), @@ -36,6 +36,7 @@ ALTERTABLE_ADDCOLS("ALTERTABLE_ADDCOLS", new Privilege[]{Privilege.ALTER_METADATA}, null), ALTERTABLE_REPLACECOLS("ALTERTABLE_REPLACECOLS", new Privilege[]{Privilege.ALTER_METADATA}, null), ALTERTABLE_RENAMECOL("ALTERTABLE_RENAMECOL", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_RENAMEPART("ALTERTABLE_RENAMEPART", new Privilege[]{Privilege.DROP}, new Privilege[]{Privilege.CREATE}), ALTERTABLE_RENAME("ALTERTABLE_RENAME", new Privilege[]{Privilege.ALTER_METADATA}, null), ALTERTABLE_DROPPARTS("ALTERTABLE_DROPPARTS", new Privilege[]{Privilege.DROP}, null), ALTERTABLE_ADDPARTS("ALTERTABLE_ADDPARTS", new Privilege[]{Privilege.CREATE}, null), @@ -79,20 +80,20 @@ ALTERPARTITION_LOCATION("ALTERPARTITION_LOCATION", new Privilege[]{Privilege.ALTER_DATA}, null), CREATETABLE("CREATETABLE", null, new Privilege[]{Privilege.CREATE}), CREATETABLE_AS_SELECT("CREATETABLE_AS_SELECT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.CREATE}), - QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}), - ALTERINDEX_PROPS("ALTERINDEX_PROPS",null, null), - ALTERDATABASE("ALTERDATABASE", null, null), - DESCDATABASE("DESCDATABASE", null, null), + QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}), + ALTERINDEX_PROPS("ALTERINDEX_PROPS",null, null), + ALTERDATABASE("ALTERDATABASE", null, null), + DESCDATABASE("DESCDATABASE", null, null), ALTERTABLE_MERGEFILES("ALTER_TABLE_MERGE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }), ALTERPARTITION_MERGEFILES("ALTER_PARTITION_MERGE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }), ; private String operationName; - + private Privilege[] inputRequiredPrivileges; - + private Privilege[] outputRequiredPrivileges; - + public Privilege[] getInputRequiredPrivileges() { return inputRequiredPrivileges; } @@ -111,9 +112,9 @@ this.inputRequiredPrivileges = inputRequiredPrivileges; this.outputRequiredPrivileges = outputRequiredPrivileges; } - + public static class 
PrivilegeAgreement { - + private Privilege[] inputUserLevelRequiredPriv; private Privilege[] inputDBLevelRequiredPriv; private Privilege[] inputTableLevelRequiredPriv; @@ -122,7 +123,7 @@ private Privilege[] outputDBLevelRequiredPriv; private Privilege[] outputTableLevelRequiredPriv; private Privilege[] outputColumnLevelRequiredPriv; - + public PrivilegeAgreement putUserLevelRequiredPriv( Privilege[] inputUserLevelRequiredPriv, Privilege[] outputUserLevelRequiredPriv) { @@ -138,7 +139,7 @@ this.outputDBLevelRequiredPriv = outputDBLevelRequiredPriv; return this; } - + public PrivilegeAgreement putTableLevelRequiredPriv( Privilege[] inputTableLevelRequiredPriv, Privilege[] outputTableLevelRequiredPriv) { Index: ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java (revision 0) @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * Contains the information needed to rename a partition. + */ +public class RenamePartitionDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + + String tableName; + String dbName; + String location; + LinkedHashMap oldPartSpec; + LinkedHashMap newPartSpec; + + /** + * For serialization only. + */ + public RenamePartitionDesc() { + } + + /** + * @param dbName + * database containing the table. + * @param tableName + * table whose partition is being renamed. + * @param oldPartSpec + * old partition specification. + * @param newPartSpec + * new partition specification. + */ + public RenamePartitionDesc(String dbName, String tableName, + Map oldPartSpec, Map newPartSpec) { + super(); + this.dbName = dbName; + this.tableName = tableName; + this.oldPartSpec = new LinkedHashMap(oldPartSpec); + this.newPartSpec = new LinkedHashMap(newPartSpec); + } + + /** + * @return database name + */ + public String getDbName() { + return dbName; + } + + /** + * @param dbName + * database name + */ + public void setDbName(String dbName) { + this.dbName = dbName; + } + + /** + * @return the table whose partition we are renaming. + */ + public String getTableName() { + return tableName; + } + + /** + * @param tableName + * the table whose partition we are renaming. 
+ */ + public void setTableName(String tableName) { + this.tableName = tableName; + } + + /** + * @return location of partition in relation to table + */ + public String getLocation() { + return location; + } + + /** + * @param location + * location of partition in relation to table + */ + public void setLocation(String location) { + this.location = location; + } + + /** + * @return old partition specification. + */ + public LinkedHashMap getOldPartSpec() { + return oldPartSpec; + } + + /** + * @param partSpec + * partition specification + */ + public void setOldPartSpec(LinkedHashMap partSpec) { + this.oldPartSpec = partSpec; + } + + /** + * @return new partition specification. + */ + public LinkedHashMap getNewPartSpec() { + return newPartSpec; + } + + /** + * @param partSpec + * partition specification + */ + public void setNewPartSpec(LinkedHashMap partSpec) { + this.newPartSpec = partSpec; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (revision 1145366) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (working copy) @@ -43,7 +43,7 @@ RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS, ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION, TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, - ALTERLOCATION, DROPPARTITION + ALTERLOCATION, DROPPARTITION, RENAMEPARTITION }; public static enum ProtectModeType { Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (revision 1145366) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (working copy) @@ -119,6 +119,7 @@ TOK_ALTERTABLE_RENAME; TOK_ALTERTABLE_ADDCOLS; TOK_ALTERTABLE_RENAMECOL; +TOK_ALTERTABLE_RENAMEPART; TOK_ALTERTABLE_REPLACECOLS; TOK_ALTERTABLE_ADDPARTS; TOK_ALTERTABLE_DROPPARTS; @@ -556,6 +557,7 @@ : alterStatementSuffixRename | alterStatementSuffixAddCol | alterStatementSuffixRenameCol + | alterStatementSuffixRenamePart | alterStatementSuffixDropPartitions | alterStatementSuffixAddPartitions | alterStatementSuffixTouch @@ -629,6 +631,13 @@ : Identifier KW_CHANGE KW_COLUMN? oldName=Identifier newName=Identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition? ->^(TOK_ALTERTABLE_RENAMECOL Identifier $oldName $newName colType $comment? alterStatementChangeColPosition?) 
; + +alterStatementSuffixRenamePart +@init { msgs.push("rename partition statement"); } +@after { msgs.pop(); } + : Identifier partitionSpec KW_RENAME KW_TO partitionSpec + ->^(TOK_ALTERTABLE_RENAMEPART Identifier (partitionSpec)+) + ; alterStatementChangeColPosition : first=KW_FIRST|KW_AFTER afterCol=Identifier Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (revision 1145366) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (working copy) @@ -49,6 +49,7 @@ commandType.put(HiveParser.TOK_ALTERTABLE_ADDCOLS, HiveOperation.ALTERTABLE_ADDCOLS); commandType.put(HiveParser.TOK_ALTERTABLE_REPLACECOLS, HiveOperation.ALTERTABLE_REPLACECOLS); commandType.put(HiveParser.TOK_ALTERTABLE_RENAMECOL, HiveOperation.ALTERTABLE_RENAMECOL); + commandType.put(HiveParser.TOK_ALTERTABLE_RENAMEPART, HiveOperation.ALTERTABLE_RENAMEPART); commandType.put(HiveParser.TOK_ALTERTABLE_RENAME, HiveOperation.ALTERTABLE_RENAME); commandType.put(HiveParser.TOK_ALTERTABLE_DROPPARTS, HiveOperation.ALTERTABLE_DROPPARTS); commandType.put(HiveParser.TOK_ALTERTABLE_ADDPARTS, HiveOperation.ALTERTABLE_ADDPARTS); @@ -135,6 +136,7 @@ case HiveParser.TOK_MSCK: case HiveParser.TOK_ALTERTABLE_ADDCOLS: case HiveParser.TOK_ALTERTABLE_RENAMECOL: + case HiveParser.TOK_ALTERTABLE_RENAMEPART: case HiveParser.TOK_ALTERTABLE_REPLACECOLS: case HiveParser.TOK_ALTERTABLE_RENAME: case HiveParser.TOK_ALTERTABLE_DROPPARTS: Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 1145366) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy) @@ -92,6 +92,7 @@ import org.apache.hadoop.hive.ql.plan.PrincipalDesc; import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; import org.apache.hadoop.hive.ql.plan.RevokeDesc; import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; @@ -286,6 +287,9 @@ case HiveParser.TOK_ALTERTABLE_RENAMECOL: analyzeAlterTableRenameCol(ast); break; + case HiveParser.TOK_ALTERTABLE_RENAMEPART: + analyzeAlterTableRenamePart(ast); + break; case HiveParser.TOK_ALTERTABLE_ADDPARTS: analyzeAlterTableAddParts(ast, false); break; @@ -1688,6 +1692,32 @@ alterTblDesc), conf)); } + private void analyzeAlterTableRenamePart(ASTNode ast) throws SemanticException { + String tblName = getUnescapedName((ASTNode)ast.getChild(0)); + + List> partSpecs = getPartitionSpecs(ast); + + try { + Table tab = db.getTable(db.getCurrentDatabase(), tblName, false); + if (tab != null) { + inputs.add(new ReadEntity(tab)); + } + else { + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); + } + } catch (HiveException e) { + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); + } + if (partSpecs == null || partSpecs.size() != 2) { + throw new SemanticException("Rename partition requires exactly one old and one new partition spec"); + } + addTablePartsOutputs(tblName, partSpecs); + RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc( + db.getCurrentDatabase(), tblName, partSpecs.get(0), partSpecs.get(1)); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + renamePartitionDesc), 
conf)); + } + private void analyzeAlterTableModifyCols(ASTNode ast, AlterTableTypes alterType) throws SemanticException { String tblName = getUnescapedName((ASTNode)ast.getChild(0));
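
For illustration only (not part of the patch): a minimal sketch of how the renamePartition metastore API introduced above can be driven from client code. The database, table, and partition values mirror the authorization test in this patch; the class name RenamePartitionExample and the metastore connection setup are illustrative assumptions, not code from this change.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class RenamePartitionExample {
  public static void main(String[] args) throws Exception {
    // Connect to the metastore configured on the classpath (hive-site.xml).
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());

    // Values of the existing partition, in partition-column order (ds='2010').
    List<String> oldVals = Arrays.asList("2010");

    // Fetch the partition, overwrite only its values, and rename it in place;
    // parameters, serde info, and bucketing are carried over unchanged.
    Partition part = client.getPartition("default", "authorization_part", oldVals);
    part.setValues(Arrays.asList("2010_tmp"));
    client.renamePartition("default", "authorization_part", oldVals, part);

    client.close();
  }
}

The HiveQL equivalent, exercised by the new clientpositive tests, is: alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp');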