diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionTree.java ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionTree.java index c84c3ef595..ffe6831d2f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionTree.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionTree.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.metadata; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; @@ -148,4 +149,78 @@ Partition dropPartition(List partVals) throws MetaException, NoSuchObjec } return parts.remove(partName); } + + /** + * Alter an existing partition. The flow is following: + *
<pre>
+ * 1) search for existing partition + * 2) if found delete it + * 3) insert new partition + *
</pre>
+ * @param oldPartitionVals the values of existing partition, which is altered, must be not null. + * @param newPartition the new partition, must be not null. + * @param isRename true, if rename is requested, meaning that all properties of partition can be changed, except + * of its location. + * @throws MetaException + * @throws InvalidOperationException + * @throws NoSuchObjectException + * @throws AlreadyExistsException + */ + void alterPartition(List oldPartitionVals, Partition newPartition, boolean isRename) + throws MetaException, InvalidOperationException, NoSuchObjectException, AlreadyExistsException { + if (oldPartitionVals == null || oldPartitionVals.isEmpty()) { + throw new InvalidOperationException("Old partition values cannot be null or empty."); + } + if (newPartition == null) { + throw new InvalidOperationException("New partition cannot be null."); + } + Partition oldPartition = getPartition(oldPartitionVals); + if (oldPartition == null) { + throw new InvalidOperationException( + "Partition with partition values " + Arrays.toString(oldPartitionVals.toArray()) + " is not found."); + } + if (!oldPartition.getDbName().equals(newPartition.getDbName())) { + throw new MetaException("Db name cannot be altered."); + } + if (!oldPartition.getTableName().equals(newPartition.getTableName())) { + throw new MetaException("Table name cannot be altered."); + } + if (isRename) { + newPartition.getSd().setLocation(oldPartition.getSd().getLocation()); + } + if (dropPartition(oldPartitionVals) == null) { + throw new MetaException("Unable to alter partition " + newPartition.toString()); + } + + addPartition(newPartition, makePartName(tTable.getPartitionKeys(), newPartition.getValues()), false); + } + + /** + * Alter multiple partitions. + * @param newParts list of new partitions, must be not null. 
+ * @throws MetaException + * @throws NoSuchObjectException + * @throws InvalidOperationException + * @throws AlreadyExistsException + */ + void alterPartitions(List newParts) + throws MetaException, NoSuchObjectException, InvalidOperationException, AlreadyExistsException { + for (Partition partition : newParts) { + alterPartition(partition.getValues(), partition, false); + } + } + + /** + * Rename an existing partition. + * @param oldPartitionVals the values of existing partition, which is renamed, must be not null. + * @param newPart the new partition, must be not null. + * @throws MetaException + * @throws NoSuchObjectException + * @throws InvalidOperationException + * @throws AlreadyExistsException + */ + void renamePartition(List oldPartitionVals, Partition newPart) + throws MetaException, NoSuchObjectException, InvalidOperationException, AlreadyExistsException { + alterPartition(oldPartitionVals, newPart, true); + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index a5b16d12a5..28509bed43 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -82,11 +82,13 @@ import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName; import static org.apache.hadoop.hive.metastore.Warehouse.makePartName; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.makeSpecFromName; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.compareFieldColumns; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getColumnNamesForTable; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getPvals; import 
static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.isExternalTable; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.makePartNameMatcher; /** * todo: This need review re: thread safety. Various places (see callsers of @@ -1341,6 +1343,49 @@ public Partition exchange_partition(Map partitionSpecs, String s throw new MetaException("Exchanging partitions between temporary and non-temporary tables is not supported."); } + @Override + public void alter_partition(String catName, String dbName, String tblName, Partition newPart, + EnvironmentContext environmentContext) throws TException { + alter_partition(catName, dbName, tblName, newPart, environmentContext, null); + } + + @Override + public void alter_partition(String catName, String dbName, String tblName, Partition newPart, + EnvironmentContext environmentContext, String writeIdList) + throws TException { + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tblName); + if (table == null) { + super.alter_partition(catName, dbName, tblName, newPart, environmentContext, writeIdList); + return; + } + TempTable tt = getPartitionedTempTable(table); + tt.alterPartition(newPart); + } + + @Override + public void alter_partitions(String catName, String dbName, String tblName, List newParts, + EnvironmentContext environmentContext, String writeIdList, long writeId) throws TException { + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tblName); + if (table == null) { + super.alter_partitions(catName, dbName, tblName, newParts, environmentContext, writeIdList, writeId); + return; + } + TempTable tt = getPartitionedTempTable(table); + tt.alterPartitions(newParts); + } + + @Override + public void renamePartition(String catName, String dbname, String tableName, List partitionVals, + Partition newPart, String validWriteIds) throws TException { + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, tableName); + if (table == null) { + 
super.renamePartition(catName, dbname, tableName, partitionVals, newPart, validWriteIds); + return; + } + TempTable tt = getPartitionedTempTable(table); + tt.renamePartition(partitionVals, newPart); + } + private List exchangePartitions(Map partitionSpecs, org.apache.hadoop.hive.metastore.api.Table sourceTable, TempTable sourceTempTable, org.apache.hadoop.hive.metastore.api.Table destTable, TempTable destTempTable) throws TException { diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/TempTable.java ql/src/java/org/apache/hadoop/hive/ql/metadata/TempTable.java index fa6dddcbad..2569a1bc8c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/TempTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/TempTable.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; @@ -177,4 +178,19 @@ Partition dropPartition(String partitionName) throws MetaException, NoSuchObject return pTree.dropPartition(pVals); } + void alterPartition(Partition partition) + throws MetaException, NoSuchObjectException, InvalidOperationException, AlreadyExistsException { + pTree.alterPartition(partition.getValues(), partition, false); + } + + void alterPartitions(List newParts) + throws MetaException, NoSuchObjectException, InvalidOperationException, AlreadyExistsException { + pTree.alterPartitions(newParts); + } + + void renamePartition(List partitionVals, Partition newPart) + throws MetaException, NoSuchObjectException, InvalidOperationException, AlreadyExistsException { + pTree.renamePartition(partitionVals, newPart); + } + } diff --git 
ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientAlterPartitionsTempTable.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientAlterPartitionsTempTable.java new file mode 100644 index 0000000000..965b9e7f5d --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientAlterPartitionsTempTable.java @@ -0,0 +1,203 @@ +package org.apache.hadoop.hive.ql.metadata; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.CustomIgnoreRule; +import org.apache.hadoop.hive.metastore.client.TestAlterPartitions; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.thrift.TException; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static java.util.stream.Collectors.joining; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.junit.Assert.*; + +/** + * Test class for alter/rename partitions related methods on temporary tables. 
+ */ +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestSessionHiveMetastoreClientAlterPartitionsTempTable extends TestAlterPartitions { + + private HiveConf conf; + + private static final String PART_PRIV = "PARTITION_LEVEL_PRIVILEGE"; + + public TestSessionHiveMetastoreClientAlterPartitionsTempTable(String name, AbstractMetaStoreService metaStore) { + super(name, metaStore); + ignoreRule = new CustomIgnoreRule(); + } + + @Before + public void setUp() throws Exception { + initHiveConf(); + SessionState.start(conf); + setClient(Hive.get(conf).getMSC()); + cleanDB(); + createDB(DB_NAME); + } + + private void initHiveConf() throws HiveException { + conf = Hive.get().getConf(); + conf.setBoolVar(HiveConf.ConfVars.METASTORE_FASTPATH, true); + } + + @Override + protected Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + List partCols, boolean setPartitionLevelPrivilages) + throws Exception { + TableBuilder builder = new TableBuilder() + .setDbName(dbName) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .setTemporary(true); + + partCols.forEach(col -> builder.addPartCol(col, "string")); + Table table = builder.build(getMetaStore().getConf()); + + if (setPartitionLevelPrivilages) { + table.putToParameters(PART_PRIV, "true"); + } + + client.createTable(table); + return table; + } + + @Override + protected void addPartition(IMetaStoreClient client, Table table, List values) + throws TException { + PartitionBuilder builder = new PartitionBuilder().inTable(table); + values.forEach(builder::addValue); + Partition partition = builder.build(conf); + getClient().add_partition(partition); + } + + @Override + protected void assertPartitionUnchanged(Partition partition, List testValues, + List partCols) throws Exception { + assertFalse(partition.getParameters().containsKey("hmsTestParam001")); + + List expectedKVPairs = new ArrayList<>(); + for (int i = 0; i < partCols.size(); ++i) { 
+ expectedKVPairs.add(partCols.get(i) + "=" + testValues.get(i)); + } + Table table = getClient().getTable(partition.getDbName(), partition.getTableName()); + String partPath = expectedKVPairs.stream().collect(joining("/")); + assertTrue(partition.getSd().getLocation().equals(table.getSd().getLocation() + "/" + partPath)); + assertEquals(2, partition.getSd().getCols().size()); + } + + @Override + protected void assertPartitionChanged(Partition partition, List testValues, + List partCols) throws Exception { + assertEquals("testValue001", partition.getParameters().get("hmsTestParam001")); + + List expectedKVPairs = new ArrayList<>(); + for (int i = 0; i < partCols.size(); ++i) { + expectedKVPairs.add(partCols.get(i) + "=" + testValues.get(i)); + } + Table table = getClient().getTable(partition.getDbName(), partition.getTableName()); + String partPath = expectedKVPairs.stream().collect(joining("/")); + assertTrue(partition.getSd().getLocation().equals(table.getSd().getLocation() + + "/" + partPath + "/hh=01")); + assertEquals(NEW_CREATE_TIME, partition.getCreateTime()); + assertEquals(NEW_CREATE_TIME, partition.getLastAccessTime()); + assertEquals(3, partition.getSd().getCols().size()); + } + + @Override + @SuppressWarnings("deprecation") + @Test + public void deprecatedCalls() throws TException { + String tableName = "deprecated_table"; + Table table = new TableBuilder() + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .setTemporary(true) + .create(getClient(), conf); + Partition[] parts = new Partition[5]; + for (int i = 0; i < 5; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir("a" + i)) + .build(conf); + } + getClient().add_partitions(Arrays.asList(parts)); + + Partition newPart = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0")); + 
newPart.getParameters().put("test_key", "test_value"); + getClient().alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart); + + Partition fetched = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + newPart = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1")); + newPart.setLastAccessTime(3); + Partition newPart1 = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2")); + newPart1.getSd().setLocation("somewhere"); + getClient().alter_partitions(DEFAULT_DATABASE_NAME, tableName, Arrays.asList(newPart, newPart1)); + fetched = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1")); + Assert.assertEquals(3L, fetched.getLastAccessTime()); + fetched = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2")); + Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere")); + + newPart = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3")); + newPart.setValues(Collections.singletonList("b3")); + getClient().renamePartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3"), newPart); + fetched = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("b3")); + Assert.assertEquals(1, fetched.getValuesSize()); + Assert.assertEquals("b3", fetched.getValues().get(0)); + + newPart = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4")); + newPart.getParameters().put("test_key", "test_value"); + EnvironmentContext ec = new EnvironmentContext(); + ec.setProperties(Collections.singletonMap("a", "b")); + getClient().alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart, ec); + fetched = + getClient().getPartition(DEFAULT_DATABASE_NAME, tableName, 
Collections.singletonList("a4")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + } + + @Override + @Test(expected = AlreadyExistsException.class) + public void testRenamePartitionTargetAlreadyExisting() throws Exception { + super.testRenamePartitionTargetAlreadyExisting(); + } + + @Override + @Test(expected = InvalidOperationException.class) + public void testRenamePartitionNullNewPart() throws Exception { + super.testRenamePartitionNullNewPart(); + } +} diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java index 4fc3688f2e..020f3382e1 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.metastore.client; -import java.net.ProtocolException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -48,7 +47,6 @@ import com.google.common.collect.Lists; import org.junit.After; -import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -70,11 +68,11 @@ @RunWith(Parameterized.class) @Category(MetastoreCheckinTest.class) public class TestAlterPartitions extends MetaStoreClientTest { - private static final int NEW_CREATE_TIME = 123456789; + protected static final int NEW_CREATE_TIME = 123456789; private AbstractMetaStoreService metaStore; private IMetaStoreClient client; - private static final String DB_NAME = "testpartdb"; + protected static final String DB_NAME = "testpartdb"; private static final String TABLE_NAME = "testparttable"; private static final List PARTCOL_SCHEMA = Lists.newArrayList("yyyy", "mm", "dd"); @@ -88,8 +86,7 @@ 
public void setUp() throws Exception { client = metaStore.getClient(); // Clean up the database - client.dropDatabase(DB_NAME, true, true, true); - metaStore.cleanWarehouseDirs(); + cleanDB(); createDB(DB_NAME); } @@ -108,13 +105,34 @@ public void tearDown() throws Exception { } } - private void createDB(String dbName) throws TException { + public AbstractMetaStoreService getMetaStore() { + return metaStore; + } + + public void setMetaStore(AbstractMetaStoreService metaStore) { + this.metaStore = metaStore; + } + + protected IMetaStoreClient getClient() { + return client; + } + + protected void setClient(IMetaStoreClient client) { + this.client = client; + } + + protected void cleanDB() throws Exception{ + client.dropDatabase(DB_NAME, true, true, true); + metaStore.cleanWarehouseDirs(); + } + + protected void createDB(String dbName) throws TException { new DatabaseBuilder(). setName(dbName). create(client, metaStore.getConf()); } - private Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + protected Table createTestTable(IMetaStoreClient client, String dbName, String tableName, List partCols, boolean setPartitionLevelPrivilages) throws Exception { TableBuilder builder = new TableBuilder() @@ -134,7 +152,7 @@ private Table createTestTable(IMetaStoreClient client, String dbName, String tab return table; } - private void addPartition(IMetaStoreClient client, Table table, List values) + protected void addPartition(IMetaStoreClient client, Table table, List values) throws TException { PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); @@ -173,8 +191,8 @@ private static void makeTestChangesOnPartition(Partition partition) { partition.getSd().getCols().add(new FieldSchema("newcol", "string", "")); } - private void assertPartitionUnchanged(Partition partition, List testValues, - List partCols) throws MetaException { + protected void 
assertPartitionUnchanged(Partition partition, List testValues, + List partCols) throws Exception { assertFalse(partition.getParameters().containsKey("hmsTestParam001")); List expectedKVPairs = new ArrayList<>(); @@ -189,8 +207,8 @@ private void assertPartitionUnchanged(Partition partition, List testValu assertEquals(2, partition.getSd().getCols().size()); } - private void assertPartitionChanged(Partition partition, List testValues, - List partCols) throws MetaException { + protected void assertPartitionChanged(Partition partition, List testValues, + List partCols) throws Exception { assertEquals("testValue001", partition.getParameters().get("hmsTestParam001")); List expectedKVPairs = new ArrayList<>(); @@ -230,6 +248,7 @@ public void testAlterPartition() throws Exception { } @Test + @ConditionalIgnoreOnSessionHiveMetastoreClient public void otherCatalog() throws TException { String catName = "alter_partition_catalog"; Catalog cat = new CatalogBuilder() @@ -397,6 +416,7 @@ public void testAlterPartitionMissingPartitionVals() throws Exception { } @Test(expected = InvalidOperationException.class) + @ConditionalIgnoreOnSessionHiveMetastoreClient public void testAlterPartitionBogusCatalogName() throws Exception { createTable4PartColsParts(client); List partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); @@ -683,6 +703,7 @@ public void testAlterPartitionsMissingPartitionVals() throws Exception { } @Test(expected = InvalidOperationException.class) + @ConditionalIgnoreOnSessionHiveMetastoreClient public void testAlterPartitionsBogusCatalogName() throws Exception { createTable4PartColsParts(client); Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); @@ -859,6 +880,7 @@ public void testAlterPartitionsWithEnvironmentCtxMissingPartitionVals() throws E } @Test(expected = InvalidOperationException.class) + @ConditionalIgnoreOnSessionHiveMetastoreClient public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception 
{ createTable4PartColsParts(client); Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); @@ -1078,6 +1100,7 @@ public void testRenamePartitionNullNewPart() throws Exception { } @Test(expected = InvalidOperationException.class) + @ConditionalIgnoreOnSessionHiveMetastoreClient public void testRenamePartitionBogusCatalogName() throws Exception { List> oldValues = createTable4PartColsParts(client); List oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);