diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java index edabbef..23c51a2 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java @@ -376,6 +376,14 @@ public abstract int addPartitionSpec(HCatPartitionSpec partitionSpec) throws HCatException; /** + * Updates a list of existing partitions. + * @param partitions A list of existing partitions. All partitions must be for the same table. + * @throws HCatException On failure to update partitions. + */ + public abstract void updatePartitions(List<HCatPartition> partitions) + throws HCatException; + + /** * Drops partition(s) that match the specified (and possibly partial) partition specification. * A partial partition-specification is one where not all partition-keys have associated values. 
For example, * for a table ('myDb.myTable') with 2 partition keys (dt string, region string), diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java index 4ab497e..59015e6 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -510,6 +511,46 @@ public void addPartition(HCatAddPartitionDesc partInfo) } } + @Override + public void updatePartitions(List<HCatPartition> partitions) throws HCatException { + String dbName = null; + String tableName = null; + + List<Partition> hivePartitions = new ArrayList<>(); + for (HCatPartition partition : partitions) { + + String partitionDbName = partition.getDatabaseName(); + if (partitionDbName == null) { + throw new HCatException("Database name must not be null."); + } else if (dbName == null) { + dbName = partitionDbName; + } else if (!dbName.equals(partitionDbName)) { + throw new HCatException("Invalid database name: " + partitionDbName + ", expected: " + dbName); + } + + String partitionTableName = partition.getTableName(); + if (partitionTableName == null) { + throw new HCatException("Table name must not be null."); + } else if (tableName == null) { + tableName = partitionTableName; + } else if (!tableName.equals(partitionTableName)) { + throw new HCatException("Invalid table name: " + 
partitionTableName + ", expected: " + tableName); + } + + hivePartitions.add(partition.toHivePartition()); + } + + try { + hmsClient.alter_partitions(dbName, tableName, hivePartitions, null); + } catch (InvalidOperationException e) { + throw new HCatException("InvalidOperationException while updating partitions.", e); + } catch (MetaException e) { + throw new HCatException("MetaException while updating partitions.", e); + } catch (TException e) { + throw new HCatException("TException while updating partitions.", e); + } + } + /** * Helper class to help build ExprDesc tree to represent the partitions to be dropped. * Note: At present, the ExpressionBuilder only constructs partition predicates where diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java index 48ee7cf..259a575 100644 --- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java +++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NotificationEvent; import org.apache.hadoop.hive.metastore.api.PartitionEventType; @@ -70,6 +71,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.stringTypeInfo; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; import static 
org.junit.Assert.assertEquals; @@ -1333,5 +1338,154 @@ public void testPartitionSpecRegistrationWithCustomSchema() throws Exception { assertTrue("Unexpected exception! " + unexpected.getMessage(), false); } } + + @Test + public void testUpdatePartitions() throws Exception { + startReplicationTargetMetaStoreIfRequired(); + + HCatClient metaStore = HCatClient.create(new Configuration(hcatConf)); + + String dbName = "test_db"; + String tableName = "test_table"; + + metaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE); + metaStore.createDatabase(HCatCreateDBDesc.create(dbName).build()); + + List<HCatFieldSchema> columnSchema = singletonList(new HCatFieldSchema("foo", stringTypeInfo, null)); + List<HCatFieldSchema> partitionSchema = singletonList(new HCatFieldSchema("bar", stringTypeInfo, null)); + + HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema); + metaStore.createTable(HCatCreateTableDesc.create(table).build()); + table = metaStore.getTable(dbName, tableName); + + // Add a single partition with an old location + Map<String, String> partitionSpec1 = singletonMap("bar", "p1"); + String location1 = table.getLocation() + "/p1_old"; + HCatPartition partition1 = new HCatPartition(table, partitionSpec1, location1); + metaStore.addPartition(HCatAddPartitionDesc.create(partition1).build()); + + // Update the location + location1 = table.getLocation() + "/p1_new"; + partition1 = new HCatPartition(table, partitionSpec1, location1); + metaStore.updatePartitions(singletonList(partition1)); + + // Assert location was updated + partition1 = metaStore.getPartition(dbName, tableName, partitionSpec1); + assertEquals(location1, partition1.getLocation()); + + // Attempt to update a partition that doesn't exist + Map<String, String> partitionSpec2 = singletonMap("bar", "p2"); + String location2 = table.getLocation() + "/p2"; + HCatPartition partition2 = new HCatPartition(table, partitionSpec2, location2); + try { + metaStore.updatePartitions(singletonList(partition2)); + } catch 
(HCatException e) { + assertTrue(e.getCause() instanceof InvalidOperationException); + return; + } + fail("Expected HCatException"); + } + + @Test + public void testUpdatePartitionsFromDifferentTable() throws Exception { + startReplicationTargetMetaStoreIfRequired(); + + HCatClient metaStore = HCatClient.create(new Configuration(hcatConf)); + + String dbName = "test_db"; + String tableName1 = "test_table1"; + String tableName2 = "test_table2"; + + List<HCatFieldSchema> columnSchema = singletonList(new HCatFieldSchema("foo", stringTypeInfo, null)); + List<HCatFieldSchema> partitionSchema = singletonList(new HCatFieldSchema("bar", stringTypeInfo, null)); + + HCatTable table1 = new HCatTable(dbName, tableName1).cols(columnSchema).partCols(partitionSchema); + HCatTable table2 = new HCatTable(dbName, tableName2).cols(columnSchema).partCols(partitionSchema); + + HCatPartition partition1 = new HCatPartition(table1, singletonMap("bar", "p1"), "p1"); + HCatPartition partition2 = new HCatPartition(table2, singletonMap("bar", "p2"), "p2"); + + try { + metaStore.updatePartitions(asList(partition1, partition2)); + } catch (HCatException e) { + return; + } + fail("Expected HCatException"); + } + + @Test + public void testUpdatePartitionsFromDifferentDatabase() throws Exception { + startReplicationTargetMetaStoreIfRequired(); + + HCatClient metaStore = HCatClient.create(new Configuration(hcatConf)); + + String dbName1 = "test_db1"; + String dbName2 = "test_db2"; + String tableName = "test_table"; + + List<HCatFieldSchema> columnSchema = singletonList(new HCatFieldSchema("foo", stringTypeInfo, null)); + List<HCatFieldSchema> partitionSchema = singletonList(new HCatFieldSchema("bar", stringTypeInfo, null)); + + HCatTable table1 = new HCatTable(dbName1, tableName).cols(columnSchema).partCols(partitionSchema); + HCatTable table2 = new HCatTable(dbName2, tableName).cols(columnSchema).partCols(partitionSchema); + + HCatPartition partition1 = new HCatPartition(table1, singletonMap("bar", "p1"), "p1"); + HCatPartition partition2 = new 
HCatPartition(table2, singletonMap("bar", "p2"), "p2"); + + try { + metaStore.updatePartitions(asList(partition1, partition2)); + } catch (HCatException e) { + return; + } + fail("Expected HCatException"); + } + + @Test + public void testUpdatePartitionsNullTableName() throws Exception { + startReplicationTargetMetaStoreIfRequired(); + + HCatClient metaStore = HCatClient.create(new Configuration(hcatConf)); + + String dbName = "test_db"; + String tableName = null; + + List<HCatFieldSchema> columnSchema = singletonList(new HCatFieldSchema("foo", stringTypeInfo, null)); + List<HCatFieldSchema> partitionSchema = singletonList(new HCatFieldSchema("bar", stringTypeInfo, null)); + + HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema); + + HCatPartition partition = new HCatPartition(table, singletonMap("bar", "p1"), "p1"); + + try { + metaStore.updatePartitions(asList(partition)); + } catch (HCatException e) { + return; + } + fail("Expected HCatException"); + } + + @Test + public void testUpdatePartitionsNullDatabaseName() throws Exception { + startReplicationTargetMetaStoreIfRequired(); + + HCatClient metaStore = HCatClient.create(new Configuration(hcatConf)); + + String dbName = null; + String tableName = "test_table"; + + List<HCatFieldSchema> columnSchema = singletonList(new HCatFieldSchema("foo", stringTypeInfo, null)); + List<HCatFieldSchema> partitionSchema = singletonList(new HCatFieldSchema("bar", stringTypeInfo, null)); + + HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema); + + HCatPartition partition = new HCatPartition(table, singletonMap("bar", "p1"), "p1"); + + try { + metaStore.updatePartitions(asList(partition)); + } catch (HCatException e) { + return; + } + fail("Expected HCatException"); + } }