diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 2467ee3cfb..db3d9dbca8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.metadata; import java.io.IOException; + import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -68,6 +69,7 @@ import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; import org.apache.hadoop.hive.metastore.utils.SecurityUtils; @@ -82,6 +84,7 @@ import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName; import static org.apache.hadoop.hive.metastore.Warehouse.makePartName; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.makeSpecFromName; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.compareFieldColumns; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getColumnNamesForTable; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; @@ -1384,6 +1387,61 @@ public void renamePartition(String catName, String dbname, String tableName, Lis tt.renamePartition(partitionVals, newPart); } + @Override + public Partition appendPartition(String catName, String dbName, String tableName, List partVals) + throws TException { + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tableName); + if (table == 
null) { + return super.appendPartition(catName, dbName, tableName, partVals); + } + if (partVals == null || partVals.isEmpty()) { + throw new MetaException("The partition values must be not null or empty."); + } + assertTempTablePartitioned(table); + Partition partition = new PartitionBuilder().inTable(table).setValues(partVals).build(conf); + return appendPartitionToTempTable(table, partition); + } + + @Override + public Partition appendPartition(String catName, String dbName, String tableName, String partitionName) + throws TException { + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tableName); + if (table == null) { + return super.appendPartition(catName, dbName, tableName, partitionName); + } + if (partitionName == null || partitionName.isEmpty()) { + throw new MetaException("The partition must be not null or empty."); + } + assertTempTablePartitioned(table); + Map specFromName = makeSpecFromName(partitionName); + if (specFromName == null || specFromName.isEmpty()) { + throw new InvalidObjectException("Invalid partition name " + partitionName); + } + List pVals = new ArrayList<>(); + for (FieldSchema field : table.getPartitionKeys()) { + String val = specFromName.get(field.getName()); + if (val == null) { + throw new InvalidObjectException("Partition name " + partitionName + " and table partition keys " + Arrays + .toString(table.getPartitionKeys().toArray()) + " does not match"); + } + pVals.add(val); + } + Partition partition = new PartitionBuilder().inTable(table).setValues(pVals).build(conf); + return appendPartitionToTempTable(table, partition); + } + + private Partition appendPartitionToTempTable(org.apache.hadoop.hive.metastore.api.Table table, Partition partition) + throws MetaException, AlreadyExistsException { + TempTable tt = getPartitionedTempTable(table); + if (tt == null) { + throw new IllegalStateException("TempTable not found for " + getCatalogQualifiedTableName(table)); + } + Path partitionLocation = 
getPartitionLocation(table, partition, false); + partition = tt.addPartition(deepCopy(partition)); + createAndSetLocationForAddedPartition(partition, partitionLocation); + return partition; + } + private List exchangePartitions(Map partitionSpecs, org.apache.hadoop.hive.metastore.api.Table sourceTable, TempTable sourceTempTable, org.apache.hadoop.hive.metastore.api.Table destTable, TempTable destTempTable) throws TException { diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientAppendPartitionTempTable.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientAppendPartitionTempTable.java new file mode 100644 index 0000000000..1e04be3f5f --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientAppendPartitionTempTable.java @@ -0,0 +1,79 @@ +package org.apache.hadoop.hive.ql.metadata; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.CustomIgnoreRule; +import org.apache.hadoop.hive.metastore.client.TestAppendPartitions; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.junit.Assert; +import org.junit.Before; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; +import java.util.Map; + +/** + * Test class for append partitions related methods on temporary tables. 
+ */ +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestSessionHiveMetastoreClientAppendPartitionTempTable extends TestAppendPartitions { + + private HiveConf conf; + + public TestSessionHiveMetastoreClientAppendPartitionTempTable(String name, AbstractMetaStoreService metaStore) { + super(name, metaStore); + ignoreRule = new CustomIgnoreRule(); + } + + @Before + public void setUp() throws Exception { + initHiveConf(); + SessionState.start(conf); + setClient(Hive.get(conf).getMSC()); + cleanUpDatabase(); + createTables(); + } + + private void initHiveConf() throws HiveException { + conf = Hive.get().getConf(); + conf.setBoolVar(HiveConf.ConfVars.METASTORE_FASTPATH, true); + } + + @Override + protected Table createTable(String tableName, List partCols, Map tableParams, + String tableType, String location) throws Exception { + TableBuilder builder = + new TableBuilder().setDbName(DB_NAME).setTableName(tableName).addCol("test_id", "int", "test col id") + .addCol("test_value", "string", "test col value").setPartCols(partCols) + .setType(tableType).setLocation(location).setTemporary(true); + if (tableParams != null) { + builder.setTableParams(tableParams); + } + builder.create(getClient(), conf); + return getClient().getTable(DB_NAME, tableName); + } + + @Override + protected void verifyPartition(Partition partition, Table table, List expectedPartValues, + String partitionName) throws Exception { + Assert.assertEquals(table.getTableName(), partition.getTableName()); + Assert.assertEquals(table.getDbName(), partition.getDbName()); + Assert.assertEquals(expectedPartValues, partition.getValues()); + Assert.assertNotEquals(0, partition.getCreateTime()); + Assert.assertEquals(0, partition.getParameters().size()); + StorageDescriptor partitionSD = partition.getSd(); + Assert.assertEquals(table.getSd().getLocation() + "/" + partitionName, + partitionSD.getLocation()); + Assert.assertTrue(getMetaStore().isPathExists(new 
Path(partitionSD.getLocation()))); + } +} diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java index e2593fecdc..0df071fb19 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java @@ -49,7 +49,6 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; import org.apache.thrift.TException; -import org.apache.thrift.transport.TTransportException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -70,7 +69,7 @@ private IMetaStoreClient client; private Configuration conf; - private static final String DB_NAME = "test_append_part_db"; + protected static final String DB_NAME = "test_append_part_db"; private static Table tableWithPartitions; private static Table externalTable; private static Table tableNoPartColumns; @@ -88,16 +87,9 @@ public void setUp() throws Exception { client = metaStore.getClient(); // Clean up the database - client.dropDatabase(DB_NAME, true, true, true); - metaStore.cleanWarehouseDirs(); - new DatabaseBuilder() - .setName(DB_NAME) - .create(client, metaStore.getConf()); + cleanUpDatabase(); - tableWithPartitions = createTableWithPartitions(); - externalTable = createExternalTable(); - tableNoPartColumns = createTableNoPartitionColumns(); - tableView = createView(); + createTables(); } @After @@ -115,6 +107,33 @@ public void tearDown() throws Exception { } } + protected void cleanUpDatabase() throws Exception{ + client.dropDatabase(DB_NAME, true, true, true); + metaStore.cleanWarehouseDirs(); + new DatabaseBuilder() + .setName(DB_NAME) + .create(client, 
  /**
   * Creates the fixture tables (partitioned, external, unpartitioned, view) used by the tests.
   * Extracted so subclasses can rebuild fixtures after swapping the client.
   */
  protected void createTables() throws Exception {
    tableWithPartitions = createTableWithPartitions();
    externalTable = createExternalTable();
    tableNoPartColumns = createTableNoPartitionColumns();
    tableView = createView();
  }

  /**
   * Lets subclasses replace the metastore client (e.g. with a session-level client).
   */
  protected void setClient(IMetaStoreClient client) {
    this.client = client;
  }

  /** Returns the metastore client the tests run against. */
  protected IMetaStoreClient getClient() {
    return client;
  }

  /** Returns the metastore service backing the tests. */
  protected AbstractMetaStoreService getMetaStore() {
    return metaStore;
  }
getPartitionValues(partitionName)); appendedPart.setWriteId(partition.getWriteId()); + partition.setWriteIdIsSet(true); Assert.assertEquals(partition, appendedPart); verifyPartition(partition, table, getPartitionValues(partitionName), partitionName); verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april", @@ -322,6 +345,7 @@ public void testAppendPartToExternalTable() throws Exception { Partition partition = client.getPartition(table.getDbName(), table.getTableName(), getPartitionValues(partitionName)); appendedPart.setWriteId(partition.getWriteId()); + partition.setWriteIdIsSet(true); Assert.assertEquals(partition, appendedPart); verifyPartition(partition, table, getPartitionValues(partitionName), partitionName); verifyPartitionNames(table, Lists.newArrayList(partitionName)); @@ -352,6 +376,7 @@ public void testAppendPartToTableWithoutPartCols() throws Exception { } @Test(expected = MetaException.class) + @ConditionalIgnoreOnSessionHiveMetastoreClient public void testAppendPartToView() throws Exception { String partitionName = "year=2017/month=may"; @@ -456,6 +481,7 @@ public void testAppendPartWrongColumnInPartName() throws Exception { } @Test + @ConditionalIgnoreOnSessionHiveMetastoreClient public void otherCatalog() throws TException { String catName = "append_partition_catalog"; Catalog cat = new CatalogBuilder() @@ -497,12 +523,14 @@ public void otherCatalog() throws TException { } @Test(expected = InvalidObjectException.class) + @ConditionalIgnoreOnSessionHiveMetastoreClient public void testAppendPartitionBogusCatalog() throws Exception { client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(), Lists.newArrayList("2017", "may")); } @Test(expected = InvalidObjectException.class) + @ConditionalIgnoreOnSessionHiveMetastoreClient public void testAppendPartitionByNameBogusCatalog() throws Exception { client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(), "year=2017/month=april"); 
@@ -541,7 +569,7 @@ private Table createView() throws Exception { return table; } - private Table createTable(String tableName, List partCols, Map partCols, Map tableParams, String tableType, String location) throws Exception { new TableBuilder() .setDbName(DB_NAME) @@ -580,7 +608,7 @@ private void createPartition(Table table, List values) throws Exception return values; } - private void verifyPartition(Partition partition, Table table, List expectedPartValues, + protected void verifyPartition(Partition partition, Table table, List expectedPartValues, String partitionName) throws Exception { Assert.assertEquals(table.getTableName(), partition.getTableName()); Assert.assertEquals(table.getDbName(), partition.getDbName());