From 41f10852ce6ef895d0146ae4eaf59dfb22028b75 Mon Sep 17 00:00:00 2001
From: Sushanth Sowmyan
Date: Mon, 27 Apr 2015 22:38:56 -0700
Subject: [PATCH] HIVE-10517 : HCatPartition should not be created with "" as location in tests

---
 .../apache/hive/hcatalog/api/TestHCatClient.java | 56 ++++++++++++++++------
 .../hcatalog/api/repl/commands/TestCommands.java | 25 ++++++----
 2 files changed, 56 insertions(+), 25 deletions(-)

diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
index f944157..5f94b13 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
@@ -32,9 +32,12 @@
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStore;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.ql.WindowsPathUtil;
@@ -174,6 +177,10 @@ public static String fixPath(String path) {
     return expectedDir;
   }
 
+  public static String makePartLocation(HCatTable table, Map<String, String> partitionSpec) throws MetaException {
+    return (new Path(table.getSd().getLocation(), Warehouse.makePartPath(partitionSpec))).toUri().toString();
+  }
+
   @Test
   public void testBasicDDLCommands() throws Exception {
     String db = "testdb";
@@ -611,6 +618,8 @@ public void testObjectNotFoundException() throws Exception {
     HCatTable table = new HCatTable(dbName, tableName).cols(columns).partCols(partitionColumns);
     client.createTable(HCatCreateTableDesc.create(table, false).build());
 
+    HCatTable createdTable = client.getTable(dbName,tableName);
+
     Map<String, String> partitionSpec = new HashMap<String, String>();
     partitionSpec.put(partitionColumn, "foobar");
     try {   // Test that fetching a non-existent partition yields ObjectNotFound.
@@ -622,7 +631,8 @@ public void testObjectNotFoundException() throws Exception {
           exception instanceof ObjectNotFoundException);
     }
 
-    client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
+    client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(createdTable, partitionSpec,
+        makePartLocation(createdTable,partitionSpec))).build());
 
     // Test that listPartitionsByFilter() returns an empty-set, if the filter selects no partitions.
assertEquals("Expected empty set of partitions.", @@ -726,16 +736,20 @@ public void testGetPartitionsWithPartialSpec() throws Exception { Map partitionSpec = new HashMap(); partitionSpec.put("grid", "AB"); partitionSpec.put("dt", "2011_12_31"); - client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, + makePartLocation(table,partitionSpec))).build()); partitionSpec.put("grid", "AB"); partitionSpec.put("dt", "2012_01_01"); - client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, + makePartLocation(table,partitionSpec))).build()); partitionSpec.put("dt", "2012_01_01"); partitionSpec.put("grid", "OB"); - client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, + makePartLocation(table,partitionSpec))).build()); partitionSpec.put("dt", "2012_01_01"); partitionSpec.put("grid", "XB"); - client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, + makePartLocation(table,partitionSpec))).build()); Map partialPartitionSpec = new HashMap(); partialPartitionSpec.put("dt", "2012_01_01"); @@ -780,16 +794,20 @@ public void testDropPartitionsWithPartialSpec() throws Exception { Map partitionSpec = new HashMap(); partitionSpec.put("grid", "AB"); partitionSpec.put("dt", "2011_12_31"); - client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, + makePartLocation(table, partitionSpec))).build()); partitionSpec.put("grid", "AB"); partitionSpec.put("dt", "2012_01_01"); - client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, + makePartLocation(table, partitionSpec))).build()); partitionSpec.put("dt", "2012_01_01"); partitionSpec.put("grid", "OB"); - client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, + makePartLocation(table, partitionSpec))).build()); partitionSpec.put("dt", "2012_01_01"); partitionSpec.put("grid", "XB"); - client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, + makePartLocation(table, partitionSpec))).build()); Map partialPartitionSpec = new HashMap(); partialPartitionSpec.put("dt", "2012_01_01"); @@ -869,9 +887,12 @@ public void testReplicationTaskIter() throws Exception { // 4: Add a partition P1 to T2 => 1 event + HCatTable table2Created = sourceMetastore.getTable(dbName,tblName2); + Map ptnDesc1 = new HashMap(); ptnDesc1.put("b","test1"); - HCatPartition ptn1 = (new HCatPartition(table2, ptnDesc1, "")); + HCatPartition ptn1 = (new HCatPartition(table2Created, ptnDesc1, + makePartLocation(table2Created,ptnDesc1))); sourceMetastore.addPartition(HCatAddPartitionDesc.create(ptn1).build()); // 5 : Create and drop 
@@ -879,7 +900,8 @@ public void testReplicationTaskIter() throws Exception {
     for (int i = 0; i < 20; i++){
       Map<String, String> ptnDesc = new HashMap<String, String>();
       ptnDesc.put("b","testmul"+i);
-      HCatPartition ptn = (new HCatPartition(table2, ptnDesc, ""));
+      HCatPartition ptn = (new HCatPartition(table2Created, ptnDesc,
+          makePartLocation(table2Created,ptnDesc)));
       sourceMetastore.addPartition(HCatAddPartitionDesc.create(ptn).build());
       sourceMetastore.dropPartitions(dbName,tblName2,ptnDesc,true);
     }
@@ -1130,7 +1152,8 @@ public void testPartitionRegistrationWithCustomSchema() throws Exception {
     Map<String, String> partitionSpec_1 = new HashMap<String, String>();
     partitionSpec_1.put("grid", "AB");
     partitionSpec_1.put("dt", "2011_12_31");
-    HCatPartition sourcePartition_1 = new HCatPartition(sourceTable, partitionSpec_1, "");
+    HCatPartition sourcePartition_1 = new HCatPartition(sourceTable, partitionSpec_1,
+        makePartLocation(sourceTable,partitionSpec_1));
     sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_1).build());
 
     assertEquals("Unexpected number of partitions. ",
@@ -1173,7 +1196,8 @@ public void testPartitionRegistrationWithCustomSchema() throws Exception {
     Map<String, String> partitionSpec_2 = new HashMap<String, String>();
     partitionSpec_2.put("grid", "AB");
     partitionSpec_2.put("dt", "2012_01_01");
-    HCatPartition sourcePartition_2 = new HCatPartition(sourceTable, partitionSpec_2, "");
+    HCatPartition sourcePartition_2 = new HCatPartition(sourceTable, partitionSpec_2,
+        makePartLocation(sourceTable,partitionSpec_2));
     sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_2).build());
 
     // The source table now has 2 partitions, one in TEXTFILE, the other in ORC.
@@ -1257,7 +1281,8 @@ public void testPartitionSpecRegistrationWithCustomSchema() throws Exception {
     Map<String, String> partitionSpec_1 = new HashMap<String, String>();
     partitionSpec_1.put("grid", "AB");
     partitionSpec_1.put("dt", "2011_12_31");
-    HCatPartition sourcePartition_1 = new HCatPartition(sourceTable, partitionSpec_1, "");
+    HCatPartition sourcePartition_1 = new HCatPartition(sourceTable, partitionSpec_1,
+        makePartLocation(sourceTable,partitionSpec_1));
     sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_1).build());
 
     assertEquals("Unexpected number of partitions. ",
@@ -1300,7 +1325,8 @@ public void testPartitionSpecRegistrationWithCustomSchema() throws Exception {
     Map<String, String> partitionSpec_2 = new HashMap<String, String>();
     partitionSpec_2.put("grid", "AB");
     partitionSpec_2.put("dt", "2012_01_01");
-    HCatPartition sourcePartition_2 = new HCatPartition(sourceTable, partitionSpec_2, "");
+    HCatPartition sourcePartition_2 = new HCatPartition(sourceTable, partitionSpec_2,
+        makePartLocation(sourceTable,partitionSpec_2));
     sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_2).build());
 
     // The source table now has 2 partitions, one in TEXTFILE, the other in ORC.
diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/commands/TestCommands.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/commands/TestCommands.java
index 05fd89f..9f9e6bf 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/commands/TestCommands.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/commands/TestCommands.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
@@ -206,7 +207,7 @@ public void testDropTableCommand() throws HCatException, CommandNeedRetryExcepti
   }
 
   @Test
-  public void testDropPartitionCommand() throws HCatException, CommandNeedRetryException {
+  public void testDropPartitionCommand() throws HCatException, CommandNeedRetryException, MetaException {
     String dbName = "cmd_testdb";
     String tableName = "cmd_testtable";
     int evid = 789;
@@ -242,10 +243,11 @@ public void testDropPartitionCommand() throws HCatException, CommandNeedRetryExc
     HCatTable table = (new HCatTable(dbName, tableName)).tblProps(props).cols(cols).partCols(pcols);
     client.createTable(HCatCreateTableDesc.create(table).build());
 
-    HCatTable t = client.getTable(dbName, tableName);
-    assertNotNull(t);
+    HCatTable tableCreated = client.getTable(dbName, tableName);
+    assertNotNull(tableCreated);
 
-    HCatPartition ptnToAdd = (new HCatPartition(table, ptnDesc, "")).parameters(props);
+    HCatPartition ptnToAdd = (new HCatPartition(tableCreated, ptnDesc,
+        TestHCatClient.makePartLocation(tableCreated,ptnDesc))).parameters(props);
     client.addPartition(HCatAddPartitionDesc.create(ptnToAdd).build());
 
     HCatPartition p1 = client.getPartition(dbName,tableName,ptnDesc);
@@ -274,7 +276,8 @@ public void testDropPartitionCommand() throws HCatException, CommandNeedRetryExc
 
     Map<String, String> props2 = new HashMap<String, String>();
     props2.put(ReplicationUtils.REPL_STATE_ID,String.valueOf(evid - 5));
-    HCatPartition ptnToAdd2 = (new HCatPartition(table, ptnDesc, "")).parameters(props2);
+    HCatPartition ptnToAdd2 = (new HCatPartition(tableCreated, ptnDesc,
+        TestHCatClient.makePartLocation(tableCreated,ptnDesc))).parameters(props2);
     client.addPartition(HCatAddPartitionDesc.create(ptnToAdd2).build());
 
     HCatPartition p3 = client.getPartition(dbName,tableName,ptnDesc);
@@ -296,7 +299,7 @@ public void testDropPartitionCommand() throws HCatException, CommandNeedRetryExc
   }
 
   @Test
-  public void testDropTableCommand2() throws HCatException, CommandNeedRetryException {
+  public void testDropTableCommand2() throws HCatException, CommandNeedRetryException, MetaException {
     // Secondary DropTableCommand test for testing repl-drop-tables' effect on partitions inside a partitioned table
     // when there exist partitions inside the table which are older than the drop event.
     // Our goal is this : Create a table t, with repl.last.id=157, say.
@@ -322,21 +325,23 @@ public void testDropTableCommand2() throws HCatException, CommandNeedRetryExcept
     HCatTable table = (new HCatTable(dbName, tableName)).tblProps(tprops).cols(cols).partCols(pcols);
     client.createTable(HCatCreateTableDesc.create(table).build());
 
-    HCatTable t = client.getTable(dbName, tableName);
-    assertNotNull(t);
+    HCatTable tableCreated = client.getTable(dbName, tableName);
+    assertNotNull(tableCreated);
 
     Map<String, String> ptnDesc1 = new HashMap<String, String>();
     ptnDesc1.put("b","test-older");
     Map<String, String> props1 = new HashMap<String, String>();
     props1.put(ReplicationUtils.REPL_STATE_ID,String.valueOf(evid - 5));
-    HCatPartition ptnToAdd1 = (new HCatPartition(table, ptnDesc1, "")).parameters(props1);
+    HCatPartition ptnToAdd1 = (new HCatPartition(tableCreated, ptnDesc1,
+        TestHCatClient.makePartLocation(tableCreated,ptnDesc1))).parameters(props1);
     client.addPartition(HCatAddPartitionDesc.create(ptnToAdd1).build());
 
     Map<String, String> ptnDesc2 = new HashMap<String, String>();
     ptnDesc2.put("b","test-newer");
     Map<String, String> props2 = new HashMap<String, String>();
     props2.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid + 5));
-    HCatPartition ptnToAdd2 = (new HCatPartition(table, ptnDesc2, "")).parameters(props2);
+    HCatPartition ptnToAdd2 = (new HCatPartition(tableCreated, ptnDesc2,
+        TestHCatClient.makePartLocation(tableCreated,ptnDesc2))).parameters(props2);
     client.addPartition(HCatAddPartitionDesc.create(ptnToAdd2).build());
 
     HCatPartition p1 = client.getPartition(dbName,tableName,ptnDesc1);
-- 
1.9.5 (Apple Git-50.3)
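
For reference, a minimal standalone sketch of what the new makePartLocation() helper computes: the table's storage-descriptor location with a metastore-style partition sub-path appended via Warehouse.makePartPath(). This is not part of the patch; the class name and the table location string are made-up placeholders (in the tests the location comes from table.getSd().getLocation()).

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

// Hypothetical illustration class; not part of the patched test files.
public class PartLocationSketch {
  public static void main(String[] args) throws MetaException {
    // Partition spec in the same String -> String form the tests use.
    Map<String, String> partitionSpec = new LinkedHashMap<String, String>();
    partitionSpec.put("dt", "2012_01_01");
    partitionSpec.put("grid", "AB");

    // Placeholder table location; the tests read this from table.getSd().getLocation().
    String tableLocation = "pfile:///tmp/warehouse/testdb.db/testtable";

    // Same composition as makePartLocation(): <table location>/<key1=val1>/<key2=val2>.
    String partLocation =
        new Path(tableLocation, Warehouse.makePartPath(partitionSpec)).toUri().toString();

    // Expected shape: pfile:///tmp/warehouse/testdb.db/testtable/dt=2012_01_01/grid=AB
    System.out.println(partLocation);
  }
}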