commit eaa25388acec867524e5d2a03e6608372bbbd449 Author: Vihang Karajgaonkar Date: Thu Jul 27 16:23:47 2017 -0700 HIVE-17189 : Fix backwards incompatibility in HiveMetaStoreClient diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 4f7d56bd60f12779fe7702f4fd0d39e626955daa..603e5e9b991664e8badf337dc48c8fb44f0e4ccd 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -593,6 +593,55 @@ public void testListPartitionsWihtLimitEnabled() throws Throwable { assertEquals(" should have returned 50 partitions", maxParts, partitions.size()); } + public void testAlterTableCascade() throws Throwable { + // create a table with multiple partitions + String dbName = "compdb"; + String tblName = "comptbl"; + String typeName = "Person"; + + cleanUp(dbName, tblName, typeName); + + List<List<String>> values = new ArrayList<List<String>>(); + values.add(makeVals("2008-07-01 14:13:12", "14")); + values.add(makeVals("2008-07-01 14:13:12", "15")); + values.add(makeVals("2008-07-02 14:13:12", "15")); + values.add(makeVals("2008-07-03 14:13:12", "151")); + + createMultiPartitionTableSchema(dbName, tblName, typeName, values); + Table tbl = client.getTable(dbName, tblName); + List<FieldSchema> cols = tbl.getSd().getCols(); + cols.add(new FieldSchema("new_col", serdeConstants.STRING_TYPE_NAME, "")); + tbl.getSd().setCols(cols); + //add new column with cascade option + client.alter_table(dbName, tblName, tbl, true); + // + Table tbl2 = client.getTable(dbName, tblName); + Assert.assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size()); + Assert.assertEquals("Unexpected column name", "new_col", tbl2.getSd().getCols().get(2).getName()); + //get a partition + List<String> pvalues = new ArrayList<>(2); + pvalues.add("2008-07-01 14:13:12"); + 
pvalues.add("14"); + Partition partition = client.getPartition(dbName, tblName, pvalues); + Assert.assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size()); + Assert.assertEquals("Unexpected column name", "new_col", partition.getSd().getCols().get(2).getName()); + + //add another column + cols = tbl.getSd().getCols(); + cols.add(new FieldSchema("new_col2", serdeConstants.STRING_TYPE_NAME, "")); + tbl.getSd().setCols(cols); + //add new column with no cascade option + client.alter_table(dbName, tblName, tbl, false); + tbl2 = client.getTable(dbName, tblName); + Assert.assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size()); + Assert.assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName()); + //get partition, this partition should not have the newly added column since cascade option + //was false + partition = client.getPartition(dbName, tblName, pvalues); + Assert.assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size()); + } + + public void testListPartitionNames() throws Throwable { // create a table with multiple partitions String dbName = "compdb"; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 24fc1f6cf90783011bb9b662af45a206bd0515df..ad51dfd5c9e2cd4a0b92697808ebf12a3163c3f9 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -50,6 +50,7 @@ import javax.security.auth.login.LoginException; import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.auth.HiveAuthUtils; import org.apache.hadoop.hive.common.classification.InterfaceAudience; @@ -359,6 +360,16 @@ public void 
alter_table(String dbname, String tbl_name, Table new_tbl) } @Override + public void alter_table(String defaultDatabaseName, String tblName, Table table, + boolean cascade) throws InvalidOperationException, MetaException, TException { + EnvironmentContext environmentContext = new EnvironmentContext(); + if (cascade) { + environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); + } + alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext); + } + + @Override public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext); @@ -1536,12 +1547,24 @@ public int getNumPartitionsByFilter(String db_name, String tbl_name, } @Override + public void alter_partition(String dbName, String tblName, Partition newPart) + throws InvalidOperationException, MetaException, TException { + client.alter_partition_with_environment_context(dbName, tblName, newPart, null); + } + + @Override public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext); } @Override + public void alter_partitions(String dbName, String tblName, List<Partition> newParts) + throws InvalidOperationException, MetaException, TException { + client.alter_partitions_with_environment_context(dbName, tblName, newParts, null); + } + + @Override public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext); diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index d4bbef037dbdaa43ccec07613afe67db6ef70f4d..813a283870547eeb0eaaf1615c811fd20d3cf772 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -26,6 +26,7 @@ import java.util.Map.Entry; import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public; @@ -735,6 +736,14 @@ void createTable(Table tbl) throws AlreadyExistsException, void alter_table(String defaultDatabaseName, String tblName, Table table) throws InvalidOperationException, MetaException, TException; + /** + * Use alter_table_with_environmentContext instead of alter_table with cascade option + * passed in EnvironmentContext using {@code StatsSetupConst.CASCADE} + */ + @Deprecated + void alter_table(String defaultDatabaseName, String tblName, Table table, + boolean cascade) throws InvalidOperationException, MetaException, TException; + //wrapper of alter_table_with_cascade void alter_table_with_environmentContext(String defaultDatabaseName, String tblName, Table table, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, @@ -808,6 +817,26 @@ boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean dropPartition(String db_name, String tbl_name, String name, boolean deleteData) throws NoSuchObjectException, MetaException, TException; + + /** + * updates a partition to new partition + * + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newPart + * new partition + * @throws 
InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partition(String dbName, String tblName, Partition newPart) + throws InvalidOperationException, MetaException, TException; + /** * updates a partition to new partition * @@ -843,7 +872,28 @@ void alter_partition(String dbName, String tblName, Partition newPart, Environme * @throws TException * if error in communicating with metastore server */ - void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext) + void alter_partitions(String dbName, String tblName, List<Partition> newParts) + throws InvalidOperationException, MetaException, TException; + + /** + * updates a list of partitions + * + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newParts + * list of partitions + * @param environmentContext + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partitions(String dbName, String tblName, List<Partition> newParts, + EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException; /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 4642ec2faa0d45cd2d05994f71ba60165b707975..61f6a7c4ff38447db0ac2610e7308f4e710580ab 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -299,6 +299,19 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc return super.getSchema(dbName, 
tableName); } + @Deprecated + @Override + public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, + boolean cascade) throws InvalidOperationException, MetaException, TException { + org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); + if (old_tbl != null) { + //actually temp table does not support partitions, cascade is not applicable here + alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); + return; + } + super.alter_table(dbname, tbl_name, new_tbl, cascade); + } + @Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl) throws InvalidOperationException,