Index: conf/hive-default.xml.template
===================================================================
--- conf/hive-default.xml.template (revision 1475680)
+++ conf/hive-default.xml.template (working copy)
@@ -290,6 +290,24 @@
+<property>
+  <name>hive.metastore.disallow.incompatible.col.type.changes</name>
+  <value>false</value>
+  <description>
+  If true (default is false), ALTER TABLE operations which change the type of
+  a column (say STRING) to an incompatible type (say MAP&lt;STRING, STRING&gt;) are disallowed.
+  RCFile default serde (ColumnarSerde) serializes the values in such a way that the
+  datatypes can be converted from string to any type. The map is also serialized as
+  a string, which can be read as a string as well. However, with any binary
+  serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions
+  when subsequently trying to access old partitions.
+
+  Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
+  not blocked.
+
+  See HIVE-4409 for more details.
+  </description>
+</property>
hive.metastore.end.function.listeners
list of comma separated listeners for the end of metastore functions.
Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (revision 1475680)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (working copy)
@@ -50,6 +50,8 @@
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeUtils;
@@ -340,7 +342,53 @@
return true;
}
+ static void throwExceptionIfIncompatibleColTypeChange(
+ List<FieldSchema> oldCols, List<FieldSchema> newCols)
+ throws InvalidOperationException {
+
+ List<String> incompatibleCols = new ArrayList<String>();
+ int maxCols = Math.min(oldCols.size(), newCols.size());
+ for (int i = 0; i < maxCols; i++) {
+ if (!areColTypesCompatible(oldCols.get(i).getType(), newCols.get(i).getType())) {
+ incompatibleCols.add(newCols.get(i).getName());
+ }
+ }
+ if (!incompatibleCols.isEmpty()) {
+ throw new InvalidOperationException(
+ "The following columns have types incompatible with the existing " +
+ "columns in their respective positions :\n" +
+ StringUtils.join(",", incompatibleCols)
+ );
+ }
+ }
+
/**
+ * @return true if oldType and newType are compatible.
+ * Two types are compatible if we have internal functions to cast one to another.
+ */
+ static private boolean areColTypesCompatible(String oldType, String newType) {
+ if (oldType.equals(newType)) {
+ return true;
+ }
+
+ /*
+ * RCFile default serde (ColumnarSerde) serializes the values in such a way that the
+ * datatypes can be converted from string to any type. The map is also serialized as
+ * a string, which can be read as a string as well. However, with any binary
+ * serialization, this is not true.
+ *
+ * Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
+ * not blocked.
+ */
+ if(serdeConstants.PrimitiveTypes.contains(oldType.toLowerCase()) &&
+ serdeConstants.PrimitiveTypes.contains(newType.toLowerCase())) {
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
* validate column type
*
* if it is predefined, yes. otherwise no
@@ -451,7 +499,7 @@
static Set<String> hiveThriftTypeMap; //for validation
static {
hiveThriftTypeMap = new HashSet<String>();
- hiveThriftTypeMap.addAll(org.apache.hadoop.hive.serde.serdeConstants.PrimitiveTypes);
+ hiveThriftTypeMap.addAll(serdeConstants.PrimitiveTypes);
hiveThriftTypeMap.addAll(org.apache.hadoop.hive.serde.serdeConstants.CollectionTypes);
hiveThriftTypeMap.add(org.apache.hadoop.hive.serde.serdeConstants.UNION_TYPE_NAME);
hiveThriftTypeMap.add(org.apache.hadoop.hive.serde.serdeConstants.STRUCT_TYPE_NAME);
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (revision 1475680)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (working copy)
@@ -29,6 +29,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -102,6 +103,15 @@
+ newt.getTableName() + " doesn't exist");
}
+ if (HiveConf.getBoolVar(hiveConf,
+ HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
+ false)) {
+ // Throws InvalidOperationException if the new column types are not
+ // compatible with the current column types.
+ MetaStoreUtils.throwExceptionIfIncompatibleColTypeChange(
+ oldt.getSd().getCols(), newt.getSd().getCols());
+ }
+
//check that partition keys have not changed, except for virtual views
//however, allow the partition comments to change
boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(),
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1475680)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -131,7 +131,8 @@
HiveConf.ConfVars.HMSHANDLERATTEMPTS,
HiveConf.ConfVars.HMSHANDLERINTERVAL,
HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF,
- HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN
+ HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
+ HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES
};
/**
@@ -339,8 +340,9 @@
METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", false),
METASTORE_PARTITION_NAME_WHITELIST_PATTERN(
"hive.metastore.partition.name.whitelist.pattern", ""),
+ METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
+ "hive.metastore.disallow.incompatible.col.type.changes", false),
-
// Default parameters for creating tables
NEWTABLEDEFAULTPARA("hive.table.parameters.default", ""),
// Parameters to copy over when creating a table with Create Table Like.
Index: ql/src/test/results/clientpositive/disallow_incompatible_type_change_off.q.out
===================================================================
--- ql/src/test/results/clientpositive/disallow_incompatible_type_change_off.q.out (revision 0)
+++ ql/src/test/results/clientpositive/disallow_incompatible_type_change_off.q.out (working copy)
@@ -0,0 +1,49 @@
+PREHOOK: query: SELECT * FROM src LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+238 val_238
+PREHOOK: query: CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@test_table123
+PREHOOK: query: INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table123@ds=foo1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table123@ds=foo1
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: SELECT * from test_table123 WHERE ds="foo1"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table123
+PREHOOK: Input: default@test_table123@ds=foo1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from test_table123 WHERE ds="foo1"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Input: default@test_table123@ds=foo1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+1 {"a1":"b1"} foo1
+PREHOOK: query: -- This should now work as hive.metastore.disallow.incompatible.col.type.changes is false
+ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+POSTHOOK: query: -- This should now work as hive.metastore.disallow.incompatible.col.type.changes is false
+ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Output: default@test_table123
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
Index: ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
===================================================================
--- ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out (revision 0)
+++ ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out (working copy)
@@ -0,0 +1,125 @@
+PREHOOK: query: SELECT * FROM src LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+238 val_238
+PREHOOK: query: CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@test_table123
+PREHOOK: query: INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table123@ds=foo1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table123@ds=foo1
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: SELECT * from test_table123 WHERE ds="foo1"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table123
+PREHOOK: Input: default@test_table123@ds=foo1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from test_table123 WHERE ds="foo1"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Input: default@test_table123@ds=foo1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+1 {"a1":"b1"} foo1
+PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b MAP<STRING, STRING>)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+POSTHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b MAP<STRING, STRING>)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Output: default@test_table123
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a BIGINT, b MAP<STRING, STRING>)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+POSTHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a BIGINT, b MAP<STRING, STRING>)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Output: default@test_table123
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b MAP<STRING, STRING>)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+POSTHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b MAP<STRING, STRING>)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Output: default@test_table123
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a DOUBLE, b MAP<STRING, STRING>)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+POSTHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a DOUBLE, b MAP<STRING, STRING>)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Output: default@test_table123
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+POSTHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Output: default@test_table123
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a BOOLEAN, b MAP<STRING, STRING>)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+POSTHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a BOOLEAN, b MAP<STRING, STRING>)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Output: default@test_table123
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+POSTHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Output: default@test_table123
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: ALTER TABLE test_table123 CHANGE COLUMN a a_new BOOLEAN
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+POSTHOOK: query: ALTER TABLE test_table123 CHANGE COLUMN a a_new BOOLEAN
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Output: default@test_table123
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b EXPRESSION []
+PREHOOK: query: -- All the above ALTERs will succeed since they are between compatible types.
+-- The following ALTER will fail as MAP and STRING are not
+-- compatible.
+ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
Index: ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out
===================================================================
--- ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out (revision 0)
+++ ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out (working copy)
@@ -0,0 +1,42 @@
+PREHOOK: query: SELECT * FROM src LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+238 val_238
+PREHOOK: query: CREATE TABLE test_table123 (a INT, b STRING) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE test_table123 (a INT, b STRING) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@test_table123
+PREHOOK: query: INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, "one" FROM src LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table123@ds=foo1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, "one" FROM src LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table123@ds=foo1
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b SIMPLE []
+PREHOOK: query: SELECT * from test_table123 WHERE ds="foo1"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table123
+PREHOOK: Input: default@test_table123@ds=foo1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from test_table123 WHERE ds="foo1"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table123
+POSTHOOK: Input: default@test_table123@ds=foo1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).a SIMPLE []
+POSTHOOK: Lineage: test_table123 PARTITION(ds=foo1).b SIMPLE []
+1 one foo1
+PREHOOK: query: ALTER TABLE test_table123 CHANGE COLUMN b b MAP<STRING, STRING>
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@test_table123
+PREHOOK: Output: default@test_table123
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
Index: ql/src/test/queries/clientpositive/disallow_incompatible_type_change_off.q
===================================================================
--- ql/src/test/queries/clientpositive/disallow_incompatible_type_change_off.q (revision 0)
+++ ql/src/test/queries/clientpositive/disallow_incompatible_type_change_off.q (working copy)
@@ -0,0 +1,7 @@
+SET hive.metastore.disallow.incompatible.col.type.changes=false;
+SELECT * FROM src LIMIT 1;
+CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
+INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1;
+SELECT * from test_table123 WHERE ds="foo1";
+-- This should now work as hive.metastore.disallow.incompatible.col.type.changes is false
+ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING);
Index: ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
===================================================================
--- ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q (revision 0)
+++ ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q (working copy)
@@ -0,0 +1,17 @@
+SET hive.metastore.disallow.incompatible.col.type.changes=true;
+SELECT * FROM src LIMIT 1;
+CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
+INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1;
+SELECT * from test_table123 WHERE ds="foo1";
+ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b MAP<STRING, STRING>);
+ALTER TABLE test_table123 REPLACE COLUMNS (a BIGINT, b MAP<STRING, STRING>);
+ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b MAP<STRING, STRING>);
+ALTER TABLE test_table123 REPLACE COLUMNS (a DOUBLE, b MAP<STRING, STRING>);
+ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>);
+ALTER TABLE test_table123 REPLACE COLUMNS (a BOOLEAN, b MAP<STRING, STRING>);
+ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>);
+ALTER TABLE test_table123 CHANGE COLUMN a a_new BOOLEAN;
+-- All the above ALTERs will succeed since they are between compatible types.
+-- The following ALTER will fail as MAP and STRING are not
+-- compatible.
+ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING);
Index: ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on2.q
===================================================================
--- ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on2.q (revision 0)
+++ ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on2.q (working copy)
@@ -0,0 +1,6 @@
+SET hive.metastore.disallow.incompatible.col.type.changes=true;
+SELECT * FROM src LIMIT 1;
+CREATE TABLE test_table123 (a INT, b STRING) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
+INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, "one" FROM src LIMIT 1;
+SELECT * from test_table123 WHERE ds="foo1";
+ALTER TABLE test_table123 CHANGE COLUMN b b MAP<STRING, STRING>;