From 2a5a6de93f92128174861bf4f649354e82573b29 Mon Sep 17 00:00:00 2001
From: Ashutosh Chauhan
Date: Mon, 2 Nov 2015 17:00:24 -0800
Subject: [PATCH] HIVE-12320 : hive.metastore.disallow.incompatible.col.type.changes should be true by default

---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java |   2 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java      |  13 +--
 .../hadoop/hive/ql/exec/FunctionRegistry.java      | 118 +++------------------
 .../hadoop/hive/ql/parse/TypeCheckProcFactory.java |   5 +-
 .../hadoop/hive/ql/exec/TestFunctionRegistry.java  |   2 +-
 .../test/queries/clientpositive/avro_partitioned.q |   3 +-
 ql/src/test/queries/clientpositive/input3.q        |  10 +-
 .../clientpositive/orc_int_type_promotion.q        |   2 +
 .../clientpositive/parquet_schema_evolution.q      |   6 +-
 .../hadoop/hive/serde2/typeinfo/TypeInfoUtils.java |  93 ++++++++++++++++
 10 files changed, 123 insertions(+), 131 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 3ab73ad..98f9206 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -637,7 +637,7 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
         "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
         "pruning is the correct behaviour"),
     METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
-        "hive.metastore.disallow.incompatible.col.type.changes", false,
+        "hive.metastore.disallow.incompatible.col.type.changes", true,
         "If true (default is false), ALTER TABLE operations which change the type of a\n" +
         "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
         "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
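With the default flipped to true, a client that intentionally performs an incompatible column type change has to relax the setting first, which is what the set/reset statements added to the .q tests further down do. Below is a minimal sketch of the equivalent programmatic override through HiveConf; the class name RelaxIncompatibleColTypeCheck is illustrative, and only ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES plus the standard getBoolVar/setBoolVar accessors are taken from the patch and the existing HiveConf API:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    public class RelaxIncompatibleColTypeCheck {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // With this patch the default is true, so incompatible ALTER TABLE
        // CHANGE/REPLACE COLUMNS operations are rejected by the metastore.
        boolean disallow =
            conf.getBoolVar(ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES);
        System.out.println("disallow incompatible col type changes = " + disallow);

        // Opt out only for a session that knowingly rewrites or drops the old
        // data, mirroring the set/reset pairs added to the q-tests below.
        conf.setBoolVar(ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, false);
      }
    }
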
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index bbaa1ce..02cbd76 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -51,11 +51,9 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -79,6 +77,7 @@
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
 import org.apache.hive.common.util.ReflectionUtil;
@@ -632,9 +631,6 @@ static boolean areSameColumns(List<FieldSchema> oldCols, List<FieldSchema> newCols) {
    * Two types are compatible if we have internal functions to cast one to another.
    */
  static private boolean areColTypesCompatible(String oldType, String newType) {
-    if (oldType.equals(newType)) {
-      return true;
-    }
 
    /*
     * RCFile default serde (ColumnarSerde) serializes the values in such a way that the
@@ -645,12 +641,9 @@ static private boolean areColTypesCompatible(String oldType, String newType) {
     * Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
     * not blocked.
     */
-    if(serdeConstants.PrimitiveTypes.contains(oldType.toLowerCase()) &&
-        serdeConstants.PrimitiveTypes.contains(newType.toLowerCase())) {
-      return true;
-    }
 
-    return false;
+    return TypeInfoUtils.implicitConvertible(TypeInfoUtils.getTypeInfoFromTypeString(oldType),
+        TypeInfoUtils.getTypeInfoFromTypeString(newType));
   }
 
   public static final int MAX_MS_TYPENAME_LENGTH = 2000; // 4000/2, for an unlikely unicode case
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index de8e98c..90f7518 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -22,7 +22,6 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.EnumMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
@@ -558,30 +557,6 @@ public static WindowFunctionInfo getWindowFunctionInfo(String functionName)
     return synonyms;
   }
 
-  // The ordering of types here is used to determine which numeric types
-  // are common/convertible to one another. Probably better to rely on the
-  // ordering explicitly defined here than to assume that the enum values
-  // that were arbitrarily assigned in PrimitiveCategory work for our purposes.
-  static EnumMap<PrimitiveCategory, Integer> numericTypes =
-      new EnumMap<PrimitiveCategory, Integer>(PrimitiveCategory.class);
-  static List<PrimitiveCategory> numericTypeList = new ArrayList<PrimitiveCategory>();
-
-  static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
-    numericTypeList.add(primitiveCategory);
-    numericTypes.put(primitiveCategory, level);
-  }
-
-  static {
-    registerNumericType(PrimitiveCategory.BYTE, 1);
-    registerNumericType(PrimitiveCategory.SHORT, 2);
-    registerNumericType(PrimitiveCategory.INT, 3);
-    registerNumericType(PrimitiveCategory.LONG, 4);
-    registerNumericType(PrimitiveCategory.FLOAT, 5);
-    registerNumericType(PrimitiveCategory.DOUBLE, 6);
-    registerNumericType(PrimitiveCategory.DECIMAL, 7);
-    registerNumericType(PrimitiveCategory.STRING, 8);
-  }
-
   /**
    * Check if the given type is numeric. String is considered numeric when used in
    * numeric operators.
@@ -702,15 +677,15 @@ public static TypeInfo getCommonClassForUnionAll(TypeInfo a, TypeInfo b) {
           (PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b,PrimitiveCategory.STRING);
     }
 
-    if (FunctionRegistry.implicitConvertible(a, b)) {
+    if (TypeInfoUtils.implicitConvertible(a, b)) {
       return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, pcB);
     }
-    if (FunctionRegistry.implicitConvertible(b, a)) {
+    if (TypeInfoUtils.implicitConvertible(b, a)) {
       return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, pcA);
     }
-    for (PrimitiveCategory t : numericTypeList) {
-      if (FunctionRegistry.implicitConvertible(pcA, t)
-          && FunctionRegistry.implicitConvertible(pcB, t)) {
+    for (PrimitiveCategory t : TypeInfoUtils.numericTypeList) {
+      if (TypeInfoUtils.implicitConvertible(pcA, t)
+          && TypeInfoUtils.implicitConvertible(pcB, t)) {
         return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, t);
       }
     }
@@ -759,9 +734,9 @@ public static TypeInfo getCommonClassForComparison(TypeInfo a, TypeInfo b) {
       return TypeInfoFactory.doubleTypeInfo;
     }
 
-    for (PrimitiveCategory t : numericTypeList) {
-      if (FunctionRegistry.implicitConvertible(pcA, t)
-          && FunctionRegistry.implicitConvertible(pcB, t)) {
+    for (PrimitiveCategory t : TypeInfoUtils.numericTypeList) {
+      if (TypeInfoUtils.implicitConvertible(pcA, t)
+          && TypeInfoUtils.implicitConvertible(pcB, t)) {
         return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, t);
       }
     }
@@ -790,8 +765,8 @@ public static PrimitiveCategory getPrimitiveCommonCategory(TypeInfo a, TypeInfo b) {
     if (pgB == PrimitiveGrouping.DATE_GROUP && pgA == PrimitiveGrouping.STRING_GROUP) {
       return PrimitiveCategory.STRING;
     }
-    Integer ai = numericTypes.get(pcA);
-    Integer bi = numericTypes.get(pcB);
+    Integer ai = TypeInfoUtils.numericTypes.get(pcA);
+    Integer bi = TypeInfoUtils.numericTypes.get(pcB);
     if (ai == null || bi == null) {
       // If either is not a numeric type, return null.
       return null;
@@ -870,73 +845,6 @@ public static TypeInfo getCommonClassForStruct(StructTypeInfo a, StructTypeInfo b) {
     return TypeInfoFactory.getStructTypeInfo(names, typeInfos);
   }
 
-  public static boolean implicitConvertible(PrimitiveCategory from, PrimitiveCategory to) {
-    if (from == to) {
-      return true;
-    }
-
-    PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
-    PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
-
-    // Allow implicit String to Double conversion
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
-      return true;
-    }
-    // Allow implicit String to Decimal conversion
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
-      return true;
-    }
-    // Void can be converted to any type
-    if (from == PrimitiveCategory.VOID) {
-      return true;
-    }
-
-    // Allow implicit String to Date conversion
-    if (fromPg == PrimitiveGrouping.DATE_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-    // Allow implicit Numeric to String conversion
-    if (fromPg == PrimitiveGrouping.NUMERIC_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-    // Allow implicit String to varchar conversion, and vice versa
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-
-    // Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
-    // Decimal -> String
-    Integer f = numericTypes.get(from);
-    Integer t = numericTypes.get(to);
-    if (f == null || t == null) {
-      return false;
-    }
-    if (f.intValue() > t.intValue()) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * Returns whether it is possible to implicitly convert an object of Class
-   * from to Class to.
-   */
-  public static boolean implicitConvertible(TypeInfo from, TypeInfo to) {
-    if (from.equals(to)) {
-      return true;
-    }
-
-    // Reimplemented to use PrimitiveCategory rather than TypeInfo, because
-    // 2 TypeInfos from the same qualified type (varchar, decimal) should still be
-    // seen as equivalent.
-    if (from.getCategory() == Category.PRIMITIVE && to.getCategory() == Category.PRIMITIVE) {
-      return implicitConvertible(
-          ((PrimitiveTypeInfo) from).getPrimitiveCategory(),
-          ((PrimitiveTypeInfo) to).getPrimitiveCategory());
-    }
-    return false;
-  }
-
   /**
    * Get the GenericUDAF evaluator for the name and argumentClasses.
    *
@@ -1105,7 +1013,7 @@ public static int matchCost(TypeInfo argumentPassed,
         // but there is a conversion cost.
         return 1;
       }
-    if (!exact && implicitConvertible(argumentPassed, argumentAccepted)) {
+    if (!exact && TypeInfoUtils.implicitConvertible(argumentPassed, argumentAccepted)) {
       return 1;
     }
 
@@ -1273,9 +1181,9 @@ public static Method getMethodInternal(Class<? extends T> udfClass, List<Method> mlist, boolean exact,
         acceptedIsPrimitive = true;
         acceptedPrimCat = ((PrimitiveTypeInfo) accepted).getPrimitiveCategory();
       }
-      if (acceptedIsPrimitive && numericTypes.containsKey(acceptedPrimCat)) {
+      if (acceptedIsPrimitive && TypeInfoUtils.numericTypes.containsKey(acceptedPrimCat)) {
         // We're looking for the udf with the smallest maximum numeric type.
-        int typeValue = numericTypes.get(acceptedPrimCat);
+        int typeValue = TypeInfoUtils.numericTypes.get(acceptedPrimCat);
         maxNumericType = typeValue > maxNumericType ? typeValue : maxNumericType;
       } else if (!accepted.equals(reference)) {
         // There are non-numeric arguments that don't match from one UDF to
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 3a6535b..7f5d72a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -78,6 +78,7 @@
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hive.common.util.DateUtils;
@@ -903,7 +904,7 @@ protected ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr,
 
       if (myt.getCategory() == Category.LIST) {
         // Only allow integer index for now
-        if (!FunctionRegistry.implicitConvertible(children.get(1).getTypeInfo(),
+        if (!TypeInfoUtils.implicitConvertible(children.get(1).getTypeInfo(),
             TypeInfoFactory.intTypeInfo)) {
           throw new SemanticException(SemanticAnalyzer.generateErrorMessage(
               expr, ErrorMsg.INVALID_ARRAYINDEX_TYPE.getMsg()));
@@ -913,7 +914,7 @@ protected ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr,
         TypeInfo t = ((ListTypeInfo) myt).getListElementTypeInfo();
         desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry.getGenericUDFForIndex(), children);
       } else if (myt.getCategory() == Category.MAP) {
-        if (!FunctionRegistry.implicitConvertible(children.get(1).getTypeInfo(),
+        if (!TypeInfoUtils.implicitConvertible(children.get(1).getTypeInfo(),
             ((MapTypeInfo) myt).getMapKeyTypeInfo())) {
           throw new SemanticException(ErrorMsg.INVALID_MAPINDEX_TYPE
               .getMsg(expr));
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
index 068bdee..6a83c32 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
@@ -80,7 +80,7 @@ protected void setUp() {
   }
 
   private void implicit(TypeInfo a, TypeInfo b, boolean convertible) {
-    assertEquals(convertible, FunctionRegistry.implicitConvertible(a, b));
+    assertEquals(convertible, TypeInfoUtils.implicitConvertible(a, b));
   }
 
   public void testImplicitConversion() {
diff --git a/ql/src/test/queries/clientpositive/avro_partitioned.q b/ql/src/test/queries/clientpositive/avro_partitioned.q
index a06e7c4..9e6c79a 100644
--- a/ql/src/test/queries/clientpositive/avro_partitioned.q
+++ b/ql/src/test/queries/clientpositive/avro_partitioned.q
@@ -112,7 +112,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat';
 
 -- Insert data into a partition
 INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 -- Evolve the table schema by adding new array field "cast_and_crew"
 ALTER TABLE episodes_partitioned_serdeproperties SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
@@ -144,5 +144,6 @@ WITH SERDEPROPERTIES ('avro.schema.literal'='{
   ]
 }');
 
+reset hive.metastore.disallow.incompatible.col.type.changes;
 -- Try selecting from the evolved table
 SELECT * FROM episodes_partitioned_serdeproperties;
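The q-file updates in this patch relax the flag around ALTER statements that are no longer accepted by default: the metastore check above now delegates to TypeInfoUtils.implicitConvertible, so only changes along Hive's implicit-cast lattice (for example int to bigint, or numeric and date types to string) still pass, while narrowing changes and primitive-to-complex changes are rejected. A standalone sketch of that check follows, assuming only the TypeInfoUtils methods introduced in this patch plus the existing getTypeInfoFromTypeString; the class name and sample type strings are illustrative:

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    public class ColTypeCompatibilitySketch {
      // Mirrors the new MetaStoreUtils.areColTypesCompatible body: parse both
      // type strings and ask whether an implicit conversion exists.
      static boolean compatible(String oldType, String newType) {
        TypeInfo from = TypeInfoUtils.getTypeInfoFromTypeString(oldType);
        TypeInfo to = TypeInfoUtils.getTypeInfoFromTypeString(newType);
        return TypeInfoUtils.implicitConvertible(from, to);
      }

      public static void main(String[] args) {
        System.out.println(compatible("int", "bigint"));     // true: numeric widening
        System.out.println(compatible("int", "string"));     // true: numeric to string group
        System.out.println(compatible("string", "int"));     // false: needs an explicit cast
        System.out.println(compatible("int", "array<int>")); // false: primitive to complex
      }
    }
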
diff --git a/ql/src/test/queries/clientpositive/input3.q b/ql/src/test/queries/clientpositive/input3.q
index 2efa7a4..1925fff 100644
--- a/ql/src/test/queries/clientpositive/input3.q
+++ b/ql/src/test/queries/clientpositive/input3.q
@@ -1,7 +1,3 @@
-
-
-
-
 CREATE TABLE TEST3a(A INT, B DOUBLE) STORED AS TEXTFILE;
 DESCRIBE TEST3a;
 CREATE TABLE TEST3b(A ARRAY, B DOUBLE, C MAP) STORED AS TEXTFILE;
@@ -16,11 +12,9 @@ ALTER TABLE TEST3b RENAME TO TEST3c;
 ALTER TABLE TEST3b RENAME TO TEST3c;
 DESCRIBE TEST3c;
 SHOW TABLES;
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 EXPLAIN ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
 ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE EXTENDED TEST3c;
-
-
-
-
diff --git a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
index 4a805a0..c3e2cf9 100644
--- a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
+++ b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
@@ -1,3 +1,4 @@
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 create table if not exists alltypes (
  bo boolean,
  ti tinyint,
@@ -77,3 +78,4 @@ select * from src_part_orc limit 10;
 
 alter table src_part_orc change key key bigint;
 select * from src_part_orc limit 10;
+reset hive.metastore.disallow.incompatible.col.type.changes;
diff --git a/ql/src/test/queries/clientpositive/parquet_schema_evolution.q b/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
index af0cf99..d2f2996 100644
--- a/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
+++ b/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
@@ -11,10 +11,10 @@ INSERT OVERWRITE TABLE NewStructField SELECT named_struct('a1', map('k1','v1'),
 DESCRIBE NewStructField;
 SELECT * FROM NewStructField;
 
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 -- Adds new fields to the struct types
 ALTER TABLE NewStructField REPLACE COLUMNS (a struct<a1:map<string,string>, a2:struct<e1:int,e2:string>, a3:int>, b int);
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE NewStructField;
 SELECT * FROM NewStructField;
 
@@ -24,4 +24,4 @@ DESCRIBE NewStructFieldTable;
 SELECT * FROM NewStructFieldTable;
 
 DROP TABLE NewStructField;
-DROP TABLE NewStructFieldTable;
\ No newline at end of file
+DROP TABLE NewStructFieldTable;
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
index a4323d1..14ed5ba 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
@@ -23,6 +23,7 @@
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
 import java.util.ArrayList;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -45,6 +46,7 @@
 import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveGrouping;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveTypeEntry;
 
 /**
@@ -53,6 +55,25 @@
  */
 public final class TypeInfoUtils {
 
+  public static List<PrimitiveCategory> numericTypeList = new ArrayList<PrimitiveCategory>();
+  // The ordering of types here is used to determine which numeric types
+  // are common/convertible to one another. Probably better to rely on the
+  // ordering explicitly defined here than to assume that the enum values
+  // that were arbitrarily assigned in PrimitiveCategory work for our purposes.
+  public static EnumMap<PrimitiveCategory, Integer> numericTypes =
+      new EnumMap<PrimitiveCategory, Integer>(PrimitiveCategory.class);
+
+  static {
+    registerNumericType(PrimitiveCategory.BYTE, 1);
+    registerNumericType(PrimitiveCategory.SHORT, 2);
+    registerNumericType(PrimitiveCategory.INT, 3);
+    registerNumericType(PrimitiveCategory.LONG, 4);
+    registerNumericType(PrimitiveCategory.FLOAT, 5);
+    registerNumericType(PrimitiveCategory.DOUBLE, 6);
+    registerNumericType(PrimitiveCategory.DECIMAL, 7);
+    registerNumericType(PrimitiveCategory.STRING, 8);
+  }
+
   private TypeInfoUtils() {
     // prevent instantiation
   }
@@ -807,4 +828,76 @@ public static int getCharacterLengthForType(PrimitiveTypeInfo typeInfo) {
       return 0;
     }
   }
+
+  public static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
+    numericTypeList.add(primitiveCategory);
+    numericTypes.put(primitiveCategory, level);
+  }
+
+  public static boolean implicitConvertible(PrimitiveCategory from, PrimitiveCategory to) {
+    if (from == to) {
+      return true;
+    }
+
+    PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
+    PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
+
+    // Allow implicit String to Double conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
+      return true;
+    }
+    // Allow implicit String to Decimal conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
+      return true;
+    }
+    // Void can be converted to any type
+    if (from == PrimitiveCategory.VOID) {
+      return true;
+    }
+
+    // Allow implicit String to Date conversion
+    if (fromPg == PrimitiveGrouping.DATE_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit Numeric to String conversion
+    if (fromPg == PrimitiveGrouping.NUMERIC_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit String to varchar conversion, and vice versa
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+
+    // Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
+    // Decimal -> String
+    Integer f = numericTypes.get(from);
+    Integer t = numericTypes.get(to);
+    if (f == null || t == null) {
+      return false;
+    }
+    if (f.intValue() > t.intValue()) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Returns whether it is possible to implicitly convert an object of Class
+   * from to Class to.
+   */
+  public static boolean implicitConvertible(TypeInfo from, TypeInfo to) {
+    if (from.equals(to)) {
+      return true;
+    }
+
+    // Reimplemented to use PrimitiveCategory rather than TypeInfo, because
+    // 2 TypeInfos from the same qualified type (varchar, decimal) should still be
+    // seen as equivalent.
+    if (from.getCategory() == Category.PRIMITIVE && to.getCategory() == Category.PRIMITIVE) {
+      return implicitConvertible(
+          ((PrimitiveTypeInfo) from).getPrimitiveCategory(),
+          ((PrimitiveTypeInfo) to).getPrimitiveCategory());
+    }
+    return false;
+  }
 }
-- 
1.7.12.4 (Apple Git-37)
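The EnumMap levels moved into TypeInfoUtils define a one-way widening order, BYTE < SHORT < INT < LONG < FLOAT < DOUBLE < DECIMAL < STRING: an implicit conversion is allowed only from a lower level to an equal or higher one, plus the group-based special cases (string to double/decimal, numeric and date types to string, VOID to anything). A small sketch exercising the PrimitiveCategory overload added above; the class name is illustrative and the expected results follow from the table of levels:

    import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    public class NumericOrderSketch {
      public static void main(String[] args) {
        // Up the ladder is implicit.
        System.out.println(TypeInfoUtils.implicitConvertible(
            PrimitiveCategory.BYTE, PrimitiveCategory.DOUBLE));   // true (1 <= 6)
        System.out.println(TypeInfoUtils.implicitConvertible(
            PrimitiveCategory.FLOAT, PrimitiveCategory.DECIMAL)); // true (5 <= 7)

        // Down the ladder is not, which is what the metastore now rejects by default.
        System.out.println(TypeInfoUtils.implicitConvertible(
            PrimitiveCategory.DOUBLE, PrimitiveCategory.INT));    // false (6 > 3)

        // VOID (NULL literals) converts to anything.
        System.out.println(TypeInfoUtils.implicitConvertible(
            PrimitiveCategory.VOID, PrimitiveCategory.DATE));     // true
      }
    }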