Index: serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java
===================================================================
--- serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java	(revision 1527680)
+++ serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java	(working copy)
@@ -25,6 +25,9 @@
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeParams;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.FloatWritable;
@@ -171,4 +174,22 @@
     }
   }
+
+  public void testGetConvertedOI() throws Throwable {
+    // Try with types that have type params
+    PrimitiveTypeInfo varchar5TI =
+        (PrimitiveTypeInfo) TypeInfoFactory.getPrimitiveTypeInfo("varchar(5)");
+    PrimitiveTypeInfo varchar10TI =
+        (PrimitiveTypeInfo) TypeInfoFactory.getPrimitiveTypeInfo("varchar(10)");
+    PrimitiveObjectInspector varchar5OI =
+        PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(varchar5TI);
+    PrimitiveObjectInspector varchar10OI =
+        PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(varchar10TI);
+
+    // The converted OI should carry the varchar type params of the output type (varchar(5)).
+    PrimitiveObjectInspector poi = (PrimitiveObjectInspector)
+        ObjectInspectorConverters.getConvertedOI(varchar10OI, varchar5OI, true);
+    VarcharTypeParams vcParams = (VarcharTypeParams) poi.getTypeParams();
+    assertEquals("varchar length doesn't match", 5, vcParams.getLength());
+  }
 }
Index: serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java	(revision 1527680)
+++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java	(working copy)
@@ -22,7 +22,6 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaStringObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
@@ -188,20 +187,32 @@
     return getConvertedOI(inputOI, outputOI, null, true);
   }
 
-  /*
+  /**
    * Utility function to convert from one object inspector type to another.
+   * The output object inspector should have all of its fields settable.
+   * This condition can be violated only if equalsCheck is true and inputOI is
+   * equal to outputOI.
+   * @param inputOI : input object inspector
+   * @param outputOI : output object inspector
+   * @param oiSettableProperties : cache of object inspector to isSettable mappings, used to
+   *                               avoid recomputing intermediate results.
+   * @param equalsCheck : Do we need to check whether inputOI and outputOI are the same?
+   *                      true : If they are the same, return outputOI directly.
+   *                      false : Do not perform the equality check on inputOI and outputOI.
+   * @return : The output object inspector containing all settable fields. The return value
+   *           can contain non-settable fields only if inputOI equals outputOI and equalsCheck
+   *           is true.
    */
   private static ObjectInspector getConvertedOI(
       ObjectInspector inputOI,
       ObjectInspector outputOI,
       Map<ObjectInspector, Boolean> oiSettableProperties,
       boolean equalsCheck) {
-    ObjectInspector retOI = outputOI.getCategory() == Category.PRIMITIVE ? inputOI : outputOI;
-    // If the inputOI is the same as the outputOI, just return it
-    // If the retOI has all fields settable, return it
+    // 1. If equalsCheck is true and the inputOI is the same as the outputOI, or
+    // 2. if the outputOI already has all fields settable, return the outputOI directly.
     if ((equalsCheck && inputOI.equals(outputOI)) ||
-        ObjectInspectorUtils.hasAllFieldsSettable(retOI, oiSettableProperties) == true) {
-      return retOI;
+        ObjectInspectorUtils.hasAllFieldsSettable(outputOI, oiSettableProperties) == true) {
+      return outputOI;
     }
     // Return the settable equivalent object inspector for primitive categories
     // For eg: for table T containing partitions p1 and p2 (possibly different
@@ -209,11 +220,12 @@
     // T is settable recursively i.e all the nested fields are also settable.
     switch (outputOI.getCategory()) {
     case PRIMITIVE:
-      PrimitiveObjectInspector primInputOI = (PrimitiveObjectInspector) inputOI;
-      return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(primInputOI);
+      // Create a writable object inspector for the primitive type and return it.
+      PrimitiveObjectInspector primOutputOI = (PrimitiveObjectInspector) outputOI;
+      return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(primOutputOI);
     case STRUCT:
       StructObjectInspector structOutputOI = (StructObjectInspector) outputOI;
-      // create a standard settable struct object inspector
+      // Create a standard settable struct object inspector.
       List<? extends StructField> listFields = structOutputOI.getAllStructFieldRefs();
       List<String> structFieldNames = new ArrayList<String>(listFields.size());
       List<ObjectInspector> structFieldObjectInspectors = new ArrayList<ObjectInspector>(
@@ -221,6 +233,10 @@
       for (StructField listField : listFields) {
         structFieldNames.add(listField.getFieldName());
+        // We need to make sure that the underlying fields are settable as well,
+        // hence the recursive call for each field.
+        // Note that equalsCheck is false when invoking getConvertedOI() here because
+        // we need to bypass the initial inputOI.equals(outputOI) check.
         structFieldObjectInspectors.add(getConvertedOI(listField.getFieldObjectInspector(),
             listField.getFieldObjectInspector(), oiSettableProperties, false));
       }
@@ -229,11 +245,13 @@
           structFieldObjectInspectors);
     case LIST:
       ListObjectInspector listOutputOI = (ListObjectInspector) outputOI;
+      // We need to make sure that the list element type is settable.
       return ObjectInspectorFactory.getStandardListObjectInspector(
           getConvertedOI(listOutputOI.getListElementObjectInspector(),
               listOutputOI.getListElementObjectInspector(), oiSettableProperties, false));
     case MAP:
       MapObjectInspector mapOutputOI = (MapObjectInspector) outputOI;
+      // We need to make sure that both the key type and the value type are settable.
       return ObjectInspectorFactory.getStandardMapObjectInspector(
           getConvertedOI(mapOutputOI.getMapKeyObjectInspector(),
               mapOutputOI.getMapKeyObjectInspector(), oiSettableProperties, false),
@@ -246,11 +264,13 @@
       List<ObjectInspector> unionFieldObjectInspectors = new ArrayList<ObjectInspector>(
           unionListFields.size());
       for (ObjectInspector listField : unionListFields) {
+        // We need to make sure that all the fields associated with the union are settable.
         unionFieldObjectInspectors.add(getConvertedOI(listField, listField,
             oiSettableProperties, false));
       }
       return ObjectInspectorFactory.getStandardUnionObjectInspector(unionFieldObjectInspectors);
     default:
+      // Unsupported in-memory structure.
       throw new RuntimeException("Hive internal error: conversion of "
           + inputOI.getTypeName() + " to " + outputOI.getTypeName()
           + " not supported yet.");
Index: ql/src/test/results/clientpositive/alter_varchar2.q.out
===================================================================
--- ql/src/test/results/clientpositive/alter_varchar2.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/alter_varchar2.q.out	(revision 0)
@@ -0,0 +1,97 @@
+PREHOOK: query: -- alter column type, with partitioned table
+drop table if exists alter_varchar2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- alter column type, with partitioned table
+drop table if exists alter_varchar2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table alter_varchar2 (
+  c1 varchar(255)
+) partitioned by (hr int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table alter_varchar2 (
+  c1 varchar(255)
+) partitioned by (hr int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@alter_varchar2
+PREHOOK: query: insert overwrite table alter_varchar2 partition (hr=1)
+  select value from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@alter_varchar2@hr=1
+POSTHOOK: query: insert overwrite table alter_varchar2 partition (hr=1)
+  select value from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@alter_varchar2@hr=1
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=1).c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select c1, length(c1) from alter_varchar2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_varchar2
+PREHOOK: Input: default@alter_varchar2@hr=1
+#### A masked pattern was here ####
+POSTHOOK: query: select c1, length(c1) from alter_varchar2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_varchar2
+POSTHOOK: Input: default@alter_varchar2@hr=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=1).c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+val_238	7
+PREHOOK: query: alter table alter_varchar2 change column c1 c1 varchar(10)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alter_varchar2
+PREHOOK: Output: default@alter_varchar2
+POSTHOOK: query: alter table alter_varchar2 change column c1 c1 varchar(10)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alter_varchar2
+POSTHOOK: Output: default@alter_varchar2
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=1).c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select hr, c1, length(c1) from alter_varchar2 where hr = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_varchar2
+PREHOOK: Input: default@alter_varchar2@hr=1
+#### A masked pattern was here ####
+POSTHOOK: query: select hr, c1, length(c1) from alter_varchar2 where hr = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_varchar2
+POSTHOOK: Input: default@alter_varchar2@hr=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=1).c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+1	val_238	7
+PREHOOK: query: insert overwrite table alter_varchar2 partition (hr=2)
+  select key from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@alter_varchar2@hr=2
+POSTHOOK: query: insert overwrite table alter_varchar2 partition (hr=2)
+  select key from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@alter_varchar2@hr=2
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=1).c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=2).c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: select hr, c1, length(c1) from alter_varchar2 where hr = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_varchar2
+PREHOOK: Input: default@alter_varchar2@hr=1
+#### A masked pattern was here ####
+POSTHOOK: query: select hr, c1, length(c1) from alter_varchar2 where hr = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_varchar2
+POSTHOOK: Input: default@alter_varchar2@hr=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=1).c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=2).c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+1	val_238	7
+PREHOOK: query: select hr, c1, length(c1) from alter_varchar2 where hr = 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_varchar2
+PREHOOK: Input: default@alter_varchar2@hr=2
+#### A masked pattern was here ####
+POSTHOOK: query: select hr, c1, length(c1) from alter_varchar2 where hr = 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_varchar2
+POSTHOOK: Input: default@alter_varchar2@hr=2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=1).c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: alter_varchar2 PARTITION(hr=2).c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+2	238	3
Index: ql/src/test/queries/clientpositive/alter_varchar2.q
===================================================================
--- ql/src/test/queries/clientpositive/alter_varchar2.q	(revision 0)
+++ ql/src/test/queries/clientpositive/alter_varchar2.q	(revision 0)
@@ -0,0 +1,22 @@
+
+-- alter column type, with partitioned table
+drop table if exists alter_varchar2;
+
+create table alter_varchar2 (
+  c1 varchar(255)
+) partitioned by (hr int);
+
+insert overwrite table alter_varchar2 partition (hr=1)
+  select value from src limit 1;
+
+select c1, length(c1) from alter_varchar2;
+
+alter table alter_varchar2 change column c1 c1 varchar(10);
+
+select hr, c1, length(c1) from alter_varchar2 where hr = 1;
+
+insert overwrite table alter_varchar2 partition (hr=2)
+  select key from src limit 1;
+
+select hr, c1, length(c1) from alter_varchar2 where hr = 1;
+select hr, c1, length(c1) from alter_varchar2 where hr = 2;
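
For context, the sketch below is not part of the patch; it is a minimal illustration of the call pattern the change affects. It mirrors the varchar(10) -> varchar(5) case from the new unit test and the altered-partition scenario in alter_varchar2.q: a settable output OI is built from the table-side (output) inspector, so it keeps the table's varchar length, and a Converter is then obtained to convert partition rows to the table schema. The class and variable names are hypothetical, and it assumes the public two-argument ObjectInspectorConverters.getConvertedOI overload that the patch's context lines delegate to.

// Illustrative sketch only -- not part of the patch.
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ConvertedOISketch {
  public static void main(String[] args) {
    // Partition data was written as varchar(10); the table column is now varchar(5).
    PrimitiveTypeInfo varchar10TI =
        (PrimitiveTypeInfo) TypeInfoFactory.getPrimitiveTypeInfo("varchar(10)");
    PrimitiveTypeInfo varchar5TI =
        (PrimitiveTypeInfo) TypeInfoFactory.getPrimitiveTypeInfo("varchar(5)");
    ObjectInspector partitionOI =
        PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(varchar10TI);
    ObjectInspector tableOI =
        PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(varchar5TI);

    // With the fix, the settable OI is derived from the output (table) side, so it
    // carries the varchar(5) type params instead of the partition's varchar(10).
    PrimitiveObjectInspector convertedOI = (PrimitiveObjectInspector)
        ObjectInspectorConverters.getConvertedOI(partitionOI, tableOI);

    // A converter from the partition OI to the table OI, as used when reading
    // partitions whose schema differs from the (altered) table schema.
    Converter converter = ObjectInspectorConverters.getConverter(partitionOI, tableOI);

    System.out.println(convertedOI.getTypeName());
    System.out.println(converter.getClass().getName());
  }
}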