Index: jdbc/src/test/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java
===================================================================
--- jdbc/src/test/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java	(revision 1343123)
+++ jdbc/src/test/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java	(working copy)
@@ -55,8 +55,6 @@
   private static final String partitionedTableName = "testHiveJdbcDriverPartitionedTable";
   private static final String partitionedColumnName = "partcolabc";
   private static final String partitionedColumnValue = "20090619";
-  private static final String partitionedIntColumnName = "partcolint";
-  private static final int partitionedIntColumnValue = 777;
   private static final String partitionedTableComment = "Partitioned table";
   private static final String dataTypeTableName = "testDataTypeTable";
   private static final String dataTypeTableComment = "Table with many column data types";
@@ -124,16 +122,14 @@
     res = stmt.executeQuery("create table " + partitionedTableName
         + " (under_col int, value string) comment '"+partitionedTableComment
-        +"' partitioned by (" + partitionedColumnName + " STRING,"
-        + partitionedIntColumnName + " INT)");
+        +"' partitioned by (" + partitionedColumnName + " STRING)");
     assertFalse(res.next());

     // load data
     res = stmt.executeQuery("load data local inpath '"
         + dataFilePath.toString() + "' into table " + partitionedTableName
         + " PARTITION (" + partitionedColumnName + "="
-        + partitionedColumnValue + ","
-        + partitionedIntColumnName + "=" + partitionedIntColumnValue + ")");
+        + partitionedColumnValue + ")");
     assertFalse(res.next());

     // drop table. ignore error.
@@ -441,7 +437,7 @@
     int i = 0;

     ResultSetMetaData meta = res.getMetaData();
-    int expectedColCount = isPartitionTable ? 4 : 2;
+    int expectedColCount = isPartitionTable ? 3 : 2;
     assertEquals(
       "Unexpected column count", expectedColCount, meta.getColumnCount());
@@ -455,8 +451,6 @@
     if (isPartitionTable) {
       assertEquals(res.getString(3), partitionedColumnValue);
       assertEquals(res.getString(3), res.getString(partitionedColumnName));
-      assertEquals(res.getInt(4), partitionedIntColumnValue);
-      assertEquals(res.getInt(4), res.getInt(partitionedIntColumnName));
     }
     assertFalse("Last result value was not null", res.wasNull());
     assertNull("No warnings should be found on ResultSet", res
@@ -658,7 +652,7 @@
   public void testMetaDataGetColumns() throws SQLException {
     Map<String[], Integer> tests = new HashMap<String[], Integer>();
     tests.put(new String[]{"testhivejdbcdriver\\_table", null}, 2);
-    tests.put(new String[]{"testhivejdbc%", null}, 8);
+    tests.put(new String[]{"testhivejdbc%", null}, 7);
     tests.put(new String[]{"testhiveJDBC%", null}, 8);
     tests.put(new String[]{"testhiveJDB\\C%", null}, 0);
     tests.put(new String[]{"%jdbcdriver\\_table", null}, 2);
@@ -1007,87 +1001,6 @@
     }
   }

-  public void testPartitionedResultSetMetaData() throws SQLException {
-    Statement stmt = con.createStatement();
-
-    ResultSet res = stmt.executeQuery(
-        "select under_col, value, partcolabc, partcolint " +
-        "from " + partitionedTableName + " limit 1");
-    ResultSetMetaData meta = res.getMetaData();
-
-    ResultSet colRS = con.getMetaData().getColumns(null, null,
-        partitionedTableName.toLowerCase(), null);
-
-    assertEquals(4, meta.getColumnCount());
-
-    assertTrue(colRS.next());
-
-    assertEquals("under_col", meta.getColumnName(1));
-    assertEquals(Types.INTEGER, meta.getColumnType(1));
-    assertEquals("int", meta.getColumnTypeName(1));
-    assertEquals(11, meta.getColumnDisplaySize(1));
-    assertEquals(10, meta.getPrecision(1));
-    assertEquals(0, meta.getScale(1));
-
-    assertEquals("under_col", colRS.getString("COLUMN_NAME"));
-    assertEquals(Types.INTEGER, colRS.getInt("DATA_TYPE"));
-    assertEquals("int", colRS.getString("TYPE_NAME").toLowerCase());
-    assertEquals(meta.getPrecision(1), colRS.getInt("COLUMN_SIZE"));
-    assertEquals(meta.getScale(1), colRS.getInt("DECIMAL_DIGITS"));
-
-    assertTrue(colRS.next());
-
-    assertEquals("value", meta.getColumnName(2));
-    assertEquals(Types.VARCHAR, meta.getColumnType(2));
-    assertEquals("string", meta.getColumnTypeName(2));
-    assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(2));
-    assertEquals(Integer.MAX_VALUE, meta.getPrecision(2));
-    assertEquals(0, meta.getScale(2));
-
-    assertEquals("value", colRS.getString("COLUMN_NAME"));
-    assertEquals(Types.VARCHAR, colRS.getInt("DATA_TYPE"));
-    assertEquals("string", colRS.getString("TYPE_NAME").toLowerCase());
-    assertEquals(meta.getPrecision(2), colRS.getInt("COLUMN_SIZE"));
-    assertEquals(meta.getScale(2), colRS.getInt("DECIMAL_DIGITS"));
-
-    assertTrue(colRS.next());
-
-    assertEquals("partcolabc", meta.getColumnName(3));
-    assertEquals(Types.VARCHAR, meta.getColumnType(3));
-    assertEquals("string", meta.getColumnTypeName(3));
-    assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(3));
-    assertEquals(Integer.MAX_VALUE, meta.getPrecision(3));
-    assertEquals(0, meta.getScale(3));
-
-    assertEquals("partcolabc", colRS.getString("COLUMN_NAME"));
-    assertEquals(Types.VARCHAR, colRS.getInt("DATA_TYPE"));
-    assertEquals("string", colRS.getString("TYPE_NAME").toLowerCase());
-    assertEquals(meta.getPrecision(3), colRS.getInt("COLUMN_SIZE"));
-    assertEquals(meta.getScale(3), colRS.getInt("DECIMAL_DIGITS"));
-
-    assertTrue(colRS.next());
-
-    assertEquals("partcolint", meta.getColumnName(4));
-    assertEquals(Types.INTEGER, meta.getColumnType(4));
-    assertEquals("int", meta.getColumnTypeName(4));
-    assertEquals(11, meta.getColumnDisplaySize(4));
-    assertEquals(10, meta.getPrecision(4));
-    assertEquals(0, meta.getScale(4));
-
-    assertEquals("partcolint", colRS.getString("COLUMN_NAME"));
-    assertEquals(Types.INTEGER, colRS.getInt("DATA_TYPE"));
-    assertEquals("int", colRS.getString("TYPE_NAME").toLowerCase());
-    assertEquals(meta.getPrecision(4), colRS.getInt("COLUMN_SIZE"));
-    assertEquals(meta.getScale(4), colRS.getInt("DECIMAL_DIGITS"));
-
-    for (int i = 1; i <= meta.getColumnCount(); i++) {
-      assertFalse(meta.isAutoIncrement(i));
-      assertFalse(meta.isCurrency(i));
-      assertEquals(ResultSetMetaData.columnNullable, meta.isNullable(i));
-    }
-  }
-
   // [url] [host] [port] [db]
   private static final String[][] URL_PROPERTIES = new String[][] {
       {"jdbc:hive://", "", "", "default"},
Index: ql/src/test/results/clientpositive/partcols1.q.out
===================================================================
--- ql/src/test/results/clientpositive/partcols1.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/partcols1.q.out	(working copy)
@@ -0,0 +1,59 @@
+PREHOOK: query: create table test1(col1 string) partitioned by (partitionId int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table test1(col1 string) partitioned by (partitionId int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@test1
+PREHOOK: query: insert overwrite table test1 partition (partitionId=1)
+ select key from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test1@partitionid=1
+POSTHOOK: query: insert overwrite table test1 partition (partitionId=1)
+ select key from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test1@partitionid=1
+POSTHOOK: Lineage: test1 PARTITION(partitionid=1).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: FROM (
+ FROM test1
+ SELECT partitionId, 111 as col2, 222 as col3, 333 as col4
+ WHERE partitionId = 1
+ DISTRIBUTE BY partitionId
+ SORT BY partitionId
+ ) b
+
+SELECT TRANSFORM(
+ b.partitionId,b.col2,b.col3,b.col4
+ )
+
+ USING '/bin/cat' as (a,b,c,d)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test1@partitionid=1
+#### A masked pattern was here ####
+POSTHOOK: query: FROM (
+ FROM test1
+ SELECT partitionId, 111 as col2, 222 as col3, 333 as col4
+ WHERE partitionId = 1
+ DISTRIBUTE BY partitionId
+ SORT BY partitionId
+ ) b
+
+SELECT TRANSFORM(
+ b.partitionId,b.col2,b.col3,b.col4
+ )
+
+ USING '/bin/cat' as (a,b,c,d)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test1@partitionid=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: test1 PARTITION(partitionid=1).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+1	111	222	333
+1	111	222	333
+1	111	222	333
+1	111	222	333
+1	111	222	333
+1	111	222	333
+1	111	222	333
+1	111	222	333
+1	111	222	333
+1	111	222	333
Index: ql/src/test/queries/clientpositive/partcols1.q
===================================================================
--- ql/src/test/queries/clientpositive/partcols1.q	(revision 0)
+++ ql/src/test/queries/clientpositive/partcols1.q	(working copy)
@@ -0,0 +1,18 @@
+
+create table test1(col1 string) partitioned by (partitionId int);
+insert overwrite table test1 partition (partitionId=1)
+ select key from src limit 10;
+
+ FROM (
+ FROM test1
+ SELECT partitionId, 111 as col2, 222 as col3, 333 as col4
+ WHERE partitionId = 1
+ DISTRIBUTE BY partitionId
+ SORT BY partitionId
+ ) b
+
+SELECT TRANSFORM(
+ b.partitionId,b.col2,b.col3,b.col4
+ )
+
+ USING '/bin/cat' as (a,b,c,d);
\ No newline at end of file
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(revision 1343123)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(working copy)
@@ -42,7 +42,6 @@
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -174,6 +173,7 @@
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.hive.metastore.TableType;

 /**
  * Implementation of the semantic analyzer.
@@ -6477,8 +6477,10 @@
     // Finally add the partitioning columns
     for (FieldSchema part_col : tab.getPartCols()) {
       LOG.trace("Adding partition col: " + part_col);
+      // TODO: use the right type by calling part_col.getType() instead of
+      // String.class
       rwsch.put(alias, part_col.getName(), new ColumnInfo(part_col.getName(),
-          TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), alias, true));
+          TypeInfoFactory.stringTypeInfo, alias, true));
     }

     //put all virutal columns in RowResolver.
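
For reviewers, a minimal standalone sketch (not part of the patch) of what the SemanticAnalyzer hunk toggles: the patch pins every partition column's ColumnInfo to stringTypeInfo and leaves a TODO to restore type-aware lookup via part_col.getType(). Only the two TypeInfoFactory calls below come from the hunk itself; the class name and the FieldSchema value are made up for illustration (mirroring the int partition column in partcols1.q).

// Illustrative sketch only; assumes Hive's metastore and serde2 jars on the classpath.
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class PartColTypeInfoSketch {
  public static void main(String[] args) {
    // A partition column declared as INT, like partitionId in the new test.
    FieldSchema partCol = new FieldSchema("partitionid", "int", null);

    // Lookup removed by this patch: honor the declared partition column type.
    TypeInfo declared = TypeInfoFactory.getPrimitiveTypeInfo(partCol.getType());

    // Lookup added by this patch: treat every partition column as a string,
    // until the TODO in genTablePlan is addressed.
    TypeInfo forced = TypeInfoFactory.stringTypeInfo;

    System.out.println(declared.getTypeName()); // prints: int
    System.out.println(forced.getTypeName());   // prints: string
  }
}

Pinning the type to string sidesteps mismatches between a non-string declared partition-column type and the string-valued partition keys the metastore stores, which is exactly the path partcols1.q exercises by selecting an int partition column through a TRANSFORM.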