Index: ql/src/test/results/clientnegative/dyn_part4.q.out
===================================================================
--- ql/src/test/results/clientnegative/dyn_part4.q.out	(revision 0)
+++ ql/src/test/results/clientnegative/dyn_part4.q.out	(revision 0)
@@ -0,0 +1,6 @@
+PREHOOK: query: create table nzhang_part4 (key string) partitioned by (ds string, hr string, value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table nzhang_part4 (key string) partitioned by (ds string, hr string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@nzhang_part4
+FAILED: Error in semantic analysis: Partition columns in partition specification are not the same as that defined in the table schema. The names and orders have to be exactly the same. Partition columns in the table schema are: (ds, hr, value), while the partitions specified in the query are: (value, ds, hr).
Index: ql/src/test/queries/clientnegative/dyn_part4.q
===================================================================
--- ql/src/test/queries/clientnegative/dyn_part4.q	(revision 0)
+++ ql/src/test/queries/clientnegative/dyn_part4.q	(revision 0)
@@ -0,0 +1,7 @@
+create table nzhang_part4 (key string) partitioned by (ds string, hr string, value string);
+
+set hive.exec.dynamic.partition=true;
+
+insert overwrite table nzhang_part4 partition(value = 'aaa', ds='11', hr) select key, hr from srcpart where ds is not null;
+
+drop table nzhang_part4;
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java	(revision 19552)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java	(working copy)
@@ -186,7 +186,9 @@
   NO_COMPARE_BIGINT_DOUBLE("In strict mode, comparing bigints and doubles is not allowed, "
       + "it may result in a loss of precision. "
       + "If you really want to perform the operation, set hive.mapred.mode=nonstrict"),
-  FUNCTIONS_ARE_NOT_SUPPORTED_IN_ORDER_BY("functions are not supported in order by"),
+  FUNCTIONS_ARE_NOT_SUPPORTED_IN_ORDER_BY("functions are not supported in order by"),
+  PARTSPEC_DIFFER_FROM_SCHEMA("Partition columns in partition specification are not the same as "
+      + "that defined in the table schema. The names and orders have to be exactly the same."),
   ;
 
   private String mesg;
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java	(revision 19552)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java	(working copy)
@@ -23,6 +23,7 @@
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
@@ -675,6 +676,19 @@
           conf.getVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE).equalsIgnoreCase("strict")) {
         throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_STRICT_MODE.getMsg());
       }
+
+      // check the partitions in partSpec be the same as defined in table schema
+      if (partSpec.keySet().size() != parts.size()) {
+        ErrorPartSpec(partSpec, parts);
+      }
+      Iterator<String> itrPsKeys = partSpec.keySet().iterator();
+      for (FieldSchema fs: parts) {
+        if (!itrPsKeys.next().toLowerCase().equals(fs.getName().toLowerCase())) {
+          ErrorPartSpec(partSpec, parts);
+        }
+      }
+
+      // check if static partition appear after dynamic partitions
       for (FieldSchema fs: parts) {
         if (partSpec.get(fs.getName().toLowerCase()) == null) {
           if (numStaPart > 0) { // found a DP, but there exists ST as subpartition
@@ -714,6 +728,26 @@
       }
     }
 
+    private void ErrorPartSpec(Map<String, String> partSpec, List<FieldSchema> parts)
+        throws SemanticException {
+      StringBuilder sb = new StringBuilder("Partition columns in the table schema are: (");
+      for (FieldSchema fs: parts) {
+        sb.append(fs.getName()).append(", ");
+      }
+      sb.setLength(sb.length() - 2); // remove the last ", "
+      sb.append("), while the partitions specified in the query are: (");
+
+      Iterator<String> itrPsKeys = partSpec.keySet().iterator();
+      while (itrPsKeys.hasNext()) {
+        sb.append(itrPsKeys.next()).append(", ");
+      }
+      sb.setLength(sb.length() - 2); // remove the last ", "
+      sb.append(").");
+
+      throw new SemanticException(
+          ErrorMsg.PARTSPEC_DIFFER_FROM_SCHEMA.getMsg(sb.toString()));
+    }
+
     public Map<String, String> getPartSpec() {
       return this.partSpec;
     }
@@ -730,6 +764,7 @@
       return tableHandle.toString();
     }
   }
+
 }
 
 /**