diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 4450ad3477ff5bb20f7764982873bbcc16b3fae4..10ae157ab22f32f055a10da24ab038c6c8109258 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -3094,7 +3094,8 @@ private static Path createDummyFileForEmptyPartition(Path path, JobConf job, Map
     PartitionDesc partDesc = work.getPathToPartitionInfo().get(strPath);
     boolean nonNative = partDesc.getTableDesc().isNonNative();
     boolean oneRow = partDesc.getInputFileFormatClass() == OneNullRowInputFormat.class;
-    Properties props = partDesc.getProperties();
+    Properties props = SerDeUtils.createOverlayedProperties(
+        partDesc.getTableDesc().getProperties(), partDesc.getProperties());
     Class outFileFormat = partDesc.getOutputFileFormatClass();
 
     if (nonNative) {
diff --git a/ql/src/test/queries/clientpositive/avro_partitioned.q b/ql/src/test/queries/clientpositive/avro_partitioned.q
index 51cab9a3fdee09121ad09e317fad0330a21be125..a06e7c4e455d6a1ad9ccf1d9c8f81c6ee8000c46 100644
--- a/ql/src/test/queries/clientpositive/avro_partitioned.q
+++ b/ql/src/test/queries/clientpositive/avro_partitioned.q
@@ -72,7 +72,9 @@ SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5;
 SELECT * FROM episodes_partitioned WHERE doctor_pt = 6;
 -- Fetch w/non-existent partition
 SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5;
-
+-- Alter table add an empty partition
+ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7);
+SELECT COUNT(*) FROM episodes_partitioned;
 
 -- Verify that reading from an Avro partition works
 -- even if it has an old schema relative to the current table level schema
diff --git a/ql/src/test/results/clientpositive/avro_partitioned.q.out b/ql/src/test/results/clientpositive/avro_partitioned.q.out
index 5c312e2da4996292d6f831a5628fd0d8b6ba0580..108714b073771dac6e9ca74ebfd177fde0e4429e 100644
--- a/ql/src/test/results/clientpositive/avro_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/avro_partitioned.q.out
@@ -236,6 +236,40 @@ SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@episodes_partitioned
 #### A masked pattern was here ####
+PREHOOK: query: -- Alter table add an empty partition
+ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@episodes_partitioned
+POSTHOOK: query: -- Alter table add an empty partition
+ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@episodes_partitioned
+POSTHOOK: Output: default@episodes_partitioned@doctor_pt=7
+PREHOOK: query: SELECT COUNT(*) FROM episodes_partitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@episodes_partitioned
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=1
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=2
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=4
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=5
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=7
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM episodes_partitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@episodes_partitioned
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=1
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=2
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=4
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=5
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=7
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
+#### A masked pattern was here ####
+8
 PREHOOK: query: -- Verify that reading from an Avro partition works
 -- even if it has an old schema relative to the current table level schema
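
Note on the Utilities.java hunk above: the sketch below is an illustration of the overlay semantics the patched createDummyFileForEmptyPartition relies on, not Hive's actual SerDeUtils source. The idea is that table-level properties act as defaults and partition-level properties override them, so table-level settings such as the Avro schema remain visible when a newly added partition carries no properties of its own. The class and method names (OverlayedPropsSketch, overlay) are hypothetical.

import java.util.Properties;

public class OverlayedPropsSketch {

  // Overlay partition properties on top of table properties:
  // table properties serve as defaults, partition properties win on conflict.
  static Properties overlay(Properties tblProps, Properties partProps) {
    Properties props = new Properties(tblProps); // tblProps become the defaults
    if (partProps != null) {
      props.putAll(partProps);                   // partition-level values override
    }
    return props;
  }

  public static void main(String[] args) {
    Properties tbl = new Properties();
    tbl.setProperty("avro.schema.literal",
        "{\"type\":\"record\",\"name\":\"episodes\",\"fields\":[]}"); // table-level Avro schema
    tbl.setProperty("serialization.format", "1");

    Properties part = new Properties();          // empty partition: no properties of its own
    Properties merged = overlay(tbl, part);

    // The table-level Avro schema is still visible for the empty partition.
    System.out.println(merged.getProperty("avro.schema.literal"));
    System.out.println(merged.getProperty("serialization.format")); // "1"
  }
}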