diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
index 3e0c7cb..113d61f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
@@ -83,7 +83,7 @@ public ParquetRecordReaderWrapper(
     Configuration conf = jobConf;
     if (skipTimestampConversion ^ HiveConf.getBoolVar(
         conf, HiveConf.ConfVars.HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION)) {
-      conf = new JobConf(oldJobConf);
+      conf = new JobConf(jobConf);
       HiveConf.setBoolVar(conf,
           HiveConf.ConfVars.HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION, skipTimestampConversion);
     }
diff --git ql/src/test/queries/clientpositive/parquet_ppd_char.q ql/src/test/queries/clientpositive/parquet_ppd_char.q
index 40cd3f6..386fb25 100644
--- ql/src/test/queries/clientpositive/parquet_ppd_char.q
+++ ql/src/test/queries/clientpositive/parquet_ppd_char.q
@@ -6,7 +6,6 @@
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET hive.optimize.ppd=true;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
-set hive.parquet.timestamp.skip.conversion=false;
 
 create table newtypestbl_n3(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
diff --git ql/src/test/queries/clientpositive/parquet_ppd_char2.q ql/src/test/queries/clientpositive/parquet_ppd_char2.q
index b2d8a75..ef965d7 100644
--- ql/src/test/queries/clientpositive/parquet_ppd_char2.q
+++ ql/src/test/queries/clientpositive/parquet_ppd_char2.q
@@ -5,7 +5,6 @@
 SET hive.vectorized.execution.enabled=false;
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
-SET hive.parquet.timestamp.skip.conversion=false;
 drop table if exists ppd_char_test;
 create table ppd_char_test (id int, a char(10), b char(10), c varchar(10), d varchar(10)) stored as parquet;
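
Note on the Java hunk: the wrapper clones the incoming JobConf only when the caller's skipTimestampConversion flag disagrees (XOR) with the value already present in the configuration, then sets the flag on the copy. Cloning oldJobConf took the copy from a stale reference, so settings carried by the current jobConf could be silently dropped; cloning jobConf preserves them. The .q changes presumably drop the explicit hive.parquet.timestamp.skip.conversion override that was only needed to work around this. Below is a minimal standalone sketch of the copy-then-override pattern, with a hypothetical config key standing in for HiveConf.ConfVars.HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION; it is not the actual Hive code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;

public class SkipConversionSketch {
  // Hypothetical key standing in for HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION.
  static final String SKIP_KEY = "example.parquet.timestamp.skip.conversion";

  // Returns a Configuration whose skip flag matches the request. The shared
  // JobConf must not be mutated, so when the requested value differs (XOR),
  // clone it and override the flag on the copy. Cloning any other, stale
  // JobConf would lose settings carried by the current one.
  static Configuration withSkipConversion(JobConf jobConf, boolean skipTimestampConversion) {
    Configuration conf = jobConf;
    if (skipTimestampConversion ^ conf.getBoolean(SKIP_KEY, false)) {
      conf = new JobConf(jobConf); // copy the *current* JobConf, not a stale reference
      conf.setBoolean(SKIP_KEY, skipTimestampConversion);
    }
    return conf;
  }

  public static void main(String[] args) {
    JobConf jobConf = new JobConf();
    Configuration conf = withSkipConversion(jobConf, true);
    System.out.println(conf.getBoolean(SKIP_KEY, false));    // true: override lives on the clone
    System.out.println(jobConf.getBoolean(SKIP_KEY, false)); // false: the shared JobConf is untouched
  }
}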