diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 2d577d40b9..d84e00ce3d 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1989,7 +1989,7 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
         "Maximum fraction of heap that can be used by Parquet file writers in one task.\n" +
         "It is for avoiding OutOfMemory error in tasks. Work with Parquet 1.6.0 and above.\n" +
         "This config parameter is defined in Parquet, so that it does not start with 'hive.'."),
-    HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", false,
+    HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", true,
         "Current Hive implementation of parquet stores timestamps to UTC, this flag allows skipping of the conversion" +
         "on reading parquet files from other tools"),
     HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION("hive.avro.timestamp.skip.conversion", false,
diff --git ql/src/test/queries/clientpositive/parquet_external_time.q ql/src/test/queries/clientpositive/parquet_external_time.q
index 19a7059f20..d83125cdba 100644
--- ql/src/test/queries/clientpositive/parquet_external_time.q
+++ ql/src/test/queries/clientpositive/parquet_external_time.q
@@ -1,5 +1,4 @@
 set hive.vectorized.execution.enabled=false;
-set hive.parquet.timestamp.skip.conversion=true;

 create table timetest_parquet(t timestamp) stored as parquet;

diff --git ql/src/test/queries/clientpositive/parquet_ppd_char.q ql/src/test/queries/clientpositive/parquet_ppd_char.q
index 4230d8c1dd..386fb2589f 100644
--- ql/src/test/queries/clientpositive/parquet_ppd_char.q
+++ ql/src/test/queries/clientpositive/parquet_ppd_char.q
@@ -1,7 +1,6 @@
 --! qt:dataset:src1
 --! qt:dataset:src
-set hive.parquet.timestamp.skip.conversion=true;
 set hive.vectorized.execution.enabled=false;
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET hive.optimize.ppd=true;
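
Note on the default flip: with hive.parquet.timestamp.skip.conversion now defaulting to true, the two .q tests above no longer need to set it explicitly, and any session that relied on the previous default must opt back in. A minimal sketch of that per-session override, in the same style as the .q tests (the table name legacy_ts is illustrative and not part of this patch):

    -- restore the pre-patch default for this session only
    set hive.parquet.timestamp.skip.conversion=false;

    -- per the config description above, reads of Parquet timestamps
    -- written by other tools now go through the UTC conversion again
    create table legacy_ts (t timestamp) stored as parquet;
    select t from legacy_ts;

The set statement is session-scoped; putting the same property in hive-site.xml would restore the old behavior globally.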