This is an automated email from the ASF dual-hosted git repository.

jcamacho pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 21177ef  HIVE-21888: Set hive.parquet.timestamp.skip.conversion default to true (Karen Coppage via Jesus Camacho Rodriguez)
21177ef is described below

commit 21177ef1517a44766f724d56ec57d6b7cd8f5388
Author: Karen Coppage <kcopp.apa...@gmail.com>
AuthorDate: Fri Jun 28 09:48:44 2019 -0700

    HIVE-21888: Set hive.parquet.timestamp.skip.conversion default to true (Karen Coppage via Jesus Camacho Rodriguez)
---
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java  | 2 +-
 ql/src/test/queries/clientpositive/parquet_external_time.q | 1 -
 ql/src/test/queries/clientpositive/parquet_ppd_char.q      | 1 -
 3 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 99f6c3d..48b49ce 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1989,7 +1989,7 @@ public class HiveConf extends Configuration {
         "Maximum fraction of heap that can be used by Parquet file writers in one task.\n" +
         "It is for avoiding OutOfMemory error in tasks. Work with Parquet 1.6.0 and above.\n" +
         "This config parameter is defined in Parquet, so that it does not start with 'hive.'."),
-    HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", false,
+    HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", true,
       "Current Hive implementation of parquet stores timestamps to UTC, this flag allows skipping of the conversion" +
       "on reading parquet files from other tools"),
     HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION("hive.avro.timestamp.skip.conversion", false,
diff --git a/ql/src/test/queries/clientpositive/parquet_external_time.q b/ql/src/test/queries/clientpositive/parquet_external_time.q
index 19a7059..d83125c 100644
--- a/ql/src/test/queries/clientpositive/parquet_external_time.q
+++ b/ql/src/test/queries/clientpositive/parquet_external_time.q
@@ -1,5 +1,4 @@
 set hive.vectorized.execution.enabled=false;
-set hive.parquet.timestamp.skip.conversion=true;
 
 create table timetest_parquet(t timestamp) stored as parquet;
 
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_char.q b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
index 4230d8c..386fb25 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_char.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
@@ -1,7 +1,6 @@
 --! qt:dataset:src1
 --! qt:dataset:src
 
-set hive.parquet.timestamp.skip.conversion=true;
 set hive.vectorized.execution.enabled=false;
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET hive.optimize.ppd=true;

Reply via email to