This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 2ff3ed6d83a HIVE-28633: Insert to Bucketed Partition table fails with CBO=false and dynamic sort partition optimization enabled (#5563). (Indhumathi Muthumurugesh, reviewed by Laszlo Bodor, Soumyakanti Das)
2ff3ed6d83a is described below

commit 2ff3ed6d83a1633d17bbf5b3c852b34b51548ef8
Author: Indhumathi <[email protected]>
AuthorDate: Mon Dec 2 19:10:44 2024 +0530

    HIVE-28633: Insert to Bucketed Partition table fails with CBO=false and dynamic sort partition optimization enabled (#5563). (Indhumathi Muthumurugesh, reviewed by Laszlo Bodor, Soumyakanti Das)
---
 .../test/resources/testconfiguration.properties    |  1 +
 .../apache/hadoop/hive/ql/parse/TezCompiler.java   |  3 +-
 .../test/queries/clientpositive/dynpart_bucket.q   |  9 +++++
 .../clientpositive/tez/dynpart_bucket.q.out        | 46 ++++++++++++++++++++++
 4 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index e706ebab6c8..21e639499fa 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -19,6 +19,7 @@ minitez.query.files=\
   acid_vectorization_original_tez.q,\
   bucketmapjoin_with_subquery.q,\
   delete_orig_table.q,\
+  dynpart_bucket.q,\
   explainanalyze_1.q,\
   explainanalyze_3.q,\
   explainanalyze_4.q,\
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
index 5aafe93cc58..86088a8fcdc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
@@ -205,7 +205,8 @@ public class TezCompiler extends TaskCompiler {
     // run Sorted dynamic partition optimization
     if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING) &&
         HiveConf.getVar(procCtx.conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE).equals("nonstrict") &&
-        !HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVE_OPT_LIST_BUCKETING)) {
+        !HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVE_OPT_LIST_BUCKETING) &&
+         HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVE_CBO_ENABLED)) {
       perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
       new SortedDynPartitionOptimizer().transform(procCtx.parseContext);
       perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Sorted dynamic partition optimization");
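The effect of this hunk: the sorted dynamic partition optimization now runs only when the cost-based optimizer is enabled, so a plan compiled with hive.cbo.enable=false no longer goes through SortedDynPartitionOptimizer, which is the path that broke inserts into bucketed partitioned tables. As a minimal sketch, the combined guard can be read as a standalone predicate (the HiveConf getters and ConfVars names are the real ones used in the hunk above; the wrapper class and method name are illustrative only, not part of the patch):

    import org.apache.hadoop.hive.conf.HiveConf;

    // Illustrative wrapper around the condition in TezCompiler above.
    final class SortedDynPartGuard {
      static boolean shouldRunSortedDynPartOptimizer(HiveConf conf) {
        return HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING)                 // dynamic partitioning on
            && "nonstrict".equals(HiveConf.getVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE))
            && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_OPT_LIST_BUCKETING)             // not list bucketing
            && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_ENABLED);                    // new condition: CBO must be on
      }
    }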
diff --git a/ql/src/test/queries/clientpositive/dynpart_bucket.q b/ql/src/test/queries/clientpositive/dynpart_bucket.q
new file mode 100644
index 00000000000..c7b7fc9ac9d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/dynpart_bucket.q
@@ -0,0 +1,9 @@
+set hive.cbo.enable=false;
+
+drop table if exists dynpart_bucket;
+CREATE TABLE dynpart_bucket (bn string) PARTITIONED BY (br string) CLUSTERED BY (bn) INTO 2 BUCKETS ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE ;
+INSERT into TABLE dynpart_bucket VALUES ('tv_0', 'tv');
+set hive.cbo.enable=true;
+INSERT into TABLE dynpart_bucket VALUES ('tv_1', 'tv');
+select * from dynpart_bucket;
+
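The test first inserts with hive.cbo.enable=false (the previously failing path; with this patch the optimizer is simply skipped), then inserts again with CBO back on, and the final select verifies that both rows landed in the br=tv partition. Programmatically, the toggle the test performs via "set" would look like the sketch below, reusing the illustrative SortedDynPartGuard from the previous sketch (HiveConf and its setBoolVar/getBoolVar are real API; the scenario is a sketch, not how the qtest framework actually drives queries):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class CboToggleSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Mirrors "set hive.cbo.enable=false;" before the first INSERT:
        conf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, false);
        // With CBO off, the new guard is false, so SortedDynPartitionOptimizer is skipped.
        System.out.println(SortedDynPartGuard.shouldRunSortedDynPartOptimizer(conf)); // false
        // Mirrors "set hive.cbo.enable=true;" before the second INSERT:
        conf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, true);
        // Now the optimizer may run again, provided the other preconditions hold.
      }
    }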
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_bucket.q.out b/ql/src/test/results/clientpositive/tez/dynpart_bucket.q.out
new file mode 100644
index 00000000000..634540367cf
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/dynpart_bucket.q.out
@@ -0,0 +1,46 @@
+PREHOOK: query: drop table if exists dynpart_bucket
+PREHOOK: type: DROPTABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: drop table if exists dynpart_bucket
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: database:default
+PREHOOK: query: CREATE TABLE dynpart_bucket (bn string) PARTITIONED BY (br string) CLUSTERED BY (bn) INTO 2 BUCKETS ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dynpart_bucket
+POSTHOOK: query: CREATE TABLE dynpart_bucket (bn string) PARTITIONED BY (br string) CLUSTERED BY (bn) INTO 2 BUCKETS ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dynpart_bucket
+PREHOOK: query: INSERT into TABLE dynpart_bucket VALUES ('tv_0', 'tv')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@dynpart_bucket
+POSTHOOK: query: INSERT into TABLE dynpart_bucket VALUES ('tv_0', 'tv')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@dynpart_bucket
+POSTHOOK: Output: default@dynpart_bucket@br=tv
+POSTHOOK: Lineage: dynpart_bucket PARTITION(br=tv).bn SCRIPT []
+PREHOOK: query: INSERT into TABLE dynpart_bucket VALUES ('tv_1', 'tv')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@dynpart_bucket
+POSTHOOK: query: INSERT into TABLE dynpart_bucket VALUES ('tv_1', 'tv')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@dynpart_bucket
+POSTHOOK: Output: default@dynpart_bucket@br=tv
+POSTHOOK: Lineage: dynpart_bucket PARTITION(br=tv).bn SCRIPT []
+PREHOOK: query: select * from dynpart_bucket
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dynpart_bucket
+PREHOOK: Input: default@dynpart_bucket@br=tv
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from dynpart_bucket
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dynpart_bucket
+POSTHOOK: Input: default@dynpart_bucket@br=tv
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+tv_0   tv
+tv_1   tv
