[hive] branch master updated (ed4ecfc -> 9a3d878)

2021-12-08 Thread kgyrtkirk
This is an automated email from the ASF dual-hosted git repository.

kgyrtkirk pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git.


from ed4ecfc  HIVE-25788: Iceberg CTAS should honor location clause and 
have correct table properties (Marton Bod, reviewed by Adam Szita and Peter 
Vary)
 add 9a3d878  HIVE-25735: Improve statestimator in UDFWhen/UDFCase (#2814) 
(Zoltan Haindrich reviewed by Krisztian Kasa)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/hive/ql/udf/generic/GenericUDFCase.java |  8 +++---
 .../hadoop/hive/ql/udf/generic/GenericUDFWhen.java | 30 --
 .../clientpositive/llap/constant_prop_when.q.out   |  4 +--
 .../results/clientpositive/llap/innerjoin1.q.out   | 16 ++--
 .../materialized_view_create_rewrite_nulls.q.out   |  6 ++---
 .../clientpositive/llap/subquery_notin.q.out   | 10 
 .../clientpositive/llap/subquery_select.q.out  |  4 +--
 .../llap/vector_between_columns.q.out  |  4 +--
 .../clientpositive/llap/vector_case_when_1.q.out   | 12 -
 .../clientpositive/llap/vector_case_when_2.q.out   | 24 -
 .../clientpositive/llap/vector_coalesce_2.q.out|  8 +++---
 .../clientpositive/llap/vector_coalesce_3.q.out| 10 
 .../llap/vector_groupby_grouping_id1.q.out |  8 +++---
 .../vector_groupby_grouping_sets_grouping.q.out|  4 +--
 .../clientpositive/llap/vectorized_case.q.out  | 16 ++--
 .../perf/tpcds30tb/tez/query36.q.out   |  4 +--
 .../perf/tpcds30tb/tez/query39.q.out   | 16 ++--
 .../perf/tpcds30tb/tez/query70.q.out   |  4 +--
 .../perf/tpcds30tb/tez/query86.q.out   |  4 +--
 19 files changed, 109 insertions(+), 83 deletions(-)


[hive] branch master updated (0e119ea -> ed4ecfc)

2021-12-08 Thread mbod
This is an automated email from the ASF dual-hosted git repository.

mbod pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git.


from 0e119ea  HIVE-25781: Restore multi-threaded support in Cleaner after 
HIVE-25115 (Denys Kuzmenko, reviewed by Karen Coppage)
 add ed4ecfc  HIVE-25788: Iceberg CTAS should honor location clause and 
have correct table properties (Marton Bod, reviewed by Adam Szita and Peter 
Vary)

No new revisions were added by this update.

Summary of changes:
 .../apache/iceberg/mr/hive/HiveIcebergSerDe.java   | 26 ++-
 .../iceberg/mr/hive/TestHiveIcebergCTAS.java   | 30 ++
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java |  7 +
 3 files changed, 56 insertions(+), 7 deletions(-)


[hive] branch master updated: HIVE-25781: Restore multi-threaded support in Cleaner after HIVE-25115 (Denys Kuzmenko, reviewed by Karen Coppage)

2021-12-08 Thread dkuzmenko
This is an automated email from the ASF dual-hosted git repository.

dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git.


The following commit(s) were added to refs/heads/master by this push:
 new 0e119ea  HIVE-25781: Restore multi-threaded support in Cleaner after 
HIVE-25115 (Denys Kuzmenko, reviewed by Karen Coppage)
0e119ea is described below

commit 0e119eaddb93dc10743bb8990ce8eca4fb77cf16
Author: Denys Kuzmenko 
AuthorDate: Wed Dec 8 10:59:48 2021 +0200

HIVE-25781: Restore multi-threaded support in Cleaner after HIVE-25115 
(Denys Kuzmenko, reviewed by Karen Coppage)

Closes #2825
---
 .../hive/ql/txn/compactor/TestCompactor.java   |  4 ++
 .../metastore/txn/TestCompactionTxnHandler.java| 35 ---
 .../apache/hadoop/hive/ql/TestTxnCommands2.java|  2 +
 .../apache/hadoop/hive/ql/TestTxnCommands3.java|  1 +
 .../hadoop/hive/ql/txn/compactor/TestCleaner.java  |  1 +
 .../hive/metastore/txn/CompactionTxnHandler.java   | 69 --
 6 files changed, 72 insertions(+), 40 deletions(-)

diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index 7e48419..13705be 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -1621,6 +1621,10 @@ public class TestCompactor {
 verifyFooBarResult(tblName, 2);
 verifyHasBase(table.getSd(), fs, "base_005_v016");
 runCleaner(conf);
+// in case when we have # of accumulated entries for the same 
table/partition - we need to process them one-by-one in ASC order of write_id's,
+// however, to support multi-threaded processing in the Cleaner, we have 
to move entries from the same group to the next Cleaner cycle, 
+// so that they are not processed by multiple threads concurrently. 
+runCleaner(conf);
 verifyDeltaCount(table.getSd(), fs, 0);
   }
 
diff --git 
a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
 
b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
index ea1abc6..9bfc324 100644
--- 
a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
+++ 
b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
@@ -51,6 +51,7 @@ import org.junit.Test;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 import java.util.SortedSet;
@@ -196,8 +197,9 @@ public class TestCompactionTxnHandler {
 assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
 CompactionInfo ci = 
txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
 assertNotNull(ci);
-
-assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
+
+ci.highestWriteId = 41;
+txnHandler.updateCompactorState(ci, 0);
 txnHandler.markCompacted(ci);
 assertNull(txnHandler.findNextToCompact(aFindNextCompactRequest("fred", 
WORKER_VERSION)));
 
@@ -225,8 +227,9 @@ public class TestCompactionTxnHandler {
 assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
 CompactionInfo ci = 
txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
 assertNotNull(ci);
-
-assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
+
+ci.highestWriteId = 41;
+txnHandler.updateCompactorState(ci, 0);
 txnHandler.markCompacted(ci);
 assertNull(txnHandler.findNextToCompact(aFindNextCompactRequest("fred", 
WORKER_VERSION)));
 
@@ -721,8 +724,9 @@ public class TestCompactionTxnHandler {
   public void testMarkCleanedCleansTxnsAndTxnComponents()
   throws Exception {
 long txnid = openTxn();
-LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB,
-"mydb");
+long mytableWriteId = allocateTableWriteIds("mydb", "mytable", txnid);
+
+LockComponent comp = new LockComponent(LockType.SHARED_WRITE, 
LockLevel.DB, "mydb");
 comp.setTablename("mytable");
 comp.setOperationType(DataOperationType.INSERT);
 List<LockComponent> components = new ArrayList<>(1);
@@ -746,6 +750,8 @@ public class TestCompactionTxnHandler {
 txnHandler.abortTxn(new AbortTxnRequest(txnid));
 
 txnid = openTxn();
+long fooWriteId = allocateTableWriteIds("mydb", "foo", txnid);
+
 comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
 comp.setTablename("foo");
 comp.setPartitionname("bar=compact");
@@ -769,7 +775,7 @@ public class TestCompactionTxnHandler {
 assertTrue(res.getState() == LockState.ACQUIRED);
 txnHandler.abortTxn(new AbortTxnRequest(txnid));
 
-CompactionInfo ci = new CompactionInfo();
+CompactionInfo ci;
 
 // Now clean