[hive] branch master updated: HIVE-26189: Iceberg metadata query throws exceptions after partition evolution (#3258) (Adam Szita, reviewed by Peter Vary)

2022-05-02 Thread szita
This is an automated email from the ASF dual-hosted git repository.

szita pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 39eba3b948 HIVE-26189: Iceberg metadata query throws exceptions after partition evolution (#3258) (Adam Szita, reviewed by Peter Vary)
39eba3b948 is described below

commit 39eba3b948ec38ea99c8c51449342e973f431a24
Author: Adam Szita <40628386+sz...@users.noreply.github.com>
AuthorDate: Mon May 2 11:22:21 2022 +0200

    HIVE-26189: Iceberg metadata query throws exceptions after partition evolution (#3258) (Adam Szita, reviewed by Peter Vary)
---
 .../mr/mapreduce/IcebergInternalRecordWrapper.java |  4 +-
 .../query_iceberg_metadata_of_partitioned_table.q  | 13 +
 ...ery_iceberg_metadata_of_partitioned_table.q.out | 57 ++
 3 files changed, 73 insertions(+), 1 deletion(-)

diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInternalRecordWrapper.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInternalRecordWrapper.java
index 0d53924b93..241c12a2d3 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInternalRecordWrapper.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInternalRecordWrapper.java
@@ -66,7 +66,7 @@ public class IcebergInternalRecordWrapper implements Record, StructLike {
 
   @Override
   public <T> T get(int pos, Class<T> javaClass) {
-    if (transforms[pos] != null) {
+    if (transforms[pos] != null && values[pos] != null) {
       return javaClass.cast(transforms[pos].apply(values[pos]));
     }
     return javaClass.cast(values[pos]);
@@ -143,6 +143,8 @@ public class IcebergInternalRecordWrapper implements Record, StructLike {
     switch (type.typeId()) {
       case TIMESTAMP:
         return timestamp -> DateTimeUtil.timestamptzFromMicros((Long) timestamp);
+      case DATE:
+        return date -> DateTimeUtil.dateFromDays((Integer) date);
       case STRUCT:
         IcebergInternalRecordWrapper wrapper =
             new IcebergInternalRecordWrapper(type.asStructType(), type.asStructType());
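
For context, here is a minimal standalone sketch of why the extra null check matters (this is illustrative code, not Hive's; the class and helper names are made up). After partition evolution, files written under an older spec carry null for partition fields added later, so applying a transform such as day() to that value would throw; the guarded version falls through and casts the null directly:

    import java.time.LocalDate;
    import java.util.function.Function;

    public class NullSafeTransformDemo {
      // Mirrors the guarded lookup in the patch: apply a transform only when a value exists.
      static <T> T get(Function<Object, Object>[] transforms, Object[] values,
                       int pos, Class<T> javaClass) {
        if (transforms[pos] != null && values[pos] != null) {
          return javaClass.cast(transforms[pos].apply(values[pos]));
        }
        return javaClass.cast(values[pos]);
      }

      public static void main(String[] args) {
        @SuppressWarnings("unchecked")
        Function<Object, Object>[] transforms =
            new Function[] { d -> LocalDate.ofEpochDay((Integer) d) }; // stand-in for a day() transform
        Object[] values = { null }; // row written before the day(ts) spec existed
        // Without the values[pos] check, unboxing null inside the transform would NPE here.
        System.out.println(get(transforms, values, 0, Object.class)); // prints: null
      }
    }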
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/query_iceberg_metadata_of_partitioned_table.q b/iceberg/iceberg-handler/src/test/queries/positive/query_iceberg_metadata_of_partitioned_table.q
index 1c1ef7a1d1..5408ec6dbd 100644
--- a/iceberg/iceberg-handler/src/test/queries/positive/query_iceberg_metadata_of_partitioned_table.q
+++ b/iceberg/iceberg-handler/src/test/queries/positive/query_iceberg_metadata_of_partitioned_table.q
@@ -73,3 +73,16 @@ select partition_summaries from default.ice_meta_3.manifests where partition_sum
 
 drop table ice_meta_2;
 drop table ice_meta_3;
+
+
+CREATE EXTERNAL TABLE `partevv`( `id` int, `ts` timestamp, `ts2` timestamp)  STORED BY ICEBERG STORED AS ORC TBLPROPERTIES  ('format-version'='1');
+
+ALTER TABLE partevv SET PARTITION SPEC (id);
+INSERT INTO partevv VALUES (1, current_timestamp(), current_timestamp());
+INSERT INTO partevv VALUES (2, current_timestamp(), current_timestamp());
+
+
+ALTER TABLE partevv SET PARTITION SPEC (day(ts));
+INSERT INTO partevv VALUES (100, current_timestamp(), current_timestamp());
+
+select * from default.partevv.partitions;
\ No newline at end of file
diff --git a/iceberg/iceberg-handler/src/test/results/positive/query_iceberg_metadata_of_partitioned_table.q.out b/iceberg/iceberg-handler/src/test/results/positive/query_iceberg_metadata_of_partitioned_table.q.out
index 4f7a0f90cd..6fdf0c8190 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/query_iceberg_metadata_of_partitioned_table.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/query_iceberg_metadata_of_partitioned_table.q.out
@@ -526,3 +526,60 @@ POSTHOOK: query: drop table ice_meta_3
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@ice_meta_3
 POSTHOOK: Output: default@ice_meta_3
+PREHOOK: query: CREATE EXTERNAL TABLE `partevv`( `id` int, `ts` timestamp, `ts2` timestamp)  STORED BY ICEBERG STORED AS ORC TBLPROPERTIES  ('format-version'='1')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partevv
+POSTHOOK: query: CREATE EXTERNAL TABLE `partevv`( `id` int, `ts` timestamp, `ts2` timestamp)  STORED BY ICEBERG STORED AS ORC TBLPROPERTIES  ('format-version'='1')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partevv
+PREHOOK: query: ALTER TABLE partevv SET PARTITION SPEC (id)
+PREHOOK: type: ALTERTABLE_SETPARTSPEC
+PREHOOK: Input: default@partevv
+POSTHOOK: query: ALTER TABLE partevv SET PARTITION SPEC (id)
+POSTHOOK: type: ALTERTABLE_SETPARTSPEC
+POSTHOOK: Input: default@partevv
+POSTHOOK: Output: default@partevv
+PREHOOK: query: INSERT INTO partevv VALUES (1, current_timestamp(), current_timestamp())
+PREHOOK:

[hive] branch master updated: HIVE-26107: Worker shouldn't inject duplicate entries in `ready for cleaning` state into the compaction queue (Laszlo Vegh, reviewed by Denys Kuzmenko, Karen Coppage)

2022-05-02 Thread dkuzmenko
This is an automated email from the ASF dual-hosted git repository.

dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 730da48f65 HIVE-26107: Worker shouldn't inject duplicate entries in `ready for cleaning` state into the compaction queue (Laszlo Vegh, reviewed by Denys Kuzmenko, Karen Coppage)
730da48f65 is described below

commit 730da48f65ca2fb6ff771820a7a1fffae10ea7bd
Author: veghlaci05 <90267982+veghlac...@users.noreply.github.com>
AuthorDate: Mon May 2 11:20:36 2022 +0200

    HIVE-26107: Worker shouldn't inject duplicate entries in `ready for cleaning` state into the compaction queue (Laszlo Vegh, reviewed by Denys Kuzmenko, Karen Coppage)

Closes #3172
---
 .../org/apache/hadoop/hive/conf/Constants.java |   2 +
 .../java/org/apache/hadoop/hive/ql/ErrorMsg.java   |   1 +
 .../org/apache/hadoop/hive/ql/TestAcidOnTez.java   |   1 +
 .../hive/ql/txn/compactor/TestCompactor.java   |  16 +--
 .../ql/txn/compactor/TestCrudCompactorOnTez.java   |  69 +++--
 .../TestFetchWriteIdFromInsertOnlyTables.java  |   6 +-
 .../ql/txn/compactor/TestMmCompactorOnTez.java |   2 +-
 ql/src/java/org/apache/hadoop/hive/ql/Driver.java  |  10 +-
 .../org/apache/hadoop/hive/ql/DriverContext.java   |   9 ++
 .../apache/hadoop/hive/ql/DriverTxnHandler.java|   3 +-
 .../org/apache/hadoop/hive/ql/DriverUtils.java |  37 +--
 .../compact/AlterTableCompactOperation.java|   9 ++
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java   |  10 ++
 .../hadoop/hive/ql/lockmgr/DummyTxnManager.java|   6 ++
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java |  12 ++-
 .../hadoop/hive/ql/stats/StatsUpdaterThread.java   |   2 +-
 .../hive/ql/txn/compactor/QueryCompactor.java  |   6 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java   |   2 +-
 .../hadoop/hive/metastore/txn/TestTxnHandler.java  |   4 +-
 .../org/apache/hadoop/hive/ql/TestTxnCommands.java |   6 ++
 .../apache/hadoop/hive/ql/TestTxnCommands2.java|  34 ---
 .../org/apache/hadoop/hive/ql/TestTxnLoadData.java |  10 +-
 .../hive/ql/stats/TestStatsUpdaterThread.java  |   2 +-
 .../hadoop/hive/ql/txn/compactor/TestCleaner.java  |  28 --
 .../ql/txn/compactor/TestCompactionMetrics.java|  13 ++-
 .../clientpositive/acid_insert_overwrite_update.q  |   1 -
 .../queries/clientpositive/dbtxnmgr_compact1.q |   6 +-
 .../queries/clientpositive/dbtxnmgr_compact3.q |   6 +-
 .../llap/acid_insert_overwrite_update.q.out|   4 -
 .../clientpositive/llap/dbtxnmgr_compact1.q.out|  20 +++-
 .../clientpositive/llap/dbtxnmgr_compact3.q.out|  20 +++-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp|  25 +
 .../src/gen/thrift/gen-cpp/hive_metastore_types.h  |  15 ++-
 .../hive/metastore/api/CompactionResponse.java | 112 -
 .../gen-php/metastore/CompactionResponse.php   |  24 +
 .../src/gen/thrift/gen-py/hive_metastore/ttypes.py |  14 ++-
 .../src/gen/thrift/gen-rb/hive_metastore_types.rb  |   4 +-
 .../src/main/thrift/hive_metastore.thrift  |   3 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java  |  26 +++--
 39 files changed, 484 insertions(+), 96 deletions(-)
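
A minimal sketch of the behavior the subject line describes (illustrative only; this is not Hive's TxnHandler code, and the queue representation and names are invented): a compaction request for a table that already has an entry in `ready for cleaning` state is refused rather than enqueued a second time:

    import java.util.ArrayList;
    import java.util.List;

    public class CompactionQueueDemo {
      static final String READY_FOR_CLEANING = "ready for cleaning";

      record Entry(String table, String state) {}

      private final List<Entry> queue = new ArrayList<>();

      // Refuse instead of inserting a duplicate for a table whose previous
      // compaction is still waiting to be cleaned up.
      boolean requestCompaction(String table) {
        boolean pending = queue.stream().anyMatch(
            e -> e.table().equals(table) && e.state().equals(READY_FOR_CLEANING));
        if (pending) {
          return false; // caller surfaces a "request refused" message
        }
        queue.add(new Entry(table, "initiated"));
        return true;
      }
    }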

diff --git a/common/src/java/org/apache/hadoop/hive/conf/Constants.java b/common/src/java/org/apache/hadoop/hive/conf/Constants.java
index a62d8e8f13..b89cdf3fad 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/Constants.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/Constants.java
@@ -91,4 +91,6 @@ public class Constants {
 
   public static final String ACID_FETCH_DELETED_ROWS = "acid.fetch.deleted.rows";
   public static final String INSERT_ONLY_FETCH_BUCKET_ID = "insertonly.fetch.bucketid";
+
+  public static final String ERROR_MESSAGE_NO_DETAILS_AVAILABLE = "No detailed message available";
 }
diff --git a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 749b76b41f..277e647557 100644
--- a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -477,6 +477,7 @@ public enum ErrorMsg {
   TIME_TRAVEL_NOT_ALLOWED(10429, "Time travel is not allowed for {0}. Please choose a storage format which supports the feature.", true),
   INVALID_METADATA_TABLE_NAME(10430, "Invalid metadata table name {0}.", true),
   METADATA_TABLE_NOT_SUPPORTED(10431, "Metadata tables are not supported for table {0}.", true),
+  COMPACTION_REFUSED(10432, "Compaction request for {0}.{1}{2} is refused, details: {3}.", true),
 
 
   //========================== 20000 range starts here ========================//
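
Incidentally, the {0}..{3} placeholders in the new message follow java.text.MessageFormat conventions, so a refusal for an unpartitioned table could render roughly like this (a hedged illustration of the template, not Hive's ErrorMsg API):

    import java.text.MessageFormat;

    public class CompactionRefusedMessageDemo {
      public static void main(String[] args) {
        String template = "Compaction request for {0}.{1}{2} is refused, details: {3}.";
        // {2} carries a partition suffix and stays empty for unpartitioned tables.
        System.out.println(MessageFormat.format(template,
            "default", "acid_tbl", "", "already pending in ready for cleaning state"));
      }
    }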
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index d4ce79b237..602c064ce5 100644
--- a/itests/hive-u