(hive) branch master updated: HIVE-27984: Support backward compatibility of hms thrift struct about column stats (#4984) (Butao Zhang, reviewed by okumin, Zhihua Deng)
This is an automated email from the ASF dual-hosted git repository. zhangbutao pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hive.git The following commit(s) were added to refs/heads/master by this push: new 6ab9b14c2c4 HIVE-27984: Support backward compatibility of hms thrift struct about column stats (#4984)(Butao Zhang, reviewed by okumin, Zhihua Deng) 6ab9b14c2c4 is described below commit 6ab9b14c2c45e139a30ab78d41cefe5f1ab64d22 Author: Butao Zhang AuthorDate: Thu Feb 1 14:27:43 2024 +0800 HIVE-27984: Support backward compatibility of hms thrift struct about column stats (#4984)(Butao Zhang, reviewed by okumin, Zhihua Deng) --- .../cache/TestCachedStoreUpdateUsingEvents.java| 18 ++-- .../hadoop/hive/ql/exec/ColumnStatsUpdateTask.java | 2 +- .../hadoop/hive/ql/stats/ColStatsProcessor.java| 2 +- .../gen/thrift/gen-cpp/hive_metastore_types.cpp| 51 +- .../src/gen/thrift/gen-cpp/hive_metastore_types.h | 105 +++-- .../hive/metastore/api/ColumnStatistics.java | 5 +- .../metastore/api/GetPartitionsByNamesRequest.java | 5 +- .../hadoop/hive/metastore/api/GetTableRequest.java | 5 +- .../hive/metastore/api/PartitionsStatsRequest.java | 60 +++- .../metastore/api/SetPartitionsStatsRequest.java | 60 +++- .../hive/metastore/api/TableStatsRequest.java | 62 ++-- .../thrift/gen-php/metastore/ColumnStatistics.php | 2 +- .../metastore/GetPartitionsByNamesRequest.php | 2 +- .../thrift/gen-php/metastore/GetTableRequest.php | 2 +- .../gen-php/metastore/PartitionsStatsRequest.php | 4 +- .../metastore/SetPartitionsStatsRequest.php| 4 +- .../thrift/gen-php/metastore/TableStatsRequest.php | 4 +- .../src/gen/thrift/gen-py/hive_metastore/ttypes.py | 30 +++--- .../src/gen/thrift/gen-rb/hive_metastore_types.rb | 15 ++- .../hadoop/hive/metastore/HiveMetaStoreClient.java | 18 ++-- .../src/main/thrift/hive_metastore.thrift | 12 +-- .../metastore/HiveMetaStoreClientPreCatalog.java | 22 +++-- 22 files changed, 262 insertions(+), 228 deletions(-) diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java index dd08d8aa109..1ad1349b10b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java @@ -585,7 +585,7 @@ public class TestCachedStoreUpdateUsingEvents { colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); colStats.setEngine(Constants.HIVE_ENGINE); -SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); +SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -627,7 +627,7 @@ public class TestCachedStoreUpdateUsingEvents { colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); colStats.setEngine(Constants.HIVE_ENGINE); -SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); +SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -940,7 +940,7 @@ public class TestCachedStoreUpdateUsingEvents { colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); colStats.setEngine(Constants.HIVE_ENGINE); -SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); +SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); 
setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -1007,7 +1007,7 @@ public class TestCachedStoreUpdateUsingEvents { colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); colStats.setEngine(Constants.HIVE_ENGINE); -SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); +SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -1056,7 +1056,7 @@ public class TestCachedStoreUpdateUsingEvents {
(hive) branch master updated (1dc3dc9ede4 -> e08a60029f9)
This is an automated email from the ASF dual-hosted git repository. krisztiankasa pushed a change to branch master in repository https://gitbox.apache.org/repos/asf/hive.git from 1dc3dc9ede4 HIVE-28017: Add generated protobuf code. (#5018). (Ayush Saxena, reviewed by Butao Zhang, Zhihua Deng, Attila Turoczy) add e08a60029f9 HIVE-28000: Fix scenarios where 'not in' gives incorrect results due to type coercion (Anmol Sundaram reviewed by Krisztian Kasa, Steve Carlin, Attila Turoczy) No new revisions were added by this update. Summary of changes: .../hive/ql/parse/type/TypeCheckProcFactory.java | 19 +++--- .../test/queries/clientpositive/not_in_scenarios.q | 8 +++ .../results/clientpositive/llap/in_coercion.q.out | 2 + .../clientpositive/llap/not_in_scenarios.q.out | 77 ++ .../clientpositive/llap/orc_llap_counters.q.out| 10 +-- 5 files changed, 102 insertions(+), 14 deletions(-) create mode 100644 ql/src/test/queries/clientpositive/not_in_scenarios.q create mode 100644 ql/src/test/results/clientpositive/llap/not_in_scenarios.q.out
(hive) branch branch-3 updated (aafaeb874e3 -> 3feab7aac71)
This is an automated email from the ASF dual-hosted git repository. sankarh pushed a change to branch branch-3 in repository https://gitbox.apache.org/repos/asf/hive.git from aafaeb874e3 HIVE-27806: Backport of HIVE-20536, HIVE-20632, HIVE-20511, HIVE-20560, HIVE-20631, HIVE-20637, HIVE-20609, HIVE-20439 to branch-3 (#4983) add 3feab7aac71 HIVE-28049: Backport of HIVE-21862, HIVE-20437, HIVE-22589, HIVE-22840, HIVE-24074, HIVE-25104 to branch-3(#5051) No new revisions were added by this update. Summary of changes: .../hadoop/hive/common/type/TimestampTZUtil.java |35 +- .../java/org/apache/hadoop/hive/conf/HiveConf.java |23 + data/files/avro_date.txt | 4 + data/files/avro_legacy_mixed_dates.avro| Bin 0 -> 236 bytes data/files/avro_legacy_mixed_timestamps.avro | Bin 0 -> 282 bytes data/files/avro_timestamp.txt | 6 +- data/files/orc_legacy_mixed_dates.orc | Bin 0 -> 213 bytes data/files/orc_legacy_mixed_timestamps.orc | Bin 0 -> 276 bytes data/files/parquet_legacy_mixed_dates.parq | Bin 0 -> 245 bytes data/files/parquet_legacy_mixed_timestamps.parq| Bin 0 -> 359 bytes data/files/tbl_avro1/00_0 | Bin 0 -> 262 bytes data/files/tbl_avro1/00_0_copy_1 | Bin 0 -> 263 bytes data/files/tbl_parq1/00_0 | Bin 0 -> 286 bytes data/files/tbl_parq1/00_0_copy_1 | Bin 0 -> 286 bytes data/files/tbl_parq1/00_0_copy_2 | Bin 0 -> 327 bytes .../test/resources/testconfiguration.properties|20 + .../io/decode/GenericColumnVectorProducer.java | 6 + .../llap/io/decode/OrcEncodedDataConsumer.java | 8 +- .../hive/llap/io/encoded/OrcEncodedDataReader.java | 3 +- .../llap/io/metadata/ConsumerFileMetadata.java | 2 + .../hive/llap/io/metadata/OrcFileMetadata.java | 8 + .../metastore/filemeta/OrcFileMetadataHandler.java | 2 +- pom.xml| 7 +- ql/pom.xml | 5 + .../hive/ql/exec/vector/VectorizedBatchUtil.java |10 +- .../hive/ql/io/avro/AvroContainerOutputFormat.java | 3 + .../hive/ql/io/avro/AvroGenericRecordReader.java |26 +- .../hadoop/hive/ql/io/orc/ExternalCache.java | 4 +- 
.../org/apache/hadoop/hive/ql/io/orc/OrcFile.java |11 + .../hadoop/hive/ql/io/orc/OrcFileFormatProxy.java |11 +- .../hadoop/hive/ql/io/orc/OrcInputFormat.java | 9 +- .../hadoop/hive/ql/io/orc/RecordReaderImpl.java| 3 +- .../apache/hadoop/hive/ql/io/orc/WriterImpl.java | 5 +- .../ql/io/parquet/ParquetRecordReaderBase.java |16 + .../hive/ql/io/parquet/convert/ETypeConverter.java | 346 +- .../io/parquet/read/DataWritableReadSupport.java |63 + .../ql/io/parquet/timestamp/NanoTimeUtils.java | 189 +- .../parquet/vector/BaseVectorizedColumnReader.java |12 +- .../io/parquet/vector/ParquetDataColumnReader.java |33 +- .../vector/ParquetDataColumnReaderFactory.java | 1137 +- .../parquet/vector/VectorizedListColumnReader.java | 7 +- .../vector/VectorizedParquetRecordReader.java |23 +- .../vector/VectorizedPrimitiveColumnReader.java| 219 +- .../io/parquet/write/DataWritableWriteSupport.java |13 +- .../ql/io/parquet/write/DataWritableWriter.java|29 +- .../hive/ql/io/sarg/ConvertAstToSearchArg.java |17 +- .../ql/optimizer/FixedBucketPruningOptimizer.java | 8 +- .../vector/util/batchgen/VectorBatchGenerator.java | 6 +- .../hive/ql/io/orc/TestInputOutputFormat.java | 6 +- .../apache/hadoop/hive/ql/io/orc/TestOrcFile.java | 3 +- .../hive/ql/io/parquet/TestDataWritableWriter.java | 2 +- .../io/parquet/VectorizedColumnReaderTestBase.java | 3 +- .../parquet/serde/TestParquetTimestampUtils.java |86 +- .../TestParquetTimestampsHive2Compatibility.java | 276 + .../hive/ql/io/sarg/TestConvertAstToSearchArg.java | 2 +- .../clientpositive/avro_hybrid_mixed_date.q|22 + .../clientpositive/avro_hybrid_mixed_timestamp.q |22 + .../clientpositive/avro_legacy_mixed_date.q|14 + .../clientpositive/avro_legacy_mixed_timestamp.q |23 + .../clientpositive/avro_proleptic_mixed_date.q |24 + .../avro_proleptic_mixed_timestamp.q |24 + .../test/queries/clientpositive/avro_timestamp2.q |23 + ...ge_allowincompatible_vectorization_false_date.q | 8 + ..._allowincompatible_vectorization_false_date2.q} |16 +- 
...e_allowincompatible_vectorization_false_date3.q |21 + .../queries/clientpositive/orc_hybrid_mixed_date.q |20 + .../clientpositive/orc_hybrid_mixed_timestamp.q|20 + .../queries/clientpositive/orc_legacy_mixed_date.q |12 + .../clientpositive/orc_legacy_mixed_timestamp.q|12 +