[hive] branch master updated: HIVE-27802: Simplify TestTezSessionState.testSymlinkedLocalFilesAreLocalizedOnce (#4804) (Laszlo Bodor reviewed by Ayush Saxena)
This is an automated email from the ASF dual-hosted git repository. abstractdog pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hive.git The following commit(s) were added to refs/heads/master by this push: new b6847ed38b7 HIVE-27802: Simplify TestTezSessionState.testSymlinkedLocalFilesAreLocalizedOnce (#4804) (Laszlo Bodor reviewed by Ayush Saxena) b6847ed38b7 is described below commit b6847ed38b7d32586ab22e224904867f159b510e Author: Bodor Laszlo AuthorDate: Tue Oct 17 07:50:22 2023 +0200 HIVE-27802: Simplify TestTezSessionState.testSymlinkedLocalFilesAreLocalizedOnce (#4804) (Laszlo Bodor reviewed by Ayush Saxena) --- .../hadoop/hive/ql/exec/tez/TestTezSessionState.java | 19 +-- 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionState.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionState.java index 521134bdfa5..8e48c0f9998 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionState.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionState.java @@ -28,22 +28,6 @@ import org.junit.Test; public class TestTezSessionState { - private class TestTezSessionPoolManager extends TezSessionPoolManager { -public TestTezSessionPoolManager() { - super(); -} - -@Override -public void setupPool(HiveConf conf) throws Exception { - super.setupPool(conf); -} - -@Override -public TezSessionPoolSession createSession(String sessionId, HiveConf conf) { - return new SampleTezSessionState(sessionId, this, conf); -} - } - @Test public void testSymlinkedLocalFilesAreLocalizedOnce() throws Exception { Path jarPath = Files.createTempFile("jar", ""); @@ -57,9 +41,8 @@ public class TestTezSessionState { HiveConf hiveConf = new HiveConf(); hiveConf.set(HiveConf.ConfVars.HIVE_JAR_DIRECTORY.varname, "/tmp"); -TezSessionPoolManager poolManager = new TestTezSessionPoolManager(); -TezSessionState sessionState = 
poolManager.getSession(null, hiveConf, true, false); +TezSessionState sessionState = new TezSessionState(DagUtils.getInstance(), hiveConf); LocalResource l1 = sessionState.createJarLocalResource(jarPath.toUri().toString()); LocalResource l2 = sessionState.createJarLocalResource(symlinkPath.toUri().toString());
[hive] branch master updated: HIVE-27682: AlterTableAlterPartitionOperation cannot change the type if the column has default partition (Zhihua Deng, reviewed by Sai Hemanth Gantasala)
This is an automated email from the ASF dual-hosted git repository. dengzh pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hive.git The following commit(s) were added to refs/heads/master by this push: new a0364474ab2 HIVE-27682: AlterTableAlterPartitionOperation cannot change the type if the column has default partition (Zhihua Deng, reviewed by Sai Hemanth Gantasala) a0364474ab2 is described below commit a0364474ab2bf9926b32d7df31948fd49871cc35 Author: dengzh AuthorDate: Tue Oct 17 09:29:09 2023 +0800 HIVE-27682: AlterTableAlterPartitionOperation cannot change the type if the column has default partition (Zhihua Deng, reviewed by Sai Hemanth Gantasala) Closes #4684 --- .../alter/AlterTableAlterPartitionOperation.java | 20 +++--- .../clientpositive/alter_partition_coltype.q | 4 +++ .../llap/alter_partition_coltype.q.out | 31 ++ 3 files changed, 45 insertions(+), 10 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java index 2046cbdb432..0fd8785d1bc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java @@ -20,9 +20,9 @@ package org.apache.hadoop.hive.ql.ddl.table.partition.alter; import java.util.ArrayList; import java.util.List; -import java.util.Set; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; @@ -30,7 +30,6 @@ import org.apache.hadoop.hive.ql.ddl.DDLUtils; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import 
org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; @@ -92,14 +91,15 @@ public class AlterTableAlterPartitionOperation extends DDLOperation partitions = context.getDb().getAllPartitionsOf(tbl); -for (Partition part : partitions) { - if (part.getName().equals(context.getConf().getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) { -continue; - } - +List partNames = context.getDb().getPartitionNames(tbl.getDbName(), +tbl.getTableName(), (short) -1); +for (String partName : partNames) { try { -String value = part.getValues().get(colIndex); +List values = Warehouse.getPartValuesFromPartName(partName); +String value = values.get(colIndex); +if (value.equals(context.getConf().getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) { + continue; +} Object convertedValue = converter.convert(value); if (convertedValue == null) { throw new HiveException(" Converting from " + TypeInfoFactory.stringTypeInfo + " to " + expectedType + @@ -107,7 +107,7 @@ public class AlterTableAlterPartitionOperation extends DDLOperation
[hive] branch master updated: HIVE-27169: New Locked List to prevent configuration change at runtime without throwing error (#4731) (Raghav Aggarwal, reviewed by Okumin, Pravin Kumar Sinha)
This is an automated email from the ASF dual-hosted git repository. pravin pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hive.git The following commit(s) were added to refs/heads/master by this push: new 9eeab401734 HIVE-27169: New Locked List to prevent configuration change at runtime without throwing error (#4731) (Raghav Aggarwal, reviewed by Okumin, Pravin Kumar Sinha) 9eeab401734 is described below commit 9eeab40173479c74b6fbf6657c3472b81ce4efcd Author: Raghav Aggarwal AuthorDate: Mon Oct 16 21:03:51 2023 +0530 HIVE-27169: New Locked List to prevent configuration change at runtime without throwing error (#4731) (Raghav Aggarwal, reviewed by Okumin, Pravin Kumar Sinha) --- .../java/org/apache/hadoop/hive/conf/HiveConf.java | 33 ++ .../org/apache/hadoop/hive/conf/HiveConfUtil.java | 16 + .../org/apache/hadoop/hive/conf/TestHiveConf.java | 15 + .../hadoop/hive/ql/processors/SetProcessor.java| 5 +++ .../hadoop/hive/ql/session/SessionState.java | 39 ++ 5 files changed, 108 insertions(+) diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 290cd8a4efa..adc6503debe 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -100,6 +100,7 @@ public class HiveConf extends Configuration { private static final Map metaConfs = new HashMap(); private final List restrictList = new ArrayList(); private final Set hiddenSet = new HashSet(); + private final Set lockedSet = new HashSet<>(); private final List rscList = new ArrayList<>(); private Pattern modWhiteListPattern = null; @@ -850,6 +851,10 @@ public class HiveConf extends Configuration { HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"), +HIVE_CONF_LOCKED_LIST("hive.conf.locked.list", "", "Comma separated " + +"list of configuration options which are locked and can not be changed at runtime. 
Warning is logged and the " + +"change is ignored when user try to set these configs during runtime"), + HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100, "maximum number of lines for footer user can define for a table file"), @@ -6010,6 +6015,9 @@ public class HiveConf extends Configuration { throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list" + " of parameters that can't be modified at runtime or is prefixed by a restricted variable"); } +if (isLockedConfig(name)) { + return; +} String oldValue = name != null ? get(name) : null; if (name == null || value == null || !value.equals(oldValue)) { // When either name or value is null, the set method below will fail, @@ -6022,6 +6030,10 @@ public class HiveConf extends Configuration { return Iterables.any(hiddenSet, hiddenVar -> name.startsWith(hiddenVar)); } + public boolean isLockedConfig(String name) { +return Iterables.any(lockedSet, lockedVar -> name != null && name.equalsIgnoreCase(lockedVar)); + } + public static boolean isEncodedPar(String name) { for (ConfVars confVar : HiveConf.ENCODED_CONF) { ConfVars confVar1 = confVar; @@ -6427,6 +6439,7 @@ public class HiveConf extends Configuration { origProp = (Properties)other.origProp.clone(); restrictList.addAll(other.restrictList); hiddenSet.addAll(other.hiddenSet); +lockedSet.addAll(other.lockedSet); modWhiteListPattern = other.modWhiteListPattern; } @@ -6560,6 +6573,9 @@ public class HiveConf extends Configuration { setupRestrictList(); hiddenSet.clear(); hiddenSet.addAll(HiveConfUtil.getHiddenSet(this)); + +lockedSet.clear(); +lockedSet.addAll(HiveConfUtil.getLockedSet(this)); } /** @@ -6938,6 +6954,22 @@ public class HiveConf extends Configuration { setupRestrictList(); } + public void addToLockedSet(String lockedListStr) { +String oldList = this.getVar(ConfVars.HIVE_CONF_LOCKED_LIST); +if (oldList == null || oldList.isEmpty()) { + this.setVar(ConfVars.HIVE_CONF_LOCKED_LIST, lockedListStr); +} else { + 
this.setVar(ConfVars.HIVE_CONF_LOCKED_LIST, oldList + "," + lockedListStr); +} +String modifiedLockedSet = this.getVar(ConfVars.HIVE_CONF_LOCKED_LIST); +lockedSet.clear(); +if (modifiedLockedSet != null) { + for (String entry : modifiedLockedSet.split(",")) { +lockedSet.add(entry.trim()); + } +} + } + /** * Set white list of parameters that are allowed to be modified * @@ -6975,6 +7007,7 @@ public class HiveConf extends Configuration { restrictList.add(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname); restrictList.add(ConfVars.HIVE_CONF_HIDDEN_LIST.varname); restrictList.add(ConfVars.HIVE_CONF_INTERNAL_VAR
[hive] branch master updated: HIVE-27772: UNIX_TIMESTAMP should return NULL when date fields are out of bounds (Simhadri Govindappa reviewed by Stamatis Zampetakis)
This is an automated email from the ASF dual-hosted git repository. zabetak pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hive.git The following commit(s) were added to refs/heads/master by this push: new 1c126d94744 HIVE-27772: UNIX_TIMESTAMP should return NULL when date fields are out of bounds (Simhadri Govindappa reviewed by Stamatis Zampetakis) 1c126d94744 is described below commit 1c126d947448ffc9784a1465306e018ba183a014 Author: SimhadriG AuthorDate: Thu Oct 5 14:31:12 2023 +0530 HIVE-27772: UNIX_TIMESTAMP should return NULL when date fields are out of bounds (Simhadri Govindappa reviewed by Stamatis Zampetakis) In the case of invalid dates, such as '2001-02-31' (day field exceeds valid range for the given month), the UNIX_TIMESTAMP function behaves unexpectedly. Instead of returning NULL (as it happens in other systems like Spark, MySQL, etc.), it provides a value corresponding to another valid date based on some resolution rules (e.g., Feb 28th or March 1st). The resolution rules and results depend on the underlying formatter implementation used by UNIX_TIMESTAMP. By default, the DATETIME formatter uses the SMART resolution style and the SIMPLE formatter the LENIENT. Both of these styles are able to resolve "invalid" bounds to valid dates. In order to prevent seemingly "invalid" dates from being parsed correctly we have to use the STRICT resolution style. However, we cannot simply switch the formatters to always use the STRICT resolution because that would break existing applications relying on the existing resolution rules. To address the problem reported here and retain the previous behaviour we opted to make the resolution style configurable by adding a new property. The new property only affects the DATETIME formatter; the SIMPLE formatter is almost deprecated so we don't add new features to it. 
Close apache/hive#4777 --- .../java/org/apache/hadoop/hive/conf/HiveConf.java | 4 + .../hadoop/hive/conf/TestHiveConfVarsValidate.java | 9 ++ .../ql/udf/generic/InstantDateTimeFormatter.java | 6 +- .../hive/ql/udf/generic/InstantFormatter.java | 16 ++- ...ericUDFToUnixTimestampEvaluateStringString.java | 30 +++-- ...nericUDFToUnixTimestampEvaluateStringString.csv | 140 + 6 files changed, 135 insertions(+), 70 deletions(-) diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 5e9c8425ddf..290cd8a4efa 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -3860,6 +3860,10 @@ public class HiveConf extends Configuration { "is discouraged. It suffers from known bugs that are unlikely to be fixed in subsequent versions of the product." + "Furthermore, using SIMPLE formatter may lead to strange behavior, and unexpected results when combined " + "with SQL functions/operators that are using the new DATETIME formatter."), +HIVE_DATETIME_RESOLVER_STYLE("hive.datetime.formatter.resolver.style", "SMART", +new StringSet("SMART", "STRICT", "LENIENT"), +"The style used by the hive.datetime.formatter (only applicable to DATETIME) to resolve dates amd times." 
+ +"The possible values are STRICT, SMART, and LENIENT and their behavior follows the java.time.format.ResolverStyle API."), // HiveServer2 specific configs HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR("hive.server2.clear.dangling.scratchdir", false, "Clear dangling scratch dir periodically in HS2"), diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfVarsValidate.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfVarsValidate.java index 7ac44588c08..42736ddb3d6 100644 --- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfVarsValidate.java +++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfVarsValidate.java @@ -26,6 +26,7 @@ import java.util.List; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_DATETIME_FORMATTER; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_DATETIME_RESOLVER_STYLE; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_EXPLAIN_NODE_VISIT_LIMIT; import static org.junit.Assert.assertEquals; @@ -62,6 +63,14 @@ public class TestHiveConfVarsValidate { list.add(new Object[] { HIVE_DATETIME_FORMATTER, "simple", null }); list.add(new Object[] { HIVE_DATETIME_FORMATTER, "dateTime", null }); list.add(new Object[] { HIVE_DATETIME_FORMATTER, "OTHER", "Invalid value.. expects one of [datetime, simple]" }); +list.add(new Object[] { HIVE_DATETIME_RESOLVER_STYLE, "SMART", null}); +list.add(new Object[] { HIVE_DATETIME_RESOLVER_STYLE, "STRICT", null}); +list.add(new Objec
[hive] branch master updated (357714abbf1 -> 5d58a210375)
This is an automated email from the ASF dual-hosted git repository. abstractdog pushed a change to branch master in repository https://gitbox.apache.org/repos/asf/hive.git from 357714abbf1 HIVE-27798: Correct configuration item in hive-site.xml in docker. (#4803). (xiaolin84250 , Reviewed by Ayush Saxena) add 5d58a210375 HIVE-27686 ORC upgraded to 1.8.5. (#4690) (Zoltan Ratkai reviewed by Laszlo Bodor) No new revisions were added by this update. Summary of changes: .../hive/ql/txn/compactor/TestCompactor.java | 6 -- .../ql/txn/compactor/TestCrudCompactorOnTez.java | 105 +++-- pom.xml| 2 +- .../apache/hadoop/hive/ql/TestTxnNoBuckets.java| 2 +- .../acid_bloom_filter_orc_file_dump.q | 1 + .../test/queries/clientpositive/acid_no_buckets.q | 1 + .../queries/clientpositive/default_constraint.q| 1 + ql/src/test/queries/clientpositive/deleteAnalyze.q | 1 + .../materialized_view_create_rewrite.q | 1 + .../materialized_view_create_rewrite_4.q | 2 +- ...rialized_view_create_rewrite_by_text_multi_db.q | 1 + .../materialized_view_create_rewrite_dummy.q | 1 + .../materialized_view_create_rewrite_multi_db.q| 1 + .../materialized_view_create_rewrite_time_window.q | 1 + ...aterialized_view_create_rewrite_time_window_2.q | 1 + ql/src/test/queries/clientpositive/orc_analyze.q | 1 + ql/src/test/queries/clientpositive/orc_file_dump.q | 1 + .../queries/clientpositive/orc_llap_counters.q | 1 + .../queries/clientpositive/orc_llap_counters1.q| 1 + ql/src/test/queries/clientpositive/orc_merge10.q | 2 +- ql/src/test/queries/clientpositive/orc_merge11.q | 1 + ql/src/test/queries/clientpositive/orc_merge12.q | 1 + ql/src/test/queries/clientpositive/row__id.q | 1 + ql/src/test/queries/clientpositive/smb_mapjoin_1.q | 1 + .../test/queries/clientpositive/sqlmerge_stats.q | 1 + .../test/queries/clientpositive/stats_histogram.q | 1 + .../queries/clientpositive/stats_histogram_null.q | 1 + ql/src/test/queries/clientpositive/stats_part2.q | 1 + .../beeline/materialized_view_create_rewrite.q.out | 4 
+- .../clientpositive/beeline/smb_mapjoin_1.q.out | 2 +- .../llap/acid_bloom_filter_orc_file_dump.q.out | 4 +- .../clientpositive/llap/acid_no_buckets.q.out | 4 +- .../clientpositive/llap/default_constraint.q.out | 14 +-- .../clientpositive/llap/deleteAnalyze.q.out| 4 +- .../llap/materialized_view_create_rewrite.q.out| 4 +- .../llap/materialized_view_create_rewrite_4.q.out | 12 +-- ...ized_view_create_rewrite_by_text_multi_db.q.out | 4 +- .../materialized_view_create_rewrite_dummy.q.out | 4 +- ...materialized_view_create_rewrite_multi_db.q.out | 4 +- ...erialized_view_create_rewrite_time_window.q.out | 6 +- ...ialized_view_create_rewrite_time_window_2.q.out | 6 +- .../results/clientpositive/llap/orc_analyze.q.out | 34 +++ .../clientpositive/llap/orc_file_dump.q.out| 6 +- .../clientpositive/llap/orc_llap_counters.q.out| 2 +- .../clientpositive/llap/orc_llap_counters1.q.out | 2 +- .../results/clientpositive/llap/orc_merge10.q.out | 4 +- .../results/clientpositive/llap/orc_merge11.q.out | 6 +- .../clientpositive/llap/sqlmerge_stats.q.out | 10 +- .../clientpositive/llap/stats_histogram.q.out | 2 +- .../clientpositive/llap/stats_histogram_null.q.out | 2 +- .../results/clientpositive/llap/stats_part2.q.out | 30 +++--- ql/src/test/results/clientpositive/row__id.q.out | 18 ++-- .../results/clientpositive/tez/orc_merge12.q.out | 2 +- standalone-metastore/pom.xml | 2 +- 54 files changed, 179 insertions(+), 154 deletions(-)
[hive] branch master updated (c126422a91b -> 357714abbf1)
This is an automated email from the ASF dual-hosted git repository. ayushsaxena pushed a change to branch master in repository https://gitbox.apache.org/repos/asf/hive.git from c126422a91b HIVE-27695: HIVE-26828: Intermittent OOM when running TestMiniTezCliDriver (Stamatis Zampetakis reviewed by Ayush Saxena) add 357714abbf1 HIVE-27798: Correct configuration item in hive-site.xml in docker. (#4803). (xiaolin84250 , Reviewed by Ayush Saxena) No new revisions were added by this update. Summary of changes: packaging/src/docker/conf/hive-site.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
[hive] branch master updated: HIVE-27695: HIVE-26828: Intermittent OOM when running TestMiniTezCliDriver (Stamatis Zampetakis reviewed by Ayush Saxena)
This is an automated email from the ASF dual-hosted git repository. zabetak pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hive.git The following commit(s) were added to refs/heads/master by this push: new c126422a91b HIVE-27695: HIVE-26828: Intermittent OOM when running TestMiniTezCliDriver (Stamatis Zampetakis reviewed by Ayush Saxena) c126422a91b is described below commit c126422a91be695c75ec4a750638a0aa4d1ba6cd Author: Stamatis Zampetakis AuthorDate: Wed Oct 11 12:17:23 2023 +0200 HIVE-27695: HIVE-26828: Intermittent OOM when running TestMiniTezCliDriver (Stamatis Zampetakis reviewed by Ayush Saxena) java.lang.OutOfMemoryError: GC overhead limit exceeded is thrown by the Tez Application Master (AM) because the current heap size (128MB) is not enough to accommodate the needs of multiple Tez containers running. Each running container requires roughly 10MB of memory in the AM. The AM accumulates/manipulates multiple configuration objects (some of them retaining as much as 1MB of heap) per container. The heap gradually becomes full and GC is spending a lot of CPU time to clean things up without really making much progress since containers are reused and heap cannot shrink. There are multiple solutions to the problem but the easiest and most effective is to increase the heap size for the AM. At this point in time, 512MB is a good value. The hybridgrace_hashjoin_2.q test which was failing due to OOM can now be re-enabled. 
Close apache/hive#4792 --- data/conf/tez/tez-site.xml | 2 +- ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/data/conf/tez/tez-site.xml b/data/conf/tez/tez-site.xml index ba4df319a55..88adb6a57e8 100644 --- a/data/conf/tez/tez-site.xml +++ b/data/conf/tez/tez-site.xml @@ -1,7 +1,7 @@ tez.am.resource.memory.mb -128 +512 tez.task.resource.memory.mb diff --git a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q index 93b8c13a49f..6ed771ba685 100644 --- a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q +++ b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q @@ -1,7 +1,6 @@ --! qt:dataset:srcpart --! qt:dataset:src1 --! qt:dataset:src ---! qt:disabled:HIVE-26820 Disable hybridgrace_hashjoin_2.q flaky test set hive.mapred.mode=nonstrict; set hive.explain.user=false; -- Hybrid Grace Hash Join
[hive] branch branch-3 updated: HIVE-27784: Backport of HIVE-20364, HIVE-20549 to branch-3 (#4789)
This is an automated email from the ASF dual-hosted git repository. sankarh pushed a commit to branch branch-3 in repository https://gitbox.apache.org/repos/asf/hive.git The following commit(s) were added to refs/heads/branch-3 by this push: new b1503b5123f HIVE-27784: Backport of HIVE-20364, HIVE-20549 to branch-3 (#4789) b1503b5123f is described below commit b1503b5123fde96cf7a7583e41a70083c704b3cd Author: Aman Raj <104416558+amanraj2...@users.noreply.github.com> AuthorDate: Mon Oct 16 13:31:43 2023 +0530 HIVE-27784: Backport of HIVE-20364, HIVE-20549 to branch-3 (#4789) * HIVE-20364: Update default for hive.map.aggr.hash.min.reduction * HIVE-20549: Allow user set query tag, and kill query with tag (Daniel Dai, reviewed by Thejas Nair, Sergey Shelukhin) * Removed explainanalyze_2.q test to fix in HIVE-27795 - Co-authored-by: Ashutosh Chauhan Co-authored-by: Mahesh Kumar Behera Co-authored-by: Daniel Dai Signed-off-by: Sankar Hariappan Closes (#4789) --- .../java/org/apache/hadoop/hive/conf/HiveConf.java | 7 +- .../hive/jdbc/TestJdbcWithMiniLlapArrow.java | 153 +++-- .../test/resources/testconfiguration.properties| 5 +- .../java/org/apache/hive/jdbc/HiveStatement.java | 6 +- ql/src/java/org/apache/hadoop/hive/ql/Driver.java | 7 +- .../java/org/apache/hadoop/hive/ql/QueryState.java | 23 +++- .../hive/ql/exec/tez/KillTriggerActionHandler.java | 5 + .../hadoop/hive/ql/exec/tez/WorkloadManager.java | 3 + .../hive/ql/parse/ReplicationSemanticAnalyzer.java | 2 +- .../clientnegative/authorization_kill_query.q | 15 -- .../service/cli/operation/OperationManager.java| 29 ++-- .../apache/hive/service/server/KillQueryImpl.java | 112 +++ 12 files changed, 257 insertions(+), 110 deletions(-) diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index bf20a78b588..6bd226c442f 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1519,6 +1519,10 @@ public class HiveConf extends Configuration { HIVEQUERYID("hive.query.id", "", "ID for query being executed (might be multiple per a session)"), + HIVEQUERYTAG("hive.query.tag", null, "Tag for the queries in the session. User can kill the queries with the tag " + +"in another session. Currently there is no tag duplication check, user need to make sure his tag is unique. " + +"Also 'kill query' needs to be issued to all HiveServer2 instances to proper kill the queries"), + HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"), // hive jar @@ -1688,7 +1692,7 @@ public class HiveConf extends Configuration { "How many rows with the same key value should be cached in memory per smb joined table."), HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 10, "Number of rows after which size of the grouping keys/aggregation classes is performed"), -HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5, +HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.99, "Portion of total memory to be used by map-side group aggregation hash table"), HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3, "Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"), @@ -5451,6 +5455,7 @@ public class HiveConf extends Configuration { ConfVars.SHOW_JOB_FAIL_DEBUG_INFO.varname, ConfVars.TASKLOG_DEBUG_TIMEOUT.varname, ConfVars.HIVEQUERYID.varname, +ConfVars.HIVEQUERYTAG.varname, }; /** diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java index 3dcc4928b1a..dcb8701e696 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java +++ 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java @@ -43,9 +43,12 @@ import org.apache.hadoop.hive.llap.LlapArrowRowInputFormat; import org.apache.hive.jdbc.miniHS2.MiniHS2; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * TestJdbcWithMiniLlap for Arrow format @@ -57,6 +60,7 @@ public class TestJdbcWithMiniLlapArrow extends BaseJdbcWithMiniLlap { private static final String tableName = "testJdbcMinihs2Tbl"; private static String dataFileDir; private stati