[hive] branch master updated: HIVE-25734: Wrongly-typed constant in case expression leads to incorrect empty result (#2815) ( Alessandro Solimando reviewed by Zoltan Haindrich)

2021-11-30 Thread kgyrtkirk
This is an automated email from the ASF dual-hosted git repository.

kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 17f7208  HIVE-25734: Wrongly-typed constant in case expression leads to incorrect empty result (#2815) ( Alessandro Solimando reviewed by Zoltan Haindrich)
17f7208 is described below

commit 17f72087bd0fc8b5d306e01277052cdcc87c8556
Author: Alessandro Solimando 
AuthorDate: Tue Nov 30 09:54:33 2021 +0100

    HIVE-25734: Wrongly-typed constant in case expression leads to incorrect empty result (#2815) ( Alessandro Solimando reviewed by Zoltan Haindrich)
---
 .../rules/HivePointLookupOptimizerRule.java|  35 +++-
 .../calcite/translator/RexNodeConverter.java   |  14 +-
 .../rules/TestHivePointLookupOptimizerRule.java| 165 +-
 .../calcite/translator/TestRexNodeConverter.java   | 186 +
 .../clientpositive/cbo_case_when_wrong_type.q  |  10 ++
 .../llap/cbo_case_when_wrong_type.q.out|  84 ++
 .../perf/tpcds30tb/tez/query39.q.out   |   8 +-
 7 files changed, 484 insertions(+), 18 deletions(-)
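
The gist of the fix: the rule collapses several IN/EQUALS predicates on the same expression by intersecting their constant lists with retainAll(). When those constants do not share a type (for example an INT literal meeting a CHAR literal produced by a CASE branch), the intersection can come out empty and the predicate is folded away, producing the incorrect empty result. The following stand-alone sketch illustrates that failure mode and the kind of same-type guard the patch adds; it uses plain Java collections rather than Calcite RexNodes, and the class and method names are invented for illustration only.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;

    // Simplified illustration only -- not the Hive/Calcite code.
    public class SameTypeGuardSketch {

      // Rough analogue of the new shareSameType() check: merge only when all
      // constants across both collections have a single runtime type.
      static boolean shareSameType(Collection<?> a, Collection<?> b) {
        List<Object> all = new ArrayList<>(a);
        all.addAll(b);
        return all.stream().map(Object::getClass).distinct().count() <= 1;
      }

      public static void main(String[] args) {
        List<Object> knownConstants = new ArrayList<>(Arrays.asList(1, 2, 3)); // INT-like literals
        List<Object> expressions = Arrays.asList("1", "2");                    // CHAR-like literals

        if (!shareSameType(knownConstants, expressions)) {
          // Bail out, analogous to `return call;` in the rule: keep the
          // original predicate instead of producing an empty intersection.
          System.out.println("constant types differ -> leave the call untouched");
          return;
        }
        knownConstants.retainAll(expressions); // safe only when the types match
      }
    }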

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
index bf69d3a..da6e9e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
@@ -29,6 +29,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
@@ -387,12 +388,12 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
 }
 
 private static boolean isColumnExpr(RexNode node) {
-  return !node.getType().isStruct() && HiveCalciteUtil.getInputRefs(node).size() > 0
+  return !node.getType().isStruct() && !HiveCalciteUtil.getInputRefs(node).isEmpty()
   && HiveCalciteUtil.isDeterministic(node);
 }

 private static boolean isConstExpr(RexNode node) {
-  return !node.getType().isStruct() && HiveCalciteUtil.getInputRefs(node).size() == 0
+  return !node.getType().isStruct() && HiveCalciteUtil.getInputRefs(node).isEmpty()
   && HiveCalciteUtil.isDeterministic(node);
 }
 
@@ -508,7 +509,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
 
   for (Entry, Collection> sa : assignmentGroups.asMap().entrySet()) {
 // skip opaque
-if (sa.getKey().size() == 0) {
+if (sa.getKey().isEmpty()) {
   continue;
 }
 // not enough equalities should not be handled
@@ -593,6 +594,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
   // into a null value.
   final Multimap<RexNode, RexNode> inLHSExprToRHSNullableExprs = LinkedHashMultimap.create();
   final List<RexNode> operands = new ArrayList<>(RexUtil.flattenAnd(call.getOperands()));
+
   for (int i = 0; i < operands.size(); i++) {
 RexNode operand = operands.get(i);
 if (operand.getKind() == SqlKind.IN) {
@@ -614,7 +616,11 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
 inLHSExprToRHSNullableExprs.put(ref, constNode);
   }
 }
-inLHSExprToRHSExprs.get(ref).retainAll(expressions);
+Collection<RexNode> knownConstants = inLHSExprToRHSExprs.get(ref);
+if (!shareSameType(knownConstants, expressions)) {
+  return call;
+}
+knownConstants.retainAll(expressions);
   } else {
 for (int j = 1; j < inCall.getOperands().size(); j++) {
   RexNode constNode = inCall.getOperands().get(j);
@@ -639,7 +645,12 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
 inLHSExprToRHSNullableExprs.put(c.exprNode, c.constNode);
   }
   if (inLHSExprToRHSExprs.containsKey(c.exprNode)) {
-inLHSExprToRHSExprs.get(c.exprNode).retainAll(Collections.singleton(c.constNode));
+Collection<RexNode> knownConstants = inLHSExprToRHSExprs.get(c.exprNode);
+Collection<RexNode> nextConstant = Collections.singleton(c.constNode);
+if (!shareSameType(knownConstants, nextConstant)) {
+  return call;
+}
+knownConstants.retainAll(nextConstant);
   } else {
 inLHSExprToRHSExprs.put(c.exprNode, c.constNode);
   }
@@ -655,6 +666,20 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
   return RexUtil.composeConjunction(rexBuilder, newOperands, 

[hive] branch master updated (a8e5073 -> 3a610dc)

2021-11-30 Thread kgyrtkirk
This is an automated email from the ASF dual-hosted git repository.

kgyrtkirk pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git.


from a8e5073  HIVE-25710: Config used to enable non-blocking TRUNCATE is not properly propagated (Denys Kuzmenko, reviewed by Karen Coppage and Peter Vary)
 add 3a610dc  HIVE-25749: Check if RelMetadataQuery.collations() returns null to avoid NPE (#2823) (Alessandro Solimando reviewed by Aman Sinha and Zoltan Haindrich)

No new revisions were added by this update.

Summary of changes:
 .../hive/ql/optimizer/calcite/reloperators/HiveJoin.java | 12 +++-
 .../hive/ql/optimizer/calcite/rules/RelFieldTrimmer.java |  9 +
 2 files changed, 12 insertions(+), 9 deletions(-)
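
For context, RelMetadataQuery.collations() is allowed to return null when no collation metadata can be derived, so callers must treat null as "nothing known" before iterating. The snippet below is a self-contained sketch of that defensive pattern, not the actual HiveJoin/RelFieldTrimmer change; the helper names are invented for illustration.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    // Simplified illustration of the null-guard pattern behind HIVE-25749.
    public class NullSafeMetadataSketch {

      // Stand-in for a metadata call such as RelMetadataQuery.collations(rel),
      // which may return null when nothing is known.
      static List<String> collationsOrNull(boolean known) {
        return known ? Arrays.asList("col0 ASC") : null;
      }

      // Normalize null to an empty list so downstream code never dereferences null.
      static List<String> safeCollations(boolean known) {
        List<String> collations = collationsOrNull(known);
        return collations == null ? Collections.<String>emptyList() : collations;
      }

      public static void main(String[] args) {
        System.out.println(safeCollations(true));  // [col0 ASC]
        System.out.println(safeCollations(false)); // [] instead of an NPE later on
      }
    }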


[hive] branch master updated: HIVE-25710: Config used to enable non-blocking TRUNCATE is not properly propagated (Denys Kuzmenko, reviewed by Karen Coppage and Peter Vary)

2021-11-30 Thread dkuzmenko
This is an automated email from the ASF dual-hosted git repository.

dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new a8e5073  HIVE-25710: Config used to enable non-blocking TRUNCATE is not properly propagated (Denys Kuzmenko, reviewed by Karen Coppage and Peter Vary)
a8e5073 is described below

commit a8e50734e0460e506f1762fbe0f628bcb444b8f5
Author: Denys Kuzmenko 
AuthorDate: Tue Nov 30 10:09:06 2021 +0200

    HIVE-25710: Config used to enable non-blocking TRUNCATE is not properly propagated (Denys Kuzmenko, reviewed by Karen Coppage and Peter Vary)

Closes #2796
---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java | 11 +++--
 .../table/misc/truncate/TruncateTableAnalyzer.java |  9 +---
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   |  4 +++-
 .../ql/metadata/SessionHiveMetaStoreClient.java| 12 ++
 .../org/apache/hadoop/hive/ql/TestTxnCommands.java | 10 +++--
 .../hadoop/hive/ql/TestTxnCommandsForMmTable.java  | 14 
 .../hadoop/hive/ql/lockmgr/TestDbTxnManager2.java  |  2 +-
 .../hadoop/hive/metastore/HiveMetaStoreClient.java | 26 +++---
 .../hadoop/hive/metastore/IMetaStoreClient.java|  2 ++
 .../hadoop/hive/metastore/conf/MetastoreConf.java  |  3 ---
 .../apache/hadoop/hive/metastore/HMSHandler.java   | 12 +-
 .../metastore/HiveMetaStoreClientPreCatalog.java   |  7 ++
 12 files changed, 81 insertions(+), 31 deletions(-)
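
As the diff below shows, the patch moves the non-blocking TRUNCATE switch from MetastoreConf (TRUNCATE_ACID_USE_BASE) to HiveConf, adding hive.acid.truncate.usebase and hive.acid.lockless.reads.enabled so the setting travels with the session configuration. A rough usage sketch follows (assuming hive-common on the classpath; not taken from the commit itself), reading the flags through the same HiveConf accessors the patch uses:

    import org.apache.hadoop.hive.conf.HiveConf;

    // Hedged sketch: evaluate the two new flags the way TruncateTableAnalyzer does.
    public class TruncateFlagSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(HiveConf.ConfVars.HIVE_ACID_TRUNCATE_USE_BASE, true);

        // Non-blocking truncate (write a new base dir instead of deleting data)
        // is requested when either flag is enabled on a transactional table.
        boolean truncateUseBase =
            HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TRUNCATE_USE_BASE)
                || HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED);

        System.out.println("truncate keeps data files in place: " + truncateUseBase);
      }
    }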

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index c0325a6..988cec8 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3060,8 +3060,15 @@ public class HiveConf extends Configuration {
 "Creates an _orc_acid_version file along with acid files, to store the 
version data"),
 
 HIVE_TXN_READONLY_ENABLED("hive.txn.readonly.enabled", false,
-  "Enables read-only transaction classification and related 
optimizations"),
-
+"Enables read-only transaction classification and related 
optimizations"),
+
+HIVE_ACID_LOCKLESS_READS_ENABLED("hive.acid.lockless.reads.enabled", false,
+"Enables lockless reads"),
+
+HIVE_ACID_TRUNCATE_USE_BASE("hive.acid.truncate.usebase", false,
+"If enabled, truncate for transactional tables will not delete the 
data directories,\n" +
+"rather create a new base directory with no datafiles."),
+
 // Configs having to do with DeltaFilesMetricReporter, which collects lists of most recently active tables
 // with the most number of active/obsolete deltas.
 HIVE_TXN_ACID_METRICS_MAX_CACHE_SIZE("hive.txn.acid.metrics.max.cache.size", 100,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableAnalyzer.java
index ab67994..47d39d4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableAnalyzer.java
@@ -116,10 +116,13 @@ public class TruncateTableAnalyzer extends AbstractBaseAlterTableAnalyzer {
 
   private void addTruncateTableOutputs(ASTNode root, Table table, Map<String, String> partitionSpec)
   throws SemanticException {
-boolean truncateKeepsDataFiles = AcidUtils.isTransactionalTable(table) &&
-MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.TRUNCATE_ACID_USE_BASE);
+boolean truncateUseBase = (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TRUNCATE_USE_BASE)
+|| HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED))
+  && AcidUtils.isTransactionalTable(table);
+
 WriteEntity.WriteType writeType =
-truncateKeepsDataFiles ? WriteEntity.WriteType.DDL_EXCL_WRITE : WriteEntity.WriteType.DDL_EXCLUSIVE;
+truncateUseBase ? WriteEntity.WriteType.DDL_EXCL_WRITE : WriteEntity.WriteType.DDL_EXCLUSIVE;
+
 if (partitionSpec == null) {
   if (!table.isPartitioned()) {
 outputs.add(new WriteEntity(table, writeType));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 13c0514..b5a8d36 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1433,8 +1433,10 @@ public class Hive {
   if (snapshot == null) {
 getMSC().truncateTable(table.getDbName(), table.getTableName(), partNames);
   } else {
+boolean truncateUseBase = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TRUNCATE_USE_BASE)
+  || HiveConf.getBoolVar(conf,