[hive] branch master updated: HIVE-27346: Getting exception for wildcard search for database and table name (#4326) (Hongdan Zhu, reviewed by Attila Turoczy, Zhihua Deng)

2023-10-18 Thread dengzh
This is an automated email from the ASF dual-hosted git repository.

dengzh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 162da6d8618 HIVE-27346: Getting exception for wildcard search for database and table name (#4326) (Hongdan Zhu, reviewed by Attila Turoczy, Zhihua Deng)
162da6d8618 is described below

commit 162da6d861897761aa0255c43b4c1016e1bb9d1b
Author: Daniel (Hongdan) Zhu <50390050+danielzh...@users.noreply.github.com>
AuthorDate: Wed Oct 18 18:27:07 2023 -0700

HIVE-27346: Getting exception for wildcard search for database and table name (#4326) (Hongdan Zhu, reviewed by Attila Turoczy, Zhihua Deng)
---
 .../apache/hadoop/hive/metastore/ObjectStore.java   |  2 +-
 .../hive/metastore/client/TestTablesGetExists.java  | 21 +
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index e7df10673b5..e8996cb2498 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -2163,7 +2163,7 @@ public class ObjectStore implements RawStore, Configurable {
 
       StringBuilder filterBuilder = new StringBuilder();
       List<String> parameterVals = new ArrayList<>();
-      appendSimpleCondition(filterBuilder, "database.name", new String[] {db}, parameterVals);
+      appendPatternCondition(filterBuilder, "database.name", new String[] {db}, parameterVals);
       appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals);
       if(tbl_names != null){
         appendSimpleCondition(filterBuilder, "tableName", lowered_tbl_names.toArray(new String[0]), parameterVals);
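The fix itself is the one-line swap above: the database name now goes through the pattern path instead of the equality path, so a name such as "def*" is treated as a metastore-style wildcard rather than a literal string. As a rough standalone sketch of what a pattern condition amounts to (a hypothetical helper, not the actual ObjectStore code, which builds a JDO matches() filter): '*' becomes the regex '.*' and '|' separates alternatives.

import java.util.Arrays;

public class PatternConditionSketch {
  // Hypothetical translation: "def*|oth*" -> "(?i)def.*|(?i)oth.*"
  static String toRegex(String pattern) {
    StringBuilder regex = new StringBuilder();
    for (String alternative : pattern.split("\\|")) {
      if (regex.length() > 0) {
        regex.append('|');
      }
      regex.append("(?i)").append(alternative.replaceAll("\\*", ".*"));
    }
    return regex.toString();
  }

  public static void main(String[] args) {
    String regex = toRegex("def*");
    for (String db : Arrays.asList("default", "def_backup", "other_db")) {
      // Prints true, true, false: the wildcard now selects matching databases,
      // where the old equality path treated "def*" as a literal name.
      System.out.println(db + " -> " + db.matches(regex));
    }
  }
}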
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java
index 273054c6dab..f2937bc8576 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java
@@ -271,6 +271,27 @@ public class TestTablesGetExists extends MetaStoreClientTest {
     Assert.assertEquals("Found functions size", 1, tables.size());
     Assert.assertTrue("Comparing tablenames", tables.contains(testTables[6].getTableName()));
 
+    // Find tables by using the wildcard sign "*"
+    tables = client.getTables(DEFAULT_DATABASE, "*");
+    Assert.assertEquals("All tables size", 5, tables.size());
+    Assert.assertTrue("Comparing tablenames", tables.contains(testTables[0].getTableName()));
+    Assert.assertTrue("Comparing tablenames", tables.contains(testTables[1].getTableName()));
+    Assert.assertTrue("Comparing tablenames", tables.contains(testTables[2].getTableName()));
+    Assert.assertTrue("Comparing tablenames", tables.contains(testTables[3].getTableName()));
+    Assert.assertTrue("Comparing tablenames", tables.contains(testTables[4].getTableName()));
+
+    tables = client.getTables(OTHER_DATABASE, "*");
+    Assert.assertEquals("All tables size", 2, tables.size());
+    Assert.assertTrue("Comparing tablenames", tables.contains(testTables[5].getTableName()));
+    Assert.assertTrue("Comparing tablenames", tables.contains(testTables[6].getTableName()));
+
+    tables = client.getTables("*", "*");
+    Assert.assertEquals("All tables size", 7, tables.size());
+    tables = client.getTables("d*", "*");
+    Assert.assertEquals("All tables size", 7, tables.size());
+    tables = client.getTables("def*", "*");
+    Assert.assertEquals("All tables size", 5, tables.size());
+
     // Look for tables but do not find any
     tables = client.getTables(DEFAULT_DATABASE, "*_not_such_function_*");
     Assert.assertEquals("No such table size", 0, tables.size());



[hive] branch branch-3 updated: HIVE-27604: Backport of HIVE-21167 to branch-3 (#4583)

2023-10-18 Thread sankarh
This is an automated email from the ASF dual-hosted git repository.

sankarh pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 639b2dba6a6 HIVE-27604: Backport of HIVE-21167 to branch-3 (#4583)
639b2dba6a6 is described below

commit 639b2dba6a61ad8bff04c924830fe733108bb620
Author: Aman Raj <104416558+amanraj2...@users.noreply.github.com>
AuthorDate: Wed Oct 18 17:14:08 2023 +0530

HIVE-27604: Backport of HIVE-21167 to branch-3 (#4583)

* HIVE-21167: Bucketing: Bucketing version 1 is incorrectly partitioning data (Deepak Jaiswal, reviewed by Jason Dere and Vineet Garg)

-
Co-authored-by: Deepak Jaiswal 

Signed-off-by: Sankar Hariappan 
Closes (#4583)
---
 .../apache/hadoop/hive/ql/parse/TezCompiler.java   |  47 ++-
 .../queries/clientpositive/murmur_hash_migration.q |  35 +++
 .../llap/murmur_hash_migration.q.out   | 332 +
 3 files changed, 390 insertions(+), 24 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
index a92d4f643e6..95ef33ffe20 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
@@ -167,9 +167,6 @@ public class TezCompiler extends TaskCompiler {
     runStatsAnnotation(procCtx);
     perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Setup stats in the operator plan");
 
-    // Update bucketing version of ReduceSinkOp if needed
-    updateBucketingVersionForUpgrade(procCtx);
-
     perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     // run the optimizations that use stats for optimization
     runStatsDependentOptimizations(procCtx, inputs, outputs);
@@ -201,6 +198,15 @@ public class TezCompiler extends TaskCompiler {
       new ConstantPropagate(ConstantPropagateOption.SHORTCUT).transform(procCtx.parseContext);
     }
 
+    // ATTENTION : DO NOT, I REPEAT, DO NOT WRITE ANYTHING AFTER updateBucketingVersionForUpgrade()
+    // ANYTHING WHICH NEEDS TO BE ADDED MUST BE ADDED ABOVE
+    // This call updates the bucketing version of final ReduceSinkOp based on
+    // the bucketing version of FileSinkOp. This operation must happen at the
+    // end to ensure there is no further rewrite of plan which may end up
+    // removing/updating the ReduceSinkOp as was the case with SortedDynPartitionOptimizer
+    // Update bucketing version of ReduceSinkOp if needed
+    updateBucketingVersionForUpgrade(procCtx);
+
   }
 
   private void runCycleAnalysisForPartitionPruning(OptimizeTezProcContext procCtx,
@@ -1654,30 +1660,23 @@ public class TezCompiler extends TaskCompiler {
 
 
     for (FileSinkOperator fsOp : fsOpsAll) {
-      Operator<?> parentOfFS = fsOp.getParentOperators().get(0);
-      if (parentOfFS instanceof GroupByOperator) {
-        GroupByOperator gbyOp = (GroupByOperator) parentOfFS;
-        List<String> aggs = gbyOp.getConf().getAggregatorStrings();
-        boolean compute_stats = false;
-        for (String agg : aggs) {
-          if (agg.equalsIgnoreCase("compute_stats")) {
-            compute_stats = true;
-            break;
-          }
-        }
-        if (compute_stats) {
+      if (!fsOp.getConf().getTableInfo().isSetBucketingVersion()) {
+        continue;
+      }
+      // Look for direct parent ReduceSinkOp
+      // If there are more than 1 parent, bail out.
+      Operator<?> parent = fsOp;
+      List<Operator<?>> parentOps = parent.getParentOperators();
+      while (parentOps != null && parentOps.size() == 1) {
+        parent = parentOps.get(0);
+        if (!(parent instanceof ReduceSinkOperator)) {
+          parentOps = parent.getParentOperators();
           continue;
         }
-      }
 
-      // Not compute_stats
-      Set<ReduceSinkOperator> rsOps = OperatorUtils.findOperatorsUpstream(parentOfFS, ReduceSinkOperator.class);
-      if (rsOps.isEmpty()) {
-        continue;
-      }
-      // Skip setting if the bucketing version is not set in FileSinkOp.
-      if (fsOp.getConf().getTableInfo().isSetBucketingVersion()) {
-        rsOps.iterator().next().setBucketingVersion(fsOp.getConf().getTableInfo().getBucketingVersion());
+        // Found the target RSOp
+        parent.setBucketingVersion(fsOp.getConf().getTableInfo().getBucketingVersion());
+        break;
       }
     }
   }
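The reworked loop replaces the old "find any ReduceSinkOperator somewhere upstream" search with a strict walk up the single-parent chain: the bucketing version is propagated only when a ReduceSinkOp feeds the FileSinkOp through a chain of unary operators, and the walk bails out at the first fork. A minimal sketch of that traversal with stub classes (not Hive's actual operator types):

import java.util.Collections;
import java.util.List;

public class BucketingVersionWalkSketch {
  static class Operator {
    List<Operator> parents = Collections.emptyList();
    int bucketingVersion = -1;
  }

  static class ReduceSinkOperator extends Operator { }

  // Follow single-parent links upward from the FileSinkOp stand-in; stamp
  // the version on the first ReduceSinkOperator found, bail out at a fork.
  static void propagate(Operator fileSink, int version) {
    Operator parent = fileSink;
    List<Operator> parentOps = parent.parents;
    while (parentOps != null && parentOps.size() == 1) {
      parent = parentOps.get(0);
      if (!(parent instanceof ReduceSinkOperator)) {
        parentOps = parent.parents;
        continue;
      }
      parent.bucketingVersion = version; // found the target RSOp
      break;
    }
  }

  public static void main(String[] args) {
    ReduceSinkOperator rs = new ReduceSinkOperator();
    Operator select = new Operator();   // a unary operator in between
    select.parents = List.of(rs);
    Operator fileSink = new Operator(); // stands in for FileSinkOperator
    fileSink.parents = List.of(select);

    propagate(fileSink, 2);
    System.out.println(rs.bucketingVersion); // prints 2
  }
}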
diff --git a/ql/src/test/queries/clientpositive/murmur_hash_migration.q b/ql/src/test/queries/clientpositive/murmur_hash_migration.q
index 2b8da9f6836..7acea46b62b 100644
--- a/ql/src/test/queries/clientpositive/murmur_hash_migration.q
+++ b/ql/src/test/queries/clientpositive/murmur_hash_migration.q
@@ -59,3 +59,38 @@ select t1.key, t1.value, t2.key, t2.value from srcbucket_mapjoin_n18 t1, srcbuck
 explain
 select t1.key, t1.value, t2.key, t2.value from