[hive] branch master updated: HIVE-26160: Materialized View rewrite does not check tables scanned in sub-query expressions (Krisztian Kasa, reviewed by Stamatis Zampetakis)

2022-04-27 Thread krisztiankasa
This is an automated email from the ASF dual-hosted git repository.

krisztiankasa pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 192a9a9aa82 HIVE-26160: Materialized View rewrite does not check tables scanned in sub-query expressions (Krisztian Kasa, reviewed by Stamatis Zampetakis)
192a9a9aa82 is described below

commit 192a9a9aa822bd724b2fd8070435dcd6e2f2cc03
Author: Krisztian Kasa 
AuthorDate: Thu Apr 28 06:10:11 2022 +0200

HIVE-26160: Materialized View rewrite does not check tables scanned in sub-query expressions (Krisztian Kasa, reviewed by Stamatis Zampetakis)
---
 .../hadoop/hive/ql/parse/CalcitePlanner.java   | 15 ++--
 .../materialized_view_rewrite_by_text_9.q  | 22 ++
 .../llap/materialized_view_rewrite_by_text_9.q.out | 92 ++
 3 files changed, 124 insertions(+), 5 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index cb3fddb60b7..e522fc9cea7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -1680,8 +1680,6 @@ public class CalcitePlanner extends SemanticAnalyzer {
 LOG.debug("Initial CBO Plan:\n" + RelOptUtil.toString(calcitePlan));
   }
 
-  calcitePlan = applyMaterializedViewRewritingByText(ast, calcitePlan, optCluster);
-
   // Create executor
   RexExecutor executorProvider = new HiveRexExecutorImpl();
   calcitePlan.getCluster().getPlanner().setExecutor(executorProvider);
@@ -1691,6 +1689,9 @@ public class CalcitePlanner extends SemanticAnalyzer {
       RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(mdProvider.getMetadataProvider()));
       optCluster.invalidateMetadataQuery();
 
+  calcitePlan = applyMaterializedViewRewritingByText(
+  ast, calcitePlan, optCluster, mdProvider.getMetadataProvider());
+
   // We need to get the ColumnAccessInfo and viewToTableSchema for views.
   HiveRelFieldTrimmer.get()
   .trim(HiveRelFactories.HIVE_BUILDER.create(optCluster, null),
@@ -2108,7 +2109,10 @@ public class CalcitePlanner extends SemanticAnalyzer {
 }
 
  private RelNode applyMaterializedViewRewritingByText(
-      ASTNode queryToRewriteAST, RelNode originalPlan, RelOptCluster optCluster) {
+ASTNode queryToRewriteAST,
+RelNode originalPlan,
+RelOptCluster optCluster,
+RelMetadataProvider metadataProvider) {
   if (!isMaterializedViewRewritingByTextEnabled()) {
 return originalPlan;
   }
@@ -2122,8 +2126,9 @@ public class CalcitePlanner extends SemanticAnalyzer {
 queryToRewriteAST.getTokenStopIndex());
 
       ASTNode expandedAST = ParseUtils.parse(expandedQueryText, new Context(conf));
-      Set<TableName> tablesUsedByOriginalPlan = getTablesUsed(originalPlan);
-      RelNode mvScan = getMaterializedViewByAST(expandedAST, optCluster, ANY, db, tablesUsedByOriginalPlan, getTxnMgr());
+      Set<TableName> tablesUsedByOriginalPlan = getTablesUsed(removeSubqueries(originalPlan, metadataProvider));
+      RelNode mvScan = getMaterializedViewByAST(
+          expandedAST, optCluster, ANY, db, tablesUsedByOriginalPlan, getTxnMgr());
 if (mvScan != null) {
   return mvScan;
 }
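
Note on the hunk above: the table set used to pick a matching materialized view is now computed on a plan from which sub-query expressions have been expanded, so tables referenced only inside IN/EXISTS predicates are also counted when checking whether the view is still valid. The removeSubqueries helper itself is not shown in this excerpt; the following is a minimal, hedged sketch of how such a pass could be assembled with Calcite's HepPlanner and the standard sub-query rules (class name and rule selection are illustrative assumptions, not the committed Hive code):

// Hedged sketch only: the committed removeSubqueries in CalcitePlanner may
// differ in rules, planner setup and metadata handling.
import java.util.Collections;

import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.metadata.RelMetadataProvider;
import org.apache.calcite.rel.rules.CoreRules;

public final class SubqueryRemovalSketch {

  /** Expands RexSubQuery expressions (IN / EXISTS / scalar) into joins or
   *  correlates, so a later table-collection step also sees the tables that
   *  were referenced only inside those sub-queries. */
  public static RelNode removeSubqueries(RelNode plan, RelMetadataProvider mdProvider) {
    HepProgram program = new HepProgramBuilder()
        .addRuleInstance(CoreRules.FILTER_SUB_QUERY_TO_CORRELATE)
        .addRuleInstance(CoreRules.PROJECT_SUB_QUERY_TO_CORRELATE)
        .addRuleInstance(CoreRules.JOIN_SUB_QUERY_TO_CORRELATE)
        .build();
    HepPlanner hepPlanner = new HepPlanner(program);
    hepPlanner.registerMetadataProviders(Collections.singletonList(mdProvider));
    hepPlanner.setRoot(plan);
    return hepPlanner.findBestExp();
  }
}

Collecting the tables from the output of such a pass is what lets the second EXPLAIN CBO in the new test below fall back to scanning t1 once the view is outdated.
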
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_by_text_9.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_by_text_9.q
new file mode 100644
index 000..ca4ba36b0c2
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_by_text_9.q
@@ -0,0 +1,22 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.materializedview.rewriting=false;
+
+create table t1(col0 int) STORED AS ORC
+  TBLPROPERTIES ('transactional'='true');
+
+create table t2(col0 int) STORED AS ORC
+  TBLPROPERTIES ('transactional'='true');
+
+create materialized view mat1 as
+select col0 from t1 where col0 = 1 union select col0 from t1 where col0 = 2;
+
+-- View can be used -> rewrite
+explain cbo
+select col0 from t2 where col0 in (select col0 from t1 where col0 = 1 union select col0 from t1 where col0 = 2);
+
+insert into t1(col0) values (2);
+
+-- View can not be used since it is outdated
+explain cbo
+select col0 from t2 where col0 in (select col0 from t1 where col0 = 1 union select col0 from t1 where col0 = 2);
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_9.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_9.q.out
new file mode 100644
index 000..52ce097a039
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/mat

[hive] branch master updated: HIVE-26180: Change MySQLConnectorProvider driver from mariadb to mysql (#3250)

2022-04-27 Thread ngangam
This is an automated email from the ASF dual-hosted git repository.

ngangam pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new e1e565aca8f HIVE-26180: Change MySQLConnectorProvider driver from mariadb to mysql (#3250)
e1e565aca8f is described below

commit e1e565aca8f920b09870111993f4d58d3d421f1f
Author: Butao Zhang <9760681+zhangbu...@users.noreply.github.com>
AuthorDate: Thu Apr 28 12:05:32 2022 +0800

HIVE-26180: Change MySQLConnectorProvider driver from mariadb to mysql (#3250)
---
 .../hive/metastore/dataconnector/jdbc/MySQLConnectorProvider.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/MySQLConnectorProvider.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/MySQLConnectorProvider.java
index 87430c29862..faf54a57466 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/MySQLConnectorProvider.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/MySQLConnectorProvider.java
@@ -31,7 +31,7 @@ import java.sql.Statement;
 public class MySQLConnectorProvider extends AbstractJDBCConnectorProvider {
  private static Logger LOG = LoggerFactory.getLogger(MySQLConnectorProvider.class);
 
-  private static final String DRIVER_CLASS = "org.mariadb.jdbc.Driver";
+  private static final String DRIVER_CLASS = "com.mysql.jdbc.Driver";
 
   public MySQLConnectorProvider(String dbName, DataConnector dataConn) {
 super(dbName, dataConn, DRIVER_CLASS);
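
For context, the provider hands DRIVER_CLASS to the shared JDBC layer, which loads it reflectively before opening connections to the remote database. A minimal, hedged sketch of that pattern with a placeholder URL and credentials (not Hive's actual connection code):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

// Hedged sketch: com.mysql.jdbc.Driver must be on the metastore classpath; it
// ships with MySQL Connector/J 5.x (Connector/J 8.x renames the class to
// com.mysql.cj.jdbc.Driver). URL and credentials below are placeholders.
public final class DriverLoadSketch {
  public static Connection open() throws ClassNotFoundException, SQLException {
    // Load the driver class by name, then ask DriverManager for a connection.
    Class.forName("com.mysql.jdbc.Driver");
    return DriverManager.getConnection(
        "jdbc:mysql://localhost:3306/remote_db", "hive", "secret");
  }
}
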



[hive] branch master updated: HIVE-25091: Implemented connector provider for MSSQL and Oracle (#3167) (Saihemanth Gantasale via Naveen Gangam)

2022-04-27 Thread ngangam
This is an automated email from the ASF dual-hosted git repository.

ngangam pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 21dd8471f46 HIVE-25091: Implemented connector provider for MSSQL and Oracle (#3167) (Saihemanth Gantasale via Naveen Gangam)
21dd8471f46 is described below

commit 21dd8471f4675ec1206b6043119c861ce3b38823
Author: Sai Hemanth Gantasala <68923650+saihemanth-cloud...@users.noreply.github.com>
AuthorDate: Wed Apr 27 20:43:43 2022 -0700

HIVE-25091: Implemented connector provider for MSSQL and Oracle (#3167) (Saihemanth Gantasale via Naveen Gangam)
---
 .../JDBCConnectorProviderFactory.java  | 10 +++
 .../jdbc/AbstractJDBCConnectorProvider.java| 12 ++-
 .../jdbc/DerbySQLConnectorProvider.java|  8 ++
 .../dataconnector/jdbc/MSSQLConnectorProvider.java | 96 +
 .../dataconnector/jdbc/MySQLConnectorProvider.java |  8 ++
 .../jdbc/OracleConnectorProvider.java  | 99 ++
 .../jdbc/PostgreSQLConnectorProvider.java  |  8 ++
 7 files changed, 238 insertions(+), 3 deletions(-)

diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/JDBCConnectorProviderFactory.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/JDBCConnectorProviderFactory.java
index e59f1a6687d..60589e51844 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/JDBCConnectorProviderFactory.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/JDBCConnectorProviderFactory.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.hive.metastore.api.DataConnector;
 import org.apache.hadoop.hive.metastore.dataconnector.jdbc.DerbySQLConnectorProvider;
 import org.apache.hadoop.hive.metastore.dataconnector.jdbc.MySQLConnectorProvider;
 import org.apache.hadoop.hive.metastore.dataconnector.jdbc.PostgreSQLConnectorProvider;
+import org.apache.hadoop.hive.metastore.dataconnector.jdbc.OracleConnectorProvider;
+import org.apache.hadoop.hive.metastore.dataconnector.jdbc.MSSQLConnectorProvider;
 
 import static org.apache.hadoop.hive.metastore.dataconnector.IDataConnectorProvider.*;
 
@@ -41,6 +43,14 @@ public class JDBCConnectorProviderFactory {
   provider = new DerbySQLConnectorProvider(dbName, connector);
   break;
 
+case ORACLE_TYPE:
+  provider = new OracleConnectorProvider(dbName, connector);
+  break;
+
+case MSSQL_TYPE:
+  provider = new MSSQLConnectorProvider(dbName, connector);
+  break;
+
 default:
   throw new RuntimeException("Unsupported JDBC type");
 }
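
The factory above simply maps a connector type to a provider class. A self-contained, hedged sketch of the same dispatch pattern follows; the type keys and driver class names are common defaults used here for illustration, not the committed Hive constants:

import java.util.Locale;

// Hedged, self-contained sketch of the one-provider-per-remote-database-type
// dispatch used by JDBCConnectorProviderFactory after HIVE-25091.
public final class ConnectorDispatchSketch {

  interface ConnectorProvider {
    String driverClass();
  }

  static final class MySqlProvider implements ConnectorProvider {
    public String driverClass() { return "com.mysql.jdbc.Driver"; }
  }

  static final class OracleProvider implements ConnectorProvider {
    public String driverClass() { return "oracle.jdbc.OracleDriver"; }
  }

  static final class MssqlProvider implements ConnectorProvider {
    public String driverClass() { return "com.microsoft.sqlserver.jdbc.SQLServerDriver"; }
  }

  static ConnectorProvider forType(String type) {
    switch (type.toLowerCase(Locale.ROOT)) {
      case "mysql":
        return new MySqlProvider();
      case "oracle":
        return new OracleProvider();
      case "mssql":
        return new MssqlProvider();
      default:
        // Mirrors the factory's behaviour for unknown connector types.
        throw new RuntimeException("Unsupported JDBC type: " + type);
    }
  }
}
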
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
index d60cea50c37..09d0784b929 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
@@ -207,6 +207,10 @@ public abstract class AbstractJDBCConnectorProvider extends AbstractDataConnecto
 
   protected abstract ResultSet fetchTableNames() throws MetaException;
 
+  protected abstract String getCatalogName();
+
+  protected abstract String getDatabaseName();
+
   /**
   * Fetch a single table with the given name, returns a Hive Table object from the remote database
* @return Table A Table object for the matching table, null otherwise.
@@ -220,6 +224,7 @@ public abstract class AbstractJDBCConnectorProvider extends AbstractDataConnecto
   rs = fetchColumnsViaDBMetaData(tableName);
      List<FieldSchema> cols = new ArrayList<>();
   while (rs.next()) {
+String typename = rs.getString("TYPE_NAME");
 FieldSchema fs = new FieldSchema();
 fs.setName(rs.getString("COLUMN_NAME"));
        fs.setType(getDataType(rs.getString("TYPE_NAME"), rs.getInt("COLUMN_SIZE")));
@@ -236,7 +241,7 @@ public abstract class AbstractJDBCConnectorProvider extends AbstractDataConnecto
   //Setting the table properties.
   table.getParameters().put(JDBC_DATABASE_TYPE, this.type);
   table.getParameters().put(JDBC_DRIVER, this.driverClassName);
-  table.getParameters().put(JDBC_TABLE, tableName);
+  table.getParameters().put(JDBC_TABLE, scoped_db+"."+tableName);
   table.getParameters().put(JDBC_URL, this.jdbcUrl);
      table.getParameters().put(hive_metastoreConstants.META_TABLE_STORAGE, JDBC_HIVE_STORAGE_HANDLER_ID);
   table.getParame

[hive] branch master updated: HIVE-26131: Incorrect OutputFormat when describing jdbc connector table (#3200)

2022-04-27 Thread ngangam
This is an automated email from the ASF dual-hosted git repository.

ngangam pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 62b7ef9c2d1 HIVE-26131: Incorrect OutputFormat when describing jdbc connector table (#3200)
62b7ef9c2d1 is described below

commit 62b7ef9c2d16018b764cb7f98bb95778d5f2a17b
Author: Butao Zhang <9760681+zhangbu...@users.noreply.github.com>
AuthorDate: Thu Apr 28 11:32:32 2022 +0800

HIVE-26131: Incorrect OutputFormat when describing jdbc connector table (#3200)
---
 .../metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
index 89cb32925f6..d60cea50c37 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/AbstractJDBCConnectorProvider.java
@@ -354,7 +354,7 @@ public abstract class AbstractJDBCConnectorProvider extends AbstractDataConnecto
   }
 
   @Override protected String getOutputClass() {
-return JDBC_INPUTFORMAT_CLASS;
+return JDBC_OUTPUTFORMAT_CLASS;
   }
   @Override protected String getTableLocation(String tableName) {
 if (warehouse != null) {



[hive] branch master updated: HIVE-26032: Upgrade cron-utils to 9.1.6 (#3099)

2022-04-27 Thread ngangam
This is an automated email from the ASF dual-hosted git repository.

ngangam pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 21d5720da42 HIVE-26032: Upgrade cron-utils to 9.1.6 (#3099)
21d5720da42 is described below

commit 21d5720da421de4cd5d92b211927a9720d323712
Author: Yuming Wang 
AuthorDate: Thu Apr 28 03:28:29 2022 +0800

HIVE-26032: Upgrade cron-utils to 9.1.6 (#3099)
---
 standalone-metastore/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 48fbe91487c..68f63e3769e 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -99,7 +99,7 @@
 1.3
 4.2.0
 3.5.5
-    <cron-utils.version>9.1.3</cron-utils.version>
+    <cron-utils.version>9.1.6</cron-utils.version>
 4.0.3
 2.8.4
 1.7.30



[hive] branch master updated: HIVE-26178: Exclude wood-stox jar in the packaging as it conflicts with hadoop's jar in spark class path (#3248) (Saihemanth Gantasala via Naveen Gangam)

2022-04-27 Thread ngangam
This is an automated email from the ASF dual-hosted git repository.

ngangam pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 672949402b4 HIVE-26178: Exclude wood-stox jar in the packaging as it conflicts with hadoop's jar in spark class path (#3248) (Saihemanth Gantasala via Naveen Gangam)
672949402b4 is described below

commit 672949402b4df9098a207ad1438830d9b09c7ae9
Author: Sai Hemanth Gantasala <68923650+saihemanth-cloud...@users.noreply.github.com>

HIVE-26178: Exclude wood-stox jar in the packaging as it conflicts with hadoop's jar in spark class path (#3248) (Saihemanth Gantasala via Naveen Gangam)
---
 pom.xml | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/pom.xml b/pom.xml
index 9af2b33c1c7..048d0d72c0d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -846,6 +846,12 @@
         <groupId>org.apache.santuario</groupId>
         <artifactId>xmlsec</artifactId>
         <version>${xmlsec.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>com.fasterxml.woodstox</groupId>
+            <artifactId>woodstox-core</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>com.tdunning</groupId>



[hive] branch master updated: HIVE-25758: OOM due to recursive application of CBO rules (Alessandro Solimando, reviewed by Stamatis Zampetakis)

2022-04-27 Thread zabetak
This is an automated email from the ASF dual-hosted git repository.

zabetak pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 7583142cbff HIVE-25758: OOM due to recursive application of CBO rules (Alessandro Solimando, reviewed by Stamatis Zampetakis)
7583142cbff is described below

commit 7583142cbffcb3958a546a9aaa15700bbc243df9
Author: Alessandro Solimando 
AuthorDate: Mon Jan 24 13:08:56 2022 +0100

HIVE-25758: OOM due to recursive application of CBO rules (Alessandro Solimando, reviewed by Stamatis Zampetakis)

Closes #2966
---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java |  4 ++
 .../hive/ql/optimizer/calcite/HiveCalciteUtil.java | 52 ++
 .../HiveJoinPushTransitivePredicatesRule.java  | 82 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java   |  7 +-
 .../cbo_join_transitive_pred_loop_1.q  | 17 +
 .../cbo_join_transitive_pred_loop_2.q  | 24 +++
 .../cbo_join_transitive_pred_loop_3.q  | 23 ++
 .../cbo_join_transitive_pred_loop_4.q  | 23 ++
 .../llap/cbo_join_transitive_pred_loop_1.q.out | 75 
 .../llap/cbo_join_transitive_pred_loop_2.q.out | 74 +++
 .../llap/cbo_join_transitive_pred_loop_3.q.out | 67 ++
 .../llap/cbo_join_transitive_pred_loop_4.q.out | 73 +++
 12 files changed, 470 insertions(+), 51 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 99964fc7732..caf223dd91b 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2530,6 +2530,10 @@ public class HiveConf extends Configuration {
 "If this config is true only pushed down filters remain in the 
operator tree, \n" +
 "and the original filter is removed. If this config is false, the 
original filter \n" +
 "is also left in the operator tree at the original place."),
+
HIVE_JOIN_DISJ_TRANSITIVE_PREDICATES_PUSHDOWN("hive.optimize.join.disjunctive.transitive.predicates.pushdown",
+true, "Whether to transitively infer disjunctive predicates across 
joins. \n"
++ "Disjunctive predicates are hard to simplify and pushing them down 
might lead to infinite rule matching "
++ "causing stackoverflow and OOM errors"),
 HIVEPOINTLOOKUPOPTIMIZER("hive.optimize.point.lookup", true,
  "Whether to transform OR clauses in Filter operators into IN 
clauses"),
 HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 2,
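
The new flag defaults to true, preserving the previous behaviour; disabling it stops transitive inference of disjunctive join predicates. A minimal sketch of toggling it programmatically, assuming the ConfVars constant keeps the name added above (not taken from the commit's tests):

import org.apache.hadoop.hive.conf.HiveConf;

public final class DisjunctivePushdownToggle {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Turn off transitive inference of disjunctive join predicates, e.g. when a
    // query hits the runaway rule application described in HIVE-25758.
    conf.setBoolVar(HiveConf.ConfVars.HIVE_JOIN_DISJ_TRANSITIVE_PREDICATES_PUSHDOWN, false);
    System.out.println(
        conf.getBoolVar(HiveConf.ConfVars.HIVE_JOIN_DISJ_TRANSITIVE_PREDICATES_PUSHDOWN));
  }
}
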
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index 160bfb86f6c..264756f0413 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -1214,6 +1214,58 @@ public class HiveCalciteUtil {
 }
   }
 
+  private static class DisjunctivePredicatesFinder extends RexVisitorImpl<Void> {
+// accounting for DeMorgan's law
+boolean inNegation = false;
+boolean hasDisjunction = false;
+
+public DisjunctivePredicatesFinder() {
+  super(true);
+}
+
+@Override
+public Void visitCall(RexCall call) {
+  switch (call.getKind()) {
+  case OR:
+if (inNegation) {
+  return super.visitCall(call);
+} else {
+  this.hasDisjunction = true;
+  return null;
+}
+  case AND:
+if (inNegation) {
+  this.hasDisjunction = true;
+  return null;
+} else {
+  return super.visitCall(call);
+}
+  case NOT:
+inNegation = !inNegation;
+return super.visitCall(call);
+  default:
+return super.visitCall(call);
+  }
+}
+  }
+
+  /**
+   * Returns whether the expression has disjunctions (OR) at any level of nesting.
+   *
+   * Example 1: OR(=($0, $1), IS NOT NULL($2))):INTEGER (OR in the top-level expression)
+   * Example 2: NOT(AND(=($0, $1), IS NOT NULL($2))
+   *   this is equivalent to OR((<>($0, $1), IS NULL($2))
+   * Example 3: AND(OR(=($0, $1), IS NOT NULL($2))) (OR in inner expression)
+   *
+   * @param node the expression where to look for disjunctions.
+   * @return true if the given expressions contains a disjunction, false otherwise.
+   */
+  public static boolean hasDisjuction(RexNode node) {
+DisjunctivePredicatesFinder finder = new DisjunctivePredicatesFinder();
+node.accept(finder);
+return finder.hasDisjunction;
+  }
+
   /**
* Checks if any of the expression given as list expressions are from right 
side of the join.
*  This is used during anti
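
To illustrate the helper added to HiveCalciteUtil above, here is a hedged usage sketch that builds a few RexNode expressions and checks them with hasDisjuction; the RexBuilder/JavaTypeFactoryImpl scaffolding is illustrative and not taken from the commit:

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;

public final class HasDisjunctionSketch {
  public static void main(String[] args) {
    RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
    RexBuilder rexBuilder = new RexBuilder(typeFactory);
    RelDataType intType = typeFactory.createSqlType(SqlTypeName.INTEGER);

    RexNode ref0 = rexBuilder.makeInputRef(intType, 0);
    RexNode ref1 = rexBuilder.makeInputRef(intType, 1);
    RexNode eq = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, ref0, ref1);
    RexNode notNull = rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, ref0);

    // Example 1 from the javadoc: a top-level OR.
    RexNode or = rexBuilder.makeCall(SqlStdOperatorTable.OR, eq, notNull);
    // Example 2: NOT(AND(...)) counts as a disjunction by De Morgan's law.
    RexNode notAnd = rexBuilder.makeCall(SqlStdOperatorTable.NOT,
        rexBuilder.makeCall(SqlStdOperatorTable.AND, eq, notNull));

    System.out.println(HiveCalciteUtil.hasDisjuction(or));      // expected: true
    System.out.println(HiveCalciteUtil.hasDisjuction(notAnd));  // expected: true
    System.out.println(HiveCalciteUtil.hasDisjuction(eq));      // expected: false
  }
}
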