This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 7fd654c0142 [SPARK-41323][SQL] Support current_schema
7fd654c0142 is described below

commit 7fd654c0142ab9e4002882da4e65d3b25bebd26c
Author: Serge Rielau <se...@rielau.com>
AuthorDate: Wed Nov 30 14:56:23 2022 +0800

    [SPARK-41323][SQL] Support current_schema
    
    ### What changes were proposed in this pull request?
    Support current_schema() as a synonym for current_database()
    
    ### Why are the changes needed?
    current_schema is used in the SQL Standard and many other products such as 
Snowflake, Db2, PostgreSQL, Redshift, ...
    It also disambiguates from database which is also used with catalog in some 
products.
    
    ### Does this PR introduce _any_ user-facing change?
    It is a new feature.
    
    ### How was this patch tested?
    Add a test verifying that current_schema() and current_database() return 
the same result.
    
    Closes #38840 from srielau/SPARK-41323-CURRENT_SCHEMA.
    
    Authored-by: Serge Rielau <se...@rielau.com>
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
---
 .../org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala   | 1 +
 sql/core/src/test/resources/sql-functions/sql-expression-schema.md  | 5 +++--
 .../test/resources/sql-tests/inputs/current_database_catalog.sql    | 4 ++--
 .../resources/sql-tests/results/current_database_catalog.sql.out    | 6 +++---
 .../scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala  | 2 ++
 5 files changed, 11 insertions(+), 7 deletions(-)

diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
index a1cecc4b6e0..3817f00d09d 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
@@ -726,6 +726,7 @@ object FunctionRegistry {
     expression[InputFileBlockLength]("input_file_block_length"),
     expression[MonotonicallyIncreasingID]("monotonically_increasing_id"),
     expression[CurrentDatabase]("current_database"),
+    expression[CurrentDatabase]("current_schema", true),
     expression[CurrentCatalog]("current_catalog"),
     expression[CurrentUser]("current_user"),
     expression[CurrentUser]("user", setAlias = true),
diff --git a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md 
b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
index 8d47878de15..8c2134a6142 100644
--- a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
+++ b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
@@ -89,6 +89,7 @@
 | org.apache.spark.sql.catalyst.expressions.CurDateExpressionBuilder | curdate 
| SELECT curdate() | struct<current_date():date> |
 | org.apache.spark.sql.catalyst.expressions.CurrentCatalog | current_catalog | 
SELECT current_catalog() | struct<current_catalog():string> |
 | org.apache.spark.sql.catalyst.expressions.CurrentDatabase | current_database 
| SELECT current_database() | struct<current_database():string> |
+| org.apache.spark.sql.catalyst.expressions.CurrentDatabase | current_schema | 
SELECT current_schema() | struct<current_database():string> |
 | org.apache.spark.sql.catalyst.expressions.CurrentDate | current_date | 
SELECT current_date() | struct<current_date():date> |
 | org.apache.spark.sql.catalyst.expressions.CurrentTimeZone | current_timezone 
| SELECT current_timezone() | struct<current_timezone():string> |
 | org.apache.spark.sql.catalyst.expressions.CurrentTimestamp | 
current_timestamp | SELECT current_timestamp() | 
struct<current_timestamp():timestamp> |
@@ -400,7 +401,7 @@
 | org.apache.spark.sql.catalyst.expressions.aggregate.StddevSamp | stddev | 
SELECT stddev(col) FROM VALUES (1), (2), (3) AS tab(col) | 
struct<stddev(col):double> |
 | org.apache.spark.sql.catalyst.expressions.aggregate.StddevSamp | stddev_samp 
| SELECT stddev_samp(col) FROM VALUES (1), (2), (3) AS tab(col) | 
struct<stddev_samp(col):double> |
 | org.apache.spark.sql.catalyst.expressions.aggregate.Sum | sum | SELECT 
sum(col) FROM VALUES (5), (10), (15) AS tab(col) | struct<sum(col):bigint> |
-| org.apache.spark.sql.catalyst.expressions.aggregate.TryAverage | try_avg | 
SELECT try_avg(col) FROM VALUES (1), (2), (3) AS tab(col) | 
struct<try_avg(col):double> |
+| 
org.apache.spark.sql.catalyst.expressions.aggregate.TryAverageExpressionBuilder 
| try_avg | SELECT try_avg(col) FROM VALUES (1), (2), (3) AS tab(col) | 
struct<try_avg(col):double> |
 | org.apache.spark.sql.catalyst.expressions.aggregate.TrySumExpressionBuilder 
| try_sum | SELECT try_sum(col) FROM VALUES (5), (10), (15) AS tab(col) | 
struct<try_sum(col):bigint> |
 | org.apache.spark.sql.catalyst.expressions.aggregate.VariancePop | var_pop | 
SELECT var_pop(col) FROM VALUES (1), (2), (3) AS tab(col) | 
struct<var_pop(col):double> |
 | org.apache.spark.sql.catalyst.expressions.aggregate.VarianceSamp | var_samp 
| SELECT var_samp(col) FROM VALUES (1), (2), (3) AS tab(col) | 
struct<var_samp(col):double> |
@@ -413,4 +414,4 @@
 | org.apache.spark.sql.catalyst.expressions.xml.XPathList | xpath | SELECT 
xpath('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>','a/b/text()') | 
struct<xpath(<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>, 
a/b/text()):array<string>> |
 | org.apache.spark.sql.catalyst.expressions.xml.XPathLong | xpath_long | 
SELECT xpath_long('<a><b>1</b><b>2</b></a>', 'sum(a/b)') | 
struct<xpath_long(<a><b>1</b><b>2</b></a>, sum(a/b)):bigint> |
 | org.apache.spark.sql.catalyst.expressions.xml.XPathShort | xpath_short | 
SELECT xpath_short('<a><b>1</b><b>2</b></a>', 'sum(a/b)') | 
struct<xpath_short(<a><b>1</b><b>2</b></a>, sum(a/b)):smallint> |
-| org.apache.spark.sql.catalyst.expressions.xml.XPathString | xpath_string | 
SELECT xpath_string('<a><b>b</b><c>cc</c></a>','a/c') | 
struct<xpath_string(<a><b>b</b><c>cc</c></a>, a/c):string> |
+| org.apache.spark.sql.catalyst.expressions.xml.XPathString | xpath_string | 
SELECT xpath_string('<a><b>b</b><c>cc</c></a>','a/c') | 
struct<xpath_string(<a><b>b</b><c>cc</c></a>, a/c):string> |
\ No newline at end of file
diff --git 
a/sql/core/src/test/resources/sql-tests/inputs/current_database_catalog.sql 
b/sql/core/src/test/resources/sql-tests/inputs/current_database_catalog.sql
index 4406f1bc2e6..d7aed6afaa1 100644
--- a/sql/core/src/test/resources/sql-tests/inputs/current_database_catalog.sql
+++ b/sql/core/src/test/resources/sql-tests/inputs/current_database_catalog.sql
@@ -1,2 +1,2 @@
--- get current_datebase and current_catalog
-select current_database(), current_catalog();
+-- get current_database/current_schema and current_catalog
+select current_database(), current_schema(), current_catalog();
diff --git 
a/sql/core/src/test/resources/sql-tests/results/current_database_catalog.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/current_database_catalog.sql.out
index 9b1ca765978..379bf01e645 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/current_database_catalog.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/current_database_catalog.sql.out
@@ -1,7 +1,7 @@
 -- Automatically generated by SQLQueryTestSuite
 -- !query
-select current_database(), current_catalog()
+select current_database(), current_schema(), current_catalog()
 -- !query schema
-struct<current_database():string,current_catalog():string>
+struct<current_database():string,current_database():string,current_catalog():string>
 -- !query output
-default        spark_catalog
+default        default spark_catalog
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index 01c8d6ffe1b..e0ecc51a5d5 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -1408,6 +1408,8 @@ class HiveQuerySuite extends HiveComparisonTest with 
SQLTestUtils with BeforeAnd
     sql("USE hive_test_db")
     assert("hive_test_db" == sql("select 
current_database()").first().getString(0))
 
+    assert("hive_test_db" == sql("select 
current_schema()").first().getString(0))
+
     checkError(
       exception = intercept[AnalysisException] {
         sql("USE not_existing_db")


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to