This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new d034ce5  [SPARK-38709][SQL] Remove trailing $ from function class name in sql-expression-schema.md
d034ce5 is described below

commit d034ce5b90f44efc701ec8cef4a0cb42cfc18f66
Author: Wenchen Fan <wenc...@databricks.com>
AuthorDate: Thu Mar 31 00:52:07 2022 -0700

    [SPARK-38709][SQL] Remove trailing $ from function class name in sql-expression-schema.md
    
    ### What changes were proposed in this pull request?
    
    It's a bit weird to see class names like `CeilExpressionBuilder$` in `sql-expression-schema.md`. This PR removes the trailing `$`.
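    
    For context, the trailing `$` comes from the way scalac compiles an `object`: the singleton lives in a synthetic class whose name ends in `$`, and that is the name `getCanonicalName` reports. A minimal standalone sketch of the behavior (not part of this patch; the object and names below are hypothetical):
    
    ```scala
    // Hypothetical builder object, standing in for e.g. CeilExpressionBuilder.
    object MyExpressionBuilder
    
    object Demo extends App {
      val clazz = MyExpressionBuilder.getClass
      // Prints the synthetic class name, ending in "$": "MyExpressionBuilder$"
      println(clazz.getCanonicalName)
      // stripSuffix("$") drops a trailing "$" if present and is a no-op
      // otherwise, so names of plain (non-object) classes are unaffected.
      println(clazz.getCanonicalName.stripSuffix("$"))
    }
    ```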
    
    ### Why are the changes needed?
    
    Code cleanup.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No
    
    ### How was this patch tested?
    
    Existing tests.
    
    Closes #36021 from cloud-fan/minor.
    
    Authored-by: Wenchen Fan <wenc...@databricks.com>
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
    (cherry picked from commit 794420fcddcacbb655ea88c4015d0a309b410bda)
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
---
 .../sql/catalyst/analysis/FunctionRegistry.scala   |  7 +++---
 .../sql-functions/sql-expression-schema.md         | 26 +++++++++++-----------
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
index 3737f2ab..bb4aa70 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
@@ -170,7 +170,7 @@ object FunctionRegistryBase {
     if (df != null) {
       if (df.extended().isEmpty) {
         new ExpressionInfo(
-          clazz.getCanonicalName,
+          clazz.getCanonicalName.stripSuffix("$"),
           null,
           name,
           df.usage(),
@@ -184,10 +184,11 @@ object FunctionRegistryBase {
       } else {
         // This exists for the backward compatibility with old `ExpressionDescription`s defining
         // the extended description in `extended()`.
-        new ExpressionInfo(clazz.getCanonicalName, null, name, df.usage(), df.extended())
+        new ExpressionInfo(
+          clazz.getCanonicalName.stripSuffix("$"), null, name, df.usage(), df.extended())
       }
     } else {
-      new ExpressionInfo(clazz.getCanonicalName, name)
+      new ExpressionInfo(clazz.getCanonicalName.stripSuffix("$"), name)
     }
   }
 }
diff --git a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
index 177a78d..644bfa9 100644
--- a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
+++ b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
@@ -69,14 +69,14 @@
 | org.apache.spark.sql.catalyst.expressions.Cast | timestamp | N/A | N/A |
 | org.apache.spark.sql.catalyst.expressions.Cast | tinyint | N/A | N/A |
 | org.apache.spark.sql.catalyst.expressions.Cbrt | cbrt | SELECT cbrt(27.0) | struct<CBRT(27.0):double> |
-| org.apache.spark.sql.catalyst.expressions.CeilExpressionBuilder$ | ceil | SELECT ceil(-0.1) | struct<CEIL(-0.1):decimal(1,0)> |
-| org.apache.spark.sql.catalyst.expressions.CeilExpressionBuilder$ | ceiling | SELECT ceiling(-0.1) | struct<ceiling(-0.1):decimal(1,0)> |
+| org.apache.spark.sql.catalyst.expressions.CeilExpressionBuilder | ceil | SELECT ceil(-0.1) | struct<CEIL(-0.1):decimal(1,0)> |
+| org.apache.spark.sql.catalyst.expressions.CeilExpressionBuilder | ceiling | SELECT ceiling(-0.1) | struct<ceiling(-0.1):decimal(1,0)> |
 | org.apache.spark.sql.catalyst.expressions.Chr | char | SELECT char(65) | struct<char(65):string> |
 | org.apache.spark.sql.catalyst.expressions.Chr | chr | SELECT chr(65) | struct<chr(65):string> |
 | org.apache.spark.sql.catalyst.expressions.Coalesce | coalesce | SELECT coalesce(NULL, 1, NULL) | struct<coalesce(NULL, 1, NULL):int> |
 | org.apache.spark.sql.catalyst.expressions.Concat | concat | SELECT concat('Spark', 'SQL') | struct<concat(Spark, SQL):string> |
 | org.apache.spark.sql.catalyst.expressions.ConcatWs | concat_ws | SELECT concat_ws(' ', 'Spark', 'SQL') | struct<concat_ws( , Spark, SQL):string> |
-| org.apache.spark.sql.catalyst.expressions.ContainsExpressionBuilder$ | contains | SELECT contains('Spark SQL', 'Spark') | struct<contains(Spark SQL, Spark):boolean> |
+| org.apache.spark.sql.catalyst.expressions.ContainsExpressionBuilder | contains | SELECT contains('Spark SQL', 'Spark') | struct<contains(Spark SQL, Spark):boolean> |
 | org.apache.spark.sql.catalyst.expressions.Conv | conv | SELECT conv('100', 2, 10) | struct<conv(100, 2, 10):string> |
 | org.apache.spark.sql.catalyst.expressions.ConvertTimezone | convert_timezone | SELECT convert_timezone('Europe/Amsterdam', 'America/Los_Angeles', timestamp_ntz'2021-12-06 00:00:00') | struct<convert_timezone(Europe/Amsterdam, America/Los_Angeles, TIMESTAMP_NTZ '2021-12-06 00:00:00'):timestamp_ntz> |
 | org.apache.spark.sql.catalyst.expressions.Cos | cos | SELECT cos(0) | struct<COS(0):double> |
@@ -100,7 +100,7 @@
 | org.apache.spark.sql.catalyst.expressions.DateDiff | datediff | SELECT datediff('2009-07-31', '2009-07-30') | struct<datediff(2009-07-31, 2009-07-30):int> |
 | org.apache.spark.sql.catalyst.expressions.DateFormatClass | date_format | SELECT date_format('2016-04-08', 'y') | struct<date_format(2016-04-08, y):string> |
 | org.apache.spark.sql.catalyst.expressions.DateFromUnixDate | date_from_unix_date | SELECT date_from_unix_date(1) | struct<date_from_unix_date(1):date> |
-| org.apache.spark.sql.catalyst.expressions.DatePartExpressionBuilder$ | date_part | SELECT date_part('YEAR', TIMESTAMP '2019-08-12 01:00:00.123456') | struct<date_part(YEAR, TIMESTAMP '2019-08-12 01:00:00.123456'):int> |
+| org.apache.spark.sql.catalyst.expressions.DatePartExpressionBuilder | date_part | SELECT date_part('YEAR', TIMESTAMP '2019-08-12 01:00:00.123456') | struct<date_part(YEAR, TIMESTAMP '2019-08-12 01:00:00.123456'):int> |
 | org.apache.spark.sql.catalyst.expressions.DateSub | date_sub | SELECT date_sub('2016-07-30', 1) | struct<date_sub(2016-07-30, 1):date> |
 | org.apache.spark.sql.catalyst.expressions.DayOfMonth | day | SELECT day('2009-07-30') | struct<day(2009-07-30):int> |
 | org.apache.spark.sql.catalyst.expressions.DayOfMonth | dayofmonth | SELECT dayofmonth('2009-07-30') | struct<dayofmonth(2009-07-30):int> |
@@ -112,7 +112,7 @@
 | org.apache.spark.sql.catalyst.expressions.ElementAt | element_at | SELECT element_at(array(1, 2, 3), 2) | struct<element_at(array(1, 2, 3), 2):int> |
 | org.apache.spark.sql.catalyst.expressions.Elt | elt | SELECT elt(1, 'scala', 'java') | struct<elt(1, scala, java):string> |
 | org.apache.spark.sql.catalyst.expressions.Encode | encode | SELECT encode('abc', 'utf-8') | struct<encode(abc, utf-8):binary> |
-| org.apache.spark.sql.catalyst.expressions.EndsWithExpressionBuilder$ | endswith | SELECT endswith('Spark SQL', 'SQL') | struct<endswith(Spark SQL, SQL):boolean> |
+| org.apache.spark.sql.catalyst.expressions.EndsWithExpressionBuilder | endswith | SELECT endswith('Spark SQL', 'SQL') | struct<endswith(Spark SQL, SQL):boolean> |
 | org.apache.spark.sql.catalyst.expressions.EqualNullSafe | <=> | SELECT 2 <=> 2 | struct<(2 <=> 2):boolean> |
 | org.apache.spark.sql.catalyst.expressions.EqualTo | = | SELECT 2 = 2 | struct<(2 = 2):boolean> |
 | org.apache.spark.sql.catalyst.expressions.EqualTo | == | SELECT 2 == 2 | struct<(2 = 2):boolean> |
@@ -125,7 +125,7 @@
 | org.apache.spark.sql.catalyst.expressions.Factorial | factorial | SELECT factorial(5) | struct<factorial(5):bigint> |
 | org.apache.spark.sql.catalyst.expressions.FindInSet | find_in_set | SELECT find_in_set('ab','abc,b,ab,c,def') | struct<find_in_set(ab, abc,b,ab,c,def):int> |
 | org.apache.spark.sql.catalyst.expressions.Flatten | flatten | SELECT flatten(array(array(1, 2), array(3, 4))) | struct<flatten(array(array(1, 2), array(3, 4))):array<int>> |
-| org.apache.spark.sql.catalyst.expressions.FloorExpressionBuilder$ | floor | SELECT floor(-0.1) | struct<FLOOR(-0.1):decimal(1,0)> |
+| org.apache.spark.sql.catalyst.expressions.FloorExpressionBuilder | floor | SELECT floor(-0.1) | struct<FLOOR(-0.1):decimal(1,0)> |
 | org.apache.spark.sql.catalyst.expressions.FormatNumber | format_number | SELECT format_number(12332.123456, 4) | struct<format_number(12332.123456, 4):string> |
 | org.apache.spark.sql.catalyst.expressions.FormatString | format_string | SELECT format_string("Hello World %d %s", 100, "days") | struct<format_string(Hello World %d %s, 100, days):string> |
 | org.apache.spark.sql.catalyst.expressions.FormatString | printf | SELECT printf("Hello World %d %s", 100, "days") | struct<printf(Hello World %d %s, 100, days):string> |
@@ -156,7 +156,7 @@
 | org.apache.spark.sql.catalyst.expressions.JsonObjectKeys | json_object_keys | SELECT json_object_keys('{}') | struct<json_object_keys({}):array<string>> |
 | org.apache.spark.sql.catalyst.expressions.JsonToStructs | from_json | SELECT from_json('{"a":1, "b":0.8}', 'a INT, b DOUBLE') | struct<from_json({"a":1, "b":0.8}):struct<a:int,b:double>> |
 | org.apache.spark.sql.catalyst.expressions.JsonTuple | json_tuple | SELECT json_tuple('{"a":1, "b":2}', 'a', 'b') | struct<c0:string,c1:string> |
-| org.apache.spark.sql.catalyst.expressions.LPadExpressionBuilder$ | lpad | SELECT lpad('hi', 5, '??') | struct<lpad(hi, 5, ??):string> |
+| org.apache.spark.sql.catalyst.expressions.LPadExpressionBuilder | lpad | SELECT lpad('hi', 5, '??') | struct<lpad(hi, 5, ??):string> |
 | org.apache.spark.sql.catalyst.expressions.Lag | lag | SELECT a, b, lag(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct<a:string,b:int,lag(b, 1, NULL) OVER (PARTITION BY a ORDER BY b ASC NULLS FIRST ROWS BETWEEN -1 FOLLOWING AND -1 FOLLOWING):int> |
 | org.apache.spark.sql.catalyst.expressions.LastDay | last_day | SELECT last_day('2009-01-12') | struct<last_day(2009-01-12):date> |
 | org.apache.spark.sql.catalyst.expressions.Lead | lead | SELECT a, b, lead(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct<a:string,b:int,lead(b, 1, NULL) OVER (PARTITION BY a ORDER BY b ASC NULLS FIRST ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING):int> |
@@ -182,8 +182,8 @@
 | org.apache.spark.sql.catalyst.expressions.MakeDate | make_date | SELECT make_date(2013, 7, 15) | struct<make_date(2013, 7, 15):date> |
 | org.apache.spark.sql.catalyst.expressions.MakeInterval | make_interval | SELECT make_interval(100, 11, 1, 1, 12, 30, 01.001001) | struct<make_interval(100, 11, 1, 1, 12, 30, 1.001001):interval> |
 | org.apache.spark.sql.catalyst.expressions.MakeTimestamp | make_timestamp | SELECT make_timestamp(2014, 12, 28, 6, 30, 45.887) | struct<make_timestamp(2014, 12, 28, 6, 30, 45.887):timestamp> |
-| org.apache.spark.sql.catalyst.expressions.MakeTimestampLTZExpressionBuilder$ | make_timestamp_ltz | SELECT make_timestamp_ltz(2014, 12, 28, 6, 30, 45.887) | struct<make_timestamp_ltz(2014, 12, 28, 6, 30, 45.887):timestamp> |
-| org.apache.spark.sql.catalyst.expressions.MakeTimestampNTZExpressionBuilder$ | make_timestamp_ntz | SELECT make_timestamp_ntz(2014, 12, 28, 6, 30, 45.887) | struct<make_timestamp_ntz(2014, 12, 28, 6, 30, 45.887):timestamp_ntz> |
+| org.apache.spark.sql.catalyst.expressions.MakeTimestampLTZExpressionBuilder | make_timestamp_ltz | SELECT make_timestamp_ltz(2014, 12, 28, 6, 30, 45.887) | struct<make_timestamp_ltz(2014, 12, 28, 6, 30, 45.887):timestamp> |
+| org.apache.spark.sql.catalyst.expressions.MakeTimestampNTZExpressionBuilder | make_timestamp_ntz | SELECT make_timestamp_ntz(2014, 12, 28, 6, 30, 45.887) | struct<make_timestamp_ntz(2014, 12, 28, 6, 30, 45.887):timestamp_ntz> |
 | org.apache.spark.sql.catalyst.expressions.MakeYMInterval | make_ym_interval | SELECT make_ym_interval(1, 2) | struct<make_ym_interval(1, 2):interval year to month> |
 | org.apache.spark.sql.catalyst.expressions.MapConcat | map_concat | SELECT map_concat(map(1, 'a', 2, 'b'), map(3, 'c')) | struct<map_concat(map(1, a, 2, b), map(3, c)):map<int,string>> |
 | org.apache.spark.sql.catalyst.expressions.MapContainsKey | map_contains_key | SELECT map_contains_key(map(1, 'a', 2, 'b'), 1) | struct<map_contains_key(map(1, a, 2, b), 1):boolean> |
@@ -219,8 +219,8 @@
 | org.apache.spark.sql.catalyst.expressions.Overlay | overlay | SELECT overlay('Spark SQL' PLACING '_' FROM 6) | struct<overlay(Spark SQL, _, 6, -1):string> |
 | org.apache.spark.sql.catalyst.expressions.ParseToDate | to_date | SELECT to_date('2009-07-30 04:17:52') | struct<to_date(2009-07-30 04:17:52):date> |
 | org.apache.spark.sql.catalyst.expressions.ParseToTimestamp | to_timestamp | SELECT to_timestamp('2016-12-31 00:12:00') | struct<to_timestamp(2016-12-31 00:12:00):timestamp> |
-| org.apache.spark.sql.catalyst.expressions.ParseToTimestampLTZExpressionBuilder$ | to_timestamp_ltz | SELECT to_timestamp_ltz('2016-12-31 00:12:00') | struct<to_timestamp_ltz(2016-12-31 00:12:00):timestamp> |
-| org.apache.spark.sql.catalyst.expressions.ParseToTimestampNTZExpressionBuilder$ | to_timestamp_ntz | SELECT to_timestamp_ntz('2016-12-31 00:12:00') | struct<to_timestamp_ntz(2016-12-31 00:12:00):timestamp_ntz> |
+| org.apache.spark.sql.catalyst.expressions.ParseToTimestampLTZExpressionBuilder | to_timestamp_ltz | SELECT to_timestamp_ltz('2016-12-31 00:12:00') | struct<to_timestamp_ltz(2016-12-31 00:12:00):timestamp> |
+| org.apache.spark.sql.catalyst.expressions.ParseToTimestampNTZExpressionBuilder | to_timestamp_ntz | SELECT to_timestamp_ntz('2016-12-31 00:12:00') | struct<to_timestamp_ntz(2016-12-31 00:12:00):timestamp_ntz> |
 | org.apache.spark.sql.catalyst.expressions.ParseUrl | parse_url | SELECT parse_url('http://spark.apache.org/path?query=1', 'HOST') | struct<parse_url(http://spark.apache.org/path?query=1, HOST):string> |
 | org.apache.spark.sql.catalyst.expressions.PercentRank | percent_rank | SELECT a, b, percent_rank(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct<a:string,b:int,PERCENT_RANK() OVER (PARTITION BY a ORDER BY b ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):double> |
 | org.apache.spark.sql.catalyst.expressions.Pi | pi | SELECT pi() | struct<PI():double> |
@@ -233,7 +233,7 @@
 | org.apache.spark.sql.catalyst.expressions.RLike | regexp | SELECT regexp('%SystemDrive%\Users\John', '%SystemDrive%\\Users.*') | struct<REGEXP(%SystemDrive%UsersJohn, %SystemDrive%\Users.*):boolean> |
 | org.apache.spark.sql.catalyst.expressions.RLike | regexp_like | SELECT regexp_like('%SystemDrive%\Users\John', '%SystemDrive%\\Users.*') | struct<REGEXP_LIKE(%SystemDrive%UsersJohn, %SystemDrive%\Users.*):boolean> |
 | org.apache.spark.sql.catalyst.expressions.RLike | rlike | SELECT rlike('%SystemDrive%\Users\John', '%SystemDrive%\\Users.*') | struct<RLIKE(%SystemDrive%UsersJohn, %SystemDrive%\Users.*):boolean> |
-| org.apache.spark.sql.catalyst.expressions.RPadExpressionBuilder$ | rpad | SELECT rpad('hi', 5, '??') | struct<rpad(hi, 5, ??):string> |
+| org.apache.spark.sql.catalyst.expressions.RPadExpressionBuilder | rpad | SELECT rpad('hi', 5, '??') | struct<rpad(hi, 5, ??):string> |
 | org.apache.spark.sql.catalyst.expressions.RaiseError | raise_error | SELECT raise_error('custom error message') | struct<raise_error(custom error message):void> |
 | org.apache.spark.sql.catalyst.expressions.Rand | rand | SELECT rand() | struct<rand():double> |
 | org.apache.spark.sql.catalyst.expressions.Rand | random | SELECT random() | struct<rand():double> |
@@ -278,7 +278,7 @@
 | org.apache.spark.sql.catalyst.expressions.SplitPart | split_part | SELECT split_part('11.12.13', '.', 3) | struct<split_part(11.12.13, ., 3):string> |
 | org.apache.spark.sql.catalyst.expressions.Sqrt | sqrt | SELECT sqrt(4) | struct<SQRT(4):double> |
 | org.apache.spark.sql.catalyst.expressions.Stack | stack | SELECT stack(2, 1, 2, 3) | struct<col0:int,col1:int> |
-| org.apache.spark.sql.catalyst.expressions.StartsWithExpressionBuilder$ | startswith | SELECT startswith('Spark SQL', 'Spark') | struct<startswith(Spark SQL, Spark):boolean> |
+| org.apache.spark.sql.catalyst.expressions.StartsWithExpressionBuilder | startswith | SELECT startswith('Spark SQL', 'Spark') | struct<startswith(Spark SQL, Spark):boolean> |
 | org.apache.spark.sql.catalyst.expressions.StringInstr | instr | SELECT instr('SparkSQL', 'SQL') | struct<instr(SparkSQL, SQL):int> |
 | org.apache.spark.sql.catalyst.expressions.StringLocate | locate | SELECT locate('bar', 'foobarbar') | struct<locate(bar, foobarbar, 1):int> |
 | org.apache.spark.sql.catalyst.expressions.StringLocate | position | SELECT position('bar', 'foobarbar') | struct<position(bar, foobarbar, 1):int> |
