This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new 2069fd03fd3 [SPARK-39677][SQL][DOCS] Fix args formatting of the regexp 
and like functions
2069fd03fd3 is described below

commit 2069fd03fd30faaabd1d73ca0416a76ab5908937
Author: Max Gekk <max.g...@gmail.com>
AuthorDate: Tue Jul 5 13:37:41 2022 +0300

    [SPARK-39677][SQL][DOCS] Fix args formatting of the regexp and like 
functions
    
    ### What changes were proposed in this pull request?
    In the PR, I propose to fix args formatting of some regexp functions by 
adding explicit new lines. That fixes the following items in arg lists.
    
    Before:
    
    <img width="745" alt="Screenshot 2022-07-05 at 09 48 28" 
src="https://user-images.githubusercontent.com/1580697/177274234-04209d43-a542-4c71-b5ca-6f3239208015.png">
    
    After:
    
    <img width="704" alt="Screenshot 2022-07-05 at 11 06 13" 
src="https://user-images.githubusercontent.com/1580697/177280718-cb05184c-8559-4461-b94d-dfaaafda7dd2.png">
    
    ### Why are the changes needed?
    To improve readability of Spark SQL docs.
    
    ### Does this PR introduce _any_ user-facing change?
    No.
    
    ### How was this patch tested?
    By building docs and checking manually:
    ```
    $ SKIP_SCALADOC=1 SKIP_PYTHONDOC=1 SKIP_RDOC=1 bundle exec jekyll build
    ```
    
    Closes #37082 from MaxGekk/fix-regexp-docs.
    
    Authored-by: Max Gekk <max.g...@gmail.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
    (cherry picked from commit 4e42f8b12e8dc57a15998f22d508a19cf3c856aa)
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 .../catalyst/expressions/regexpExpressions.scala   | 46 ++++++++--------------
 1 file changed, 16 insertions(+), 30 deletions(-)

diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala
index 01763f082d6..e3eea6f46e2 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala
@@ -84,16 +84,12 @@ abstract class StringRegexExpression extends 
BinaryExpression
     Arguments:
       * str - a string expression
       * pattern - a string expression. The pattern is a string which is 
matched literally, with
-          exception to the following special symbols:
-
-          _ matches any one character in the input (similar to . in posix 
regular expressions)
-
+          exception to the following special symbols:<br><br>
+          _ matches any one character in the input (similar to . in posix 
regular expressions)\
           % matches zero or more characters in the input (similar to .* in 
posix regular
-          expressions)
-
+          expressions)<br><br>
           Since Spark 2.0, string literals are unescaped in our SQL parser. 
For example, in order
-          to match "\abc", the pattern should be "\\abc".
-
+          to match "\abc", the pattern should be "\\abc".<br><br>
           When SQL config 'spark.sql.parser.escapedStringLiterals' is enabled, 
it falls back
           to Spark 1.6 behavior regarding string literal parsing. For example, 
if the config is
           enabled, the pattern to match "\abc" should be "\abc".
@@ -189,7 +185,7 @@ case class Like(left: Expression, right: Expression, 
escapeChar: Char)
     copy(left = newLeft, right = newRight)
 }
 
-// scalastyle:off line.contains.tab
+// scalastyle:off line.contains.tab line.size.limit
 /**
  * Simple RegEx case-insensitive pattern matching function
  */
@@ -200,16 +196,12 @@ case class Like(left: Expression, right: Expression, 
escapeChar: Char)
     Arguments:
       * str - a string expression
       * pattern - a string expression. The pattern is a string which is 
matched literally and
-          case-insensitively, with exception to the following special symbols:
-
-          _ matches any one character in the input (similar to . in posix 
regular expressions)
-
+          case-insensitively, with exception to the following special 
symbols:<br><br>
+          _ matches any one character in the input (similar to . in posix 
regular expressions)<br><br>
           % matches zero or more characters in the input (similar to .* in 
posix regular
-          expressions)
-
+          expressions)<br><br>
           Since Spark 2.0, string literals are unescaped in our SQL parser. 
For example, in order
-          to match "\abc", the pattern should be "\\abc".
-
+          to match "\abc", the pattern should be "\\abc".<br><br>
           When SQL config 'spark.sql.parser.escapedStringLiterals' is enabled, 
it falls back
           to Spark 1.6 behavior regarding string literal parsing. For example, 
if the config is
           enabled, the pattern to match "\abc" should be "\abc".
@@ -237,7 +229,7 @@ case class Like(left: Expression, right: Expression, 
escapeChar: Char)
   """,
   since = "3.3.0",
   group = "predicate_funcs")
-// scalastyle:on line.contains.tab
+// scalastyle:on line.contains.tab line.size.limit
 case class ILike(
     left: Expression,
     right: Expression,
@@ -574,12 +566,10 @@ case class StringSplit(str: Expression, regex: 
Expression, limit: Expression)
     Arguments:
       * str - a string expression to search for a regular expression pattern 
match.
       * regexp - a string representing a regular expression. The regex string 
should be a
-          Java regular expression.
-
+          Java regular expression.<br><br>
           Since Spark 2.0, string literals (including regex patterns) are 
unescaped in our SQL
           parser. For example, to match "\abc", a regular expression for 
`regexp` can be
-          "^\\abc$".
-
+          "^\\abc$".<br><br>
           There is a SQL config 'spark.sql.parser.escapedStringLiterals' that 
can be used to
           fallback to the Spark 1.6 behavior regarding string literal parsing. 
For example,
           if the config is enabled, the `regexp` that can match "\abc" is 
"^\abc$".
@@ -783,12 +773,10 @@ abstract class RegExpExtractBase
     Arguments:
       * str - a string expression.
       * regexp - a string representing a regular expression. The regex string 
should be a
-          Java regular expression.
-
+          Java regular expression.<br><br>
           Since Spark 2.0, string literals (including regex patterns) are 
unescaped in our SQL
           parser. For example, to match "\abc", a regular expression for 
`regexp` can be
-          "^\\abc$".
-
+          "^\\abc$".<br><br>
           There is a SQL config 'spark.sql.parser.escapedStringLiterals' that 
can be used to
           fallback to the Spark 1.6 behavior regarding string literal parsing. 
For example,
           if the config is enabled, the `regexp` that can match "\abc" is 
"^\abc$".
@@ -888,12 +876,10 @@ case class RegExpExtract(subject: Expression, regexp: 
Expression, idx: Expressio
     Arguments:
       * str - a string expression.
       * regexp - a string representing a regular expression. The regex string 
should be a
-          Java regular expression.
-
+          Java regular expression.<br><br>
           Since Spark 2.0, string literals (including regex patterns) are 
unescaped in our SQL
           parser. For example, to match "\abc", a regular expression for 
`regexp` can be
-          "^\\abc$".
-
+          "^\\abc$".<br><br>
           There is a SQL config 'spark.sql.parser.escapedStringLiterals' that 
can be used to
           fallback to the Spark 1.6 behavior regarding string literal parsing. 
For example,
           if the config is enabled, the `regexp` that can match "\abc" is 
"^\abc$".


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to