This is an automated email from the ASF dual-hosted git repository.

gengliang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 4f567f49cb1 [SPARK-39272][SQL] Increase the start position of query 
context by 1
4f567f49cb1 is described below

commit 4f567f49cb19007b9adcce850f5f309c02375ac3
Author: Gengliang Wang <gengli...@apache.org>
AuthorDate: Wed May 25 17:32:45 2022 +0800

    [SPARK-39272][SQL] Increase the start position of query context by 1
    
    ### What changes were proposed in this pull request?
    
    Increase the start position of query context by 1
    
    ### Why are the changes needed?
    
    Currently, the line number starts from 1, while the start position starts 
from 0.
    Thus it's better to increase the start position by 1 for consistency.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No
    
    ### How was this patch tested?
    
    UT
    
    Closes #36651 from gengliangwang/increase1.
    
    Authored-by: Gengliang Wang <gengli...@apache.org>
    Signed-off-by: Gengliang Wang <gengli...@apache.org>
---
 .../apache/spark/sql/catalyst/trees/TreeNode.scala |  4 +-
 .../spark/sql/catalyst/trees/TreeNodeSuite.scala   |  2 +-
 .../resources/sql-tests/results/ansi/cast.sql.out  | 70 +++++++++++-----------
 .../resources/sql-tests/results/ansi/date.sql.out  |  6 +-
 .../results/ansi/datetime-parsing-invalid.sql.out  |  4 +-
 .../ansi/decimalArithmeticOperations.sql.out       | 20 +++----
 .../sql-tests/results/ansi/interval.sql.out        | 34 +++++------
 .../resources/sql-tests/results/ansi/map.sql.out   |  8 +--
 .../results/ansi/string-functions.sql.out          |  8 +--
 .../resources/sql-tests/results/interval.sql.out   | 12 ++--
 .../sql-tests/results/postgreSQL/boolean.sql.out   | 32 +++++-----
 .../sql-tests/results/postgreSQL/float4.sql.out    |  8 +--
 .../sql-tests/results/postgreSQL/float8.sql.out    |  8 +--
 .../sql-tests/results/postgreSQL/int4.sql.out      | 12 ++--
 .../sql-tests/results/postgreSQL/int8.sql.out      | 14 ++---
 .../results/postgreSQL/select_having.sql.out       |  2 +-
 .../sql-tests/results/postgreSQL/text.sql.out      |  4 +-
 .../results/postgreSQL/window_part2.sql.out        |  2 +-
 .../results/postgreSQL/window_part3.sql.out        |  2 +-
 .../results/postgreSQL/window_part4.sql.out        |  2 +-
 .../udf/postgreSQL/udf-select_having.sql.out       |  2 +-
 .../sql/errors/QueryExecutionAnsiErrorsSuite.scala |  8 +--
 22 files changed, 133 insertions(+), 131 deletions(-)

diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index 54c64515ee4..fcbebf3ac7a 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -79,7 +79,9 @@ case class Origin(
       ""
     } else {
       val positionContext = if (line.isDefined && startPosition.isDefined) {
-        s"(line ${line.get}, position ${startPosition.get})"
+        // Note that the line number starts from 1, while the start position 
starts from 0.
+        // Here we increase the start position by 1 for consistency.
+        s"(line ${line.get}, position ${startPosition.get + 1})"
       } else {
         ""
       }
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/trees/TreeNodeSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/trees/TreeNodeSuite.scala
index 899a740bdae..1e1206c0e1e 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/trees/TreeNodeSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/trees/TreeNodeSuite.scala
@@ -876,7 +876,7 @@ class TreeNodeSuite extends SparkFunSuite with SQLHelper {
       objectType = Some("VIEW"),
       objectName = Some("some_view"))
     val expected =
-      """== SQL of VIEW some_view(line 3, position 38) ==
+      """== SQL of VIEW some_view(line 3, position 39) ==
         |...7890 + 1234567890 + 1234567890, cast('a'
         |                                   ^^^^^^^^
         |as /* comment */
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
index 891cd34b7c5..45024dcffa7 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
@@ -9,7 +9,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '1.23' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('1.23' AS int)
        ^^^^^^^^^^^^^^^^^^^
 
@@ -21,7 +21,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '1.23' of the type "STRING" cannot be cast to 
"BIGINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('1.23' AS long)
        ^^^^^^^^^^^^^^^^^^^^
 
@@ -33,7 +33,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '-4.56' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('-4.56' AS int)
        ^^^^^^^^^^^^^^^^^^^^
 
@@ -45,7 +45,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '-4.56' of the type "STRING" cannot be cast to 
"BIGINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('-4.56' AS long)
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -57,7 +57,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('abc' AS int)
        ^^^^^^^^^^^^^^^^^^
 
@@ -69,7 +69,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to 
"BIGINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('abc' AS long)
        ^^^^^^^^^^^^^^^^^^^
 
@@ -81,7 +81,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to 
"FLOAT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('abc' AS float)
        ^^^^^^^^^^^^^^^^^^^^
 
@@ -93,7 +93,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('abc' AS double)
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -105,7 +105,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '1234567890123' of the type "STRING" cannot be 
cast to "INT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('1234567890123' AS int)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -117,7 +117,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '12345678901234567890123' of the type "STRING" 
cannot be cast to "BIGINT" because it is malformed. Correct the value as per 
the syntax, or change its target type. To return NULL instead, use `try_cast`. 
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('12345678901234567890123' AS long)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -129,7 +129,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to "INT" 
because it is malformed. Correct the value as per the syntax, or change its 
target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('' AS int)
        ^^^^^^^^^^^^^^^
 
@@ -141,7 +141,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to 
"BIGINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('' AS long)
        ^^^^^^^^^^^^^^^^
 
@@ -153,7 +153,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to 
"FLOAT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('' AS float)
        ^^^^^^^^^^^^^^^^^
 
@@ -165,7 +165,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('' AS double)
        ^^^^^^^^^^^^^^^^^^
 
@@ -193,7 +193,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('123.a' AS int)
        ^^^^^^^^^^^^^^^^^^^^
 
@@ -205,7 +205,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to 
"BIGINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('123.a' AS long)
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -217,7 +217,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to 
"FLOAT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('123.a' AS float)
        ^^^^^^^^^^^^^^^^^^^^^^
 
@@ -229,7 +229,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('123.a' AS double)
        ^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -249,7 +249,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '-2147483649' of the type "STRING" cannot be 
cast to "INT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('-2147483649' AS int)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -269,7 +269,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '2147483648' of the type "STRING" cannot be 
cast to "INT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('2147483648' AS int)
        ^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -289,7 +289,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '-9223372036854775809' of the type "STRING" 
cannot be cast to "BIGINT" because it is malformed. Correct the value as per 
the syntax, or change its target type. To return NULL instead, use `try_cast`. 
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('-9223372036854775809' AS long)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -309,7 +309,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '9223372036854775808' of the type "STRING" 
cannot be cast to "BIGINT" because it is malformed. Correct the value as per 
the syntax, or change its target type. To return NULL instead, use `try_cast`. 
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT CAST('9223372036854775808' AS long)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -568,7 +568,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to 
"TINYINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('1中文' as tinyint)
        ^^^^^^^^^^^^^^^^^^^^^^
 
@@ -580,7 +580,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to 
"SMALLINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('1中文' as smallint)
        ^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -592,7 +592,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('1中文' as INT)
        ^^^^^^^^^^^^^^^^^^
 
@@ -604,7 +604,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '中文1' of the type "STRING" cannot be cast to 
"BIGINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('中文1' as bigint)
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -616,7 +616,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to 
"BIGINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('1中文' as bigint)
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -647,7 +647,7 @@ struct<>
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value '       
  xyz   
' of the type "STRING" cannot be cast to "BOOLEAN" because it is malformed. 
Correct the value as per the syntax, or change its target type. To return NULL 
instead, use `try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" 
to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('\t\n xyz \t\r' as boolean)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -667,7 +667,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 123.45, 5, 2) cannot be 
represented as Decimal(4, 2). If necessary set "spark.sql.ansi.enabled" to 
"false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('123.45' as decimal(4, 2))
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -679,7 +679,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'xyz' of the type "STRING" cannot be cast to 
"DECIMAL(4,2)" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('xyz' as decimal(4, 2))
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -699,7 +699,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"DATE" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('a' as date)
        ^^^^^^^^^^^^^^^^^
 
@@ -719,7 +719,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('a' as timestamp)
        ^^^^^^^^^^^^^^^^^^^^^^
 
@@ -739,7 +739,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"TIMESTAMP_NTZ" because it is malformed. Correct the value as per the syntax, 
or change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast('a' as timestamp_ntz)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -751,7 +751,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value Infinity of the type "DOUBLE" cannot be cast to 
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast(cast('inf' as double) as timestamp)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -763,6 +763,6 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value Infinity of the type "DOUBLE" cannot be cast to 
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast(cast('inf' as float) as timestamp)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/date.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/date.sql.out
index dea228b3652..cea75f6dc46 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/date.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/date.sql.out
@@ -233,7 +233,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value 'xx' of the type "STRING" cannot be cast to 
"DATE" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select next_day("xx", "Mon")
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -328,7 +328,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '1.2' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select date_add('2011-11-11', '1.2')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -439,7 +439,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value '1.2' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select date_sub(date'2011-11-11', '1.2')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git 
a/sql/core/src/test/resources/sql-tests/results/ansi/datetime-parsing-invalid.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/ansi/datetime-parsing-invalid.sql.out
index 7b9ad7ac4a9..aa6a6835a6b 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/ansi/datetime-parsing-invalid.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/ansi/datetime-parsing-invalid.sql.out
@@ -251,7 +251,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value 'Unparseable' of the type "STRING" cannot be 
cast to "TIMESTAMP" because it is malformed. Correct the value as per the 
syntax, or change its target type. To return NULL instead, use `try_cast`. If 
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast("Unparseable" as timestamp)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -263,6 +263,6 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value 'Unparseable' of the type "STRING" cannot be 
cast to "DATE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select cast("Unparseable" as date)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
index 219b1e621e1..ec4174c212e 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
@@ -77,7 +77,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
10000000000000000000000000000000000000.1, 39, 1) cannot be represented as 
Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to "false" to bypass 
this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select (5e36BD + 0.1) + 5e36BD
        ^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -89,7 +89,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
-11000000000000000000000000000000000000.1, 39, 1) cannot be represented as 
Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to "false" to bypass 
this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select (-4e36BD - 0.1) - 7e36BD
        ^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -101,7 +101,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
152415787532388367501905199875019052100, 39, 0) cannot be represented as 
Decimal(38, 2). If necessary set "spark.sql.ansi.enabled" to "false" to bypass 
this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 12345678901234567890.0 * 12345678901234567890.0
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -113,7 +113,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
1000000000000000000000000000000000000.00000000000000000000000000000000000000, 
75, 38) cannot be represented as Decimal(38, 6). If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 1e35BD / 0.1
        ^^^^^^^^^^^^
 
@@ -149,7 +149,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
10123456789012345678901234567890123456.00000000000000000000000000000000000000, 
76, 38) cannot be represented as Decimal(38, 6). If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 1.0123456789012345678901234567890123456e36BD / 0.1
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -161,7 +161,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
101234567890123456789012345678901234.56000000000000000000000000000000000000, 
74, 38) cannot be represented as Decimal(38, 6). If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 1.0123456789012345678901234567890123456e35BD / 1.0
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -173,7 +173,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
10123456789012345678901234567890123.45600000000000000000000000000000000000, 73, 
38) cannot be represented as Decimal(38, 6). If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 1.0123456789012345678901234567890123456e34BD / 1.0
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -185,7 +185,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
1012345678901234567890123456789012.34560000000000000000000000000000000000, 72, 
38) cannot be represented as Decimal(38, 6). If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 1.0123456789012345678901234567890123456e33BD / 1.0
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -197,7 +197,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
101234567890123456789012345678901.23456000000000000000000000000000000000, 71, 
38) cannot be represented as Decimal(38, 6). If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 1.0123456789012345678901234567890123456e32BD / 1.0
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -217,7 +217,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 
101234567890123456789012345678901.23456000000000000000000000000000000000, 71, 
38) cannot be represented as Decimal(38, 6). If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 1.0123456789012345678901234567890123456e31BD / 0.1
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git 
a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
index 6a61369a63e..7eaa5cc9a78 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
@@ -123,7 +123,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select interval 2 second * 'a'
        ^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -135,7 +135,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select interval 2 second / 'a'
        ^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -147,7 +147,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select interval 2 year * 'a'
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -159,7 +159,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select interval 2 year / 'a'
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -187,7 +187,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 'a' * interval 2 second
        ^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -199,7 +199,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 'a' * interval 2 year
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -229,7 +229,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select interval '2 seconds' / 0
        ^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -265,7 +265,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select interval '2' year / 0
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -665,7 +665,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded, 1234567890123456789, 20, 
0) cannot be represented as Decimal(18, 6). If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select make_interval(0, 0, 0, 0, 0, 0, 1234567890123456789)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -1517,7 +1517,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value '4 11:11' of the type "STRING" cannot be cast 
to "TIMESTAMP" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select '4 11:11' - interval '4 22:12' day to minute
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -1529,7 +1529,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value '4 12:12:12' of the type "STRING" cannot be 
cast to "TIMESTAMP" because it is malformed. Correct the value as per the 
syntax, or change its target type. To return NULL instead, use `try_cast`. If 
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select '4 12:12:12' + interval '4 22:12' day to minute
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -1567,7 +1567,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value '1' of the type "STRING" cannot be cast to 
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select str - interval '4 22:12' day to minute from interval_view
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -1579,7 +1579,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
 [CAST_INVALID_INPUT] The value '1' of the type "STRING" cannot be cast to 
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select str + interval '4 22:12' day to minute from interval_view
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -2037,7 +2037,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] Overflow in integral divide. To return NULL instead, use 
'try_divide'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT (INTERVAL '-178956970-8' YEAR TO MONTH) / -1
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -2049,7 +2049,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] Overflow in integral divide. To return NULL instead, use 
'try_divide'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT (INTERVAL '-178956970-8' YEAR TO MONTH) / -1L
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -2095,7 +2095,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] Overflow in integral divide. To return NULL instead, use 
'try_divide'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT (INTERVAL '-106751991 04:00:54.775808' DAY TO SECOND) / -1
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -2107,7 +2107,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] Overflow in integral divide. To return NULL instead, use 
'try_divide'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT (INTERVAL '-106751991 04:00:54.775808' DAY TO SECOND) / -1L
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/map.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/map.sql.out
index fec9c42f5dc..8a4d9978274 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/map.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/map.sql.out
@@ -9,7 +9,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNoSuchElementException
 [MAP_KEY_DOES_NOT_EXIST] Key 5 does not exist. To return NULL instead, use 
`try_element_at`. If necessary set "spark.sql.ansi.enabled" to "false" to 
bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select element_at(map(1, 'a', 2, 'b'), 5)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -21,7 +21,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNoSuchElementException
 [MAP_KEY_DOES_NOT_EXIST] Key 5 does not exist. To return NULL instead, use 
`try_element_at`. If necessary set "spark.sql.ansi.enabled" to "false" to 
bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select map(1, 'a', 2, 'b')[5]
        ^^^^^^^^^^^^^^^^^^^^^^
 
@@ -115,7 +115,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNoSuchElementException
 [MAP_KEY_DOES_NOT_EXIST] Key 5 does not exist. To return NULL instead, use 
`try_element_at`. If necessary set "spark.sql.ansi.enabled" to "false" to 
bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select element_at(map(1, 'a', 2, 'b'), 5)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -127,6 +127,6 @@ struct<>
 -- !query output
 org.apache.spark.SparkNoSuchElementException
 [MAP_KEY_DOES_NOT_EXIST] Key 'c' does not exist. To return NULL instead, use 
`try_element_at`. If necessary set "spark.sql.ansi.enabled" to "false" to 
bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select element_at(map('a', 1, 'b', 2), 'c')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
index b4991a5b683..23dea32dc9d 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
@@ -83,7 +83,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 42) ==
+== SQL(line 1, position 43) ==
 ...t("abcd", -2), left("abcd", 0), left("abcd", 'a')
                                    ^^^^^^^^^^^^^^^^^
 
@@ -111,7 +111,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 44) ==
+== SQL(line 1, position 45) ==
 ...("abcd", -2), right("abcd", 0), right("abcd", 'a')
                                    ^^^^^^^^^^^^^^^^^^
 
@@ -420,7 +420,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'invalid_length' of the type "STRING" cannot be 
cast to "INT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT lpad('hi', 'invalid_length')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -432,7 +432,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'invalid_length' of the type "STRING" cannot be 
cast to "INT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT rpad('hi', 'invalid_length')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/sql/core/src/test/resources/sql-tests/results/interval.sql.out 
b/sql/core/src/test/resources/sql-tests/results/interval.sql.out
index f095ea5360b..aa07876e1ca 100644
--- a/sql/core/src/test/resources/sql-tests/results/interval.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/interval.sql.out
@@ -205,7 +205,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select interval '2 seconds' / 0
        ^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -241,7 +241,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select interval '2' year / 0
        ^^^^^^^^^^^^^^^^^^^^^
 
@@ -1993,7 +1993,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] Overflow in integral divide. To return NULL instead, use 
'try_divide'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT (INTERVAL '-178956970-8' YEAR TO MONTH) / -1
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -2005,7 +2005,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] Overflow in integral divide. To return NULL instead, use 
'try_divide'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT (INTERVAL '-178956970-8' YEAR TO MONTH) / -1L
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -2051,7 +2051,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] Overflow in integral divide. To return NULL instead, use 
'try_divide'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT (INTERVAL '-106751991 04:00:54.775808' DAY TO SECOND) / -1
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -2063,7 +2063,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] Overflow in integral divide. To return NULL instead, use 
'try_divide'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT (INTERVAL '-106751991 04:00:54.775808' DAY TO SECOND) / -1L
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
index 6f17a0cd760..6b8d31b917a 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
@@ -57,7 +57,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'test' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('test') AS error
        ^^^^^^^^^^^^^^^
 
@@ -77,7 +77,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'foo' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('foo') AS error
        ^^^^^^^^^^^^^^
 
@@ -105,7 +105,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'yeah' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('yeah') AS error
        ^^^^^^^^^^^^^^^
 
@@ -133,7 +133,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'nay' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('nay') AS error
        ^^^^^^^^^^^^^^
 
@@ -145,7 +145,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'on' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('on') AS true
        ^^^^^^^^^^^^^
 
@@ -157,7 +157,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'off' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('off') AS `false`
        ^^^^^^^^^^^^^^
 
@@ -169,7 +169,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'of' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('of') AS `false`
        ^^^^^^^^^^^^^
 
@@ -181,7 +181,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'o' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('o') AS error
        ^^^^^^^^^^^^
 
@@ -193,7 +193,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'on_' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('on_') AS error
        ^^^^^^^^^^^^^^
 
@@ -205,7 +205,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value 'off_' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('off_') AS error
        ^^^^^^^^^^^^^^^
 
@@ -225,7 +225,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value '11' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('11') AS error
        ^^^^^^^^^^^^^
 
@@ -245,7 +245,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value '000' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('000') AS error
        ^^^^^^^^^^^^^^
 
@@ -257,7 +257,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean('') AS error
        ^^^^^^^^^^^
 
@@ -366,7 +366,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value '  tru e ' of the type "STRING" cannot be cast 
to "BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean(string('  tru e ')) AS invalid
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -378,7 +378,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkRuntimeException
 [CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to 
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT boolean(string('')) AS invalid
        ^^^^^^^^^^^^^^^^^^^
 
@@ -525,7 +525,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 failed to evaluate expression CAST('XXX' AS BOOLEAN): [CAST_INVALID_INPUT] The 
value 'XXX' of the type "STRING" cannot be cast to "BOOLEAN" because it is 
malformed. Correct the value as per the syntax, or change its target type. To 
return NULL instead, use `try_cast`. If necessary set "spark.sql.ansi.enabled" 
to "false" to bypass this error.
-== SQL(line 2, position 11) ==
+== SQL(line 2, position 12) ==
    VALUES (boolean('XXX'))
            ^^^^^^^^^^^^^^
 ; line 2 pos 3
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
index d172e2ace04..58d5e9b86bb 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
@@ -97,7 +97,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'N A N' of the type "STRING" cannot be cast to 
"FLOAT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT float('N A N')
        ^^^^^^^^^^^^^^
 
@@ -109,7 +109,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'NaN x' of the type "STRING" cannot be cast to 
"FLOAT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT float('NaN x')
        ^^^^^^^^^^^^^^
 
@@ -121,7 +121,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value ' INFINITY    x' of the type "STRING" cannot be 
cast to "FLOAT" because it is malformed. Correct the value as per the syntax, 
or change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT float(' INFINITY    x')
        ^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -157,7 +157,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'nan' of the type "STRING" cannot be cast to 
"DECIMAL(10,0)" because it is malformed. Correct the value as per the syntax, 
or change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 13) ==
+== SQL(line 1, position 14) ==
 SELECT float(decimal('nan'))
              ^^^^^^^^^^^^^^
 
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
index 8259856ed7e..4df6d37ef2a 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
@@ -129,7 +129,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'N A N' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT double('N A N')
        ^^^^^^^^^^^^^^^
 
@@ -141,7 +141,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'NaN x' of the type "STRING" cannot be cast to 
"DOUBLE" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT double('NaN x')
        ^^^^^^^^^^^^^^^
 
@@ -153,7 +153,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value ' INFINITY    x' of the type "STRING" cannot be 
cast to "DOUBLE" because it is malformed. Correct the value as per the syntax, 
or change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT double(' INFINITY    x')
        ^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -189,7 +189,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'nan' of the type "STRING" cannot be cast to 
"DECIMAL(10,0)" because it is malformed. Correct the value as per the syntax, 
or change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 14) ==
+== SQL(line 1, position 15) ==
 SELECT double(decimal('nan'))
               ^^^^^^^^^^^^^^
 
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int4.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int4.sql.out
index 7e9fe0357c5..f84feac720e 100755
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int4.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int4.sql.out
@@ -201,7 +201,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] integer overflow. To return NULL instead, use 
'try_multiply'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 25) ==
+== SQL(line 1, position 26) ==
 SELECT '' AS five, i.f1, i.f1 * smallint('2') AS x FROM INT4_TBL i
                          ^^^^^^^^^^^^^^^^^^^^
 
@@ -224,7 +224,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] integer overflow. To return NULL instead, use 
'try_multiply'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 25) ==
+== SQL(line 1, position 26) ==
 SELECT '' AS five, i.f1, i.f1 * int('2') AS x FROM INT4_TBL i
                          ^^^^^^^^^^^^^^^
 
@@ -247,7 +247,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] integer overflow. To return NULL instead, use 'try_add'. 
If necessary set spark.sql.ansi.enabled to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 25) ==
+== SQL(line 1, position 26) ==
 SELECT '' AS five, i.f1, i.f1 + smallint('2') AS x FROM INT4_TBL i
                          ^^^^^^^^^^^^^^^^^^^^
 
@@ -271,7 +271,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] integer overflow. To return NULL instead, use 'try_add'. 
If necessary set spark.sql.ansi.enabled to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 25) ==
+== SQL(line 1, position 26) ==
 SELECT '' AS five, i.f1, i.f1 + int('2') AS x FROM INT4_TBL i
                          ^^^^^^^^^^^^^^^
 
@@ -295,7 +295,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] integer overflow. To return NULL instead, use 
'try_subtract'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 25) ==
+== SQL(line 1, position 26) ==
 SELECT '' AS five, i.f1, i.f1 - smallint('2') AS x FROM INT4_TBL i
                          ^^^^^^^^^^^^^^^^^^^^
 
@@ -319,7 +319,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] integer overflow. To return NULL instead, use 
'try_subtract'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 25) ==
+== SQL(line 1, position 26) ==
 SELECT '' AS five, i.f1, i.f1 - int('2') AS x FROM INT4_TBL i
                          ^^^^^^^^^^^^^^^
 
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
index 93614221ff8..2d68cbb4481 100755
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
@@ -393,7 +393,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] long overflow. To return NULL instead, use 
'try_multiply'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 28) ==
+== SQL(line 1, position 29) ==
 SELECT '' AS three, q1, q2, q1 * q2 AS multiply FROM INT8_TBL
                             ^^^^^^^
 
@@ -651,7 +651,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select bigint('9223372036854775800') / bigint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -663,7 +663,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select bigint('-9223372036854775808') / smallint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -675,7 +675,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select smallint('100') / bigint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -830,7 +830,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] long overflow. To return NULL instead, use 
'try_multiply'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT bigint((-9223372036854775808)) * bigint((-1))
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -858,7 +858,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] long overflow. To return NULL instead, use 
'try_multiply'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT bigint((-9223372036854775808)) * int((-1))
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -886,7 +886,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [ARITHMETIC_OVERFLOW] long overflow. To return NULL instead, use 
'try_multiply'. If necessary set spark.sql.ansi.enabled to "false" (except for 
ANSI interval type) to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 SELECT bigint((-9223372036854775808)) * smallint((-1))
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
index c7e7cd361fb..0bbd00c2d3b 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
@@ -178,7 +178,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 39) ==
+== SQL(line 1, position 40) ==
 ...1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2
                                    ^^^
 
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/text.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/text.sql.out
index 50c714b7f36..17aece8592a 100755
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/text.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/text.sql.out
@@ -66,7 +66,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'four: 2' of the type "STRING" cannot be cast 
to "BIGINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select string('four: ') || 2+2
        ^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -78,7 +78,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'four: 2' of the type "STRING" cannot be cast 
to "BIGINT" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 1, position 7) ==
+== SQL(line 1, position 8) ==
 select 'four: ' || 2+2
        ^^^^^^^^^^^^^^^
 
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part2.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part2.sql.out
index 9aeab7f957b..ff5bf1c2e21 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part2.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part2.sql.out
@@ -463,7 +463,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkNumberFormatException
 [CAST_INVALID_INPUT] The value 'NaN' of the type "STRING" cannot be cast to 
"INT" because it is malformed. Correct the value as per the syntax, or change 
its target type. To return NULL instead, use `try_cast`. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 3, position 12) ==
+== SQL(line 3, position 13) ==
 window w as (order by f_numeric range between
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
              1.1 preceding and 'NaN' following)
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
index cad5e7d77dd..4b75b81fe85 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
@@ -73,7 +73,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 failed to evaluate expression CAST('11:00 BST' AS TIMESTAMP): 
[CAST_INVALID_INPUT] The value '11:00 BST' of the type "STRING" cannot be cast 
to "TIMESTAMP" because it is malformed. Correct the value as per the syntax, or 
change its target type. To return NULL instead, use `try_cast`. If necessary 
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-== SQL(line 2, position 23) ==
+== SQL(line 2, position 24) ==
 (1, timestamp '11:00', cast ('11:00 BST' as timestamp), cast ('1 year' as 
timestamp), ...
                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 ; line 1 pos 22
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
index 5020e328a7e..5113200a058 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
@@ -502,7 +502,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 failed to evaluate expression CAST('nan' AS INT): [CAST_INVALID_INPUT] The 
value 'nan' of the type "STRING" cannot be cast to "INT" because it is 
malformed. Correct the value as per the syntax, or change its target type. To 
return NULL instead, use `try_cast`. If necessary set "spark.sql.ansi.enabled" 
to "false" to bypass this error.
-== SQL(line 3, position 28) ==
+== SQL(line 3, position 29) ==
 FROM (VALUES(1,1),(2,2),(3,(cast('nan' as int))),(4,3),(5,4)) t(a,b)
                             ^^^^^^^^^^^^^^^^^^
 ; line 3 pos 6
diff --git 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
index 4631979adf8..59be18dad7f 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
@@ -178,7 +178,7 @@ struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
 [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to "false" (except for ANSI interval 
type) to bypass this error.
-== SQL(line 1, position 39) ==
+== SQL(line 1, position 40) ==
 ...1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2
                                    ^^^^^^^^
 
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
index 1a39ecc190e..e7e3c0df814 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
@@ -49,7 +49,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
         "Division by zero. To return NULL instead, use `try_divide`. If 
necessary set " +
         s"""$ansiConf to "false" (except for ANSI interval type) to bypass 
this error.""" +
         """
-          |== SQL(line 1, position 7) ==
+          |== SQL(line 1, position 8) ==
           |select 6/0
           |       ^^^
           |""".stripMargin,
@@ -77,7 +77,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
         "Decimal(expanded, 66666666666666.666, 17, 3) cannot be represented as 
Decimal(8, 1). " +
         s"""If necessary set $ansiConf to "false" to bypass this error.""" +
         """
-          |== SQL(line 1, position 7) ==
+          |== SQL(line 1, position 8) ==
           |select CAST('66666666666666.666' AS DECIMAL(8, 1))
           |       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
           |""".stripMargin,
@@ -117,7 +117,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
       msg = "Key 3 does not exist. To return NULL instead, use 
`try_element_at`. " +
         s"""If necessary set $ansiConf to "false" to bypass this error.""" +
         """
-          |== SQL(line 1, position 7) ==
+          |== SQL(line 1, position 8) ==
           |select element_at(map(1, 'a', 2, 'b'), 3)
           |       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
           |""".stripMargin
@@ -134,7 +134,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
         "because it is malformed. Correct the value as per the syntax, " +
         "or change its target type. To return NULL instead, use `try_cast`. If 
necessary set " +
         s"""$ansiConf to \"false\" to bypass this error.
-          |== SQL(line 1, position 7) ==
+          |== SQL(line 1, position 8) ==
           |select CAST('111111111111xe23' AS DOUBLE)
           |       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
           |""".stripMargin)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to