This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new af016d9eb87 [SPARK-39050][SQL] Error class: UNSUPPORTED_OPERATION to UNSUPPORTED_FEATURE
af016d9eb87 is described below

commit af016d9eb87dcdd2423dc6eae691a52a5a23ae63
Author: Serge Rielau <serge.rie...@databricks.com>
AuthorDate: Fri Apr 29 08:57:36 2022 +0300

    [SPARK-39050][SQL] Error class: UNSUPPORTED_OPERATION to UNSUPPORTED_FEATURE
    
    ### What changes were proposed in this pull request?
    
    UNSUPPORTED_OPERATION will be removed and replaced with the existing UNSUPPORTED_FEATURE.
    This affects three errors: ARROW TIMESTAMP (moved to INTERNAL_ERROR), ORC TIMESTAMP TO TIMESTAMP_NTZ, and ORC TIMESTAMP_NTZ TO TIMESTAMP (both moved to UNSUPPORTED_FEATURE under the new ORC_TYPE_CAST sub-class).
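    For illustration, the ORC read errors change shape roughly as follows (a sketch distilled from the diff below, not a verbatim excerpt):

        // Before: free-form text under the generic error class.
        new SparkUnsupportedOperationException(
          errorClass = "UNSUPPORTED_OPERATION",
          messageParameters = Array("Unable to convert ... of Orc to data type ..."))

        // After: structured parameters under UNSUPPORTED_FEATURE, with the
        // ORC_TYPE_CAST sub-class name passed as the first message parameter.
        new SparkUnsupportedOperationException(
          errorClass = "UNSUPPORTED_FEATURE",
          messageParameters = Array("ORC_TYPE_CAST",
            toSQLType(TimestampType), toSQLType(TimestampNTZType)))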
    
    ### Why are the changes needed?
    
    Clean up the error classes before publishing them.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, this is still internal, unreleased code.
    
    ### How was this patch tested?
    
    Ran the existing QueryExecutionErrorsSuite.
    
    Closes #36385 from srielau/SPARK-39050-UNSUPPORTED_OPERATION-to-UNSUPPORTED_FEATURE.
    
    Authored-by: Serge Rielau <serge.rie...@databricks.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/resources/error/error-classes.json   |  6 ++---
 .../spark/sql/errors/QueryExecutionErrors.scala    | 30 ++++++++++------------
 .../org/apache/spark/sql/util/ArrowUtils.scala     |  2 +-
 .../apache/spark/sql/util/ArrowUtilsSuite.scala    |  4 +--
 .../sql/errors/QueryExecutionErrorsSuite.scala     | 23 +++++++++--------
 5 files changed, 31 insertions(+), 34 deletions(-)

diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json
index 4738599685b..4908a9b6c2e 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -213,6 +213,9 @@
       "NATURAL_CROSS_JOIN" : {
         "message" : [ "NATURAL CROSS JOIN." ]
       },
+      "ORC_TYPE_CAST" : {
+        "message" : [ "Unable to convert <orcType> of Orc to data type 
<toType>." ]
+      },
       "PANDAS_UDAF_IN_PIVOT" : {
         "message" : [ "Pandas user defined aggregate function in the PIVOT 
clause." ]
       },
@@ -243,9 +246,6 @@
   "UNSUPPORTED_GROUPING_EXPRESSION" : {
     "message" : [ "grouping()/grouping_id() can only be used with 
GroupingSets/Cube/Rollup" ]
   },
-  "UNSUPPORTED_OPERATION" : {
-    "message" : [ "The operation is not supported: <operation>" ]
-  },
   "UNTYPED_SCALA_UDF" : {
     "message" : [ "You're using untyped Scala UDF, which does not have the 
input type information. Spark may blindly pass null to the Scala closure with 
primitive-type argument, and the closure will see the default value of the Java 
type for the null argument, e.g. `udf((x: Int) => x, IntegerType)`, the result 
is 0 for null input. To get rid of this error, you could:\n1. use typed Scala 
UDF APIs(without return type parameter), e.g. `udf((x: Int) => x)`\n2. use Java 
UDF APIs, e.g. `udf(ne [...]
   },
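The <orcType> and <toType> placeholders above are filled positionally from the exception's messageParameters, with the UNSUPPORTED_FEATURE sub-class name in the first slot. A minimal sketch, with the parameter values taken from the test expectations further below:

    import org.apache.spark.SparkUnsupportedOperationException

    // Hypothetical standalone construction; the QueryExecutionErrors factory
    // methods below make exactly this call.
    val e = new SparkUnsupportedOperationException(
      errorClass = "UNSUPPORTED_FEATURE",
      messageParameters = Array("ORC_TYPE_CAST", "\"TIMESTAMP\"", "\"TIMESTAMP_NTZ\""))
    // Per the updated test suite, e.getMessage should render as:
    //   The feature is not supported: Unable to convert "TIMESTAMP" of Orc
    //   to data type "TIMESTAMP_NTZ".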
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
index d5e42a1dde7..225315d3f02 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
@@ -68,6 +68,11 @@ import org.apache.spark.util.CircularBuffer
  */
 object QueryExecutionErrors extends QueryErrorsBase {
 
+  def internalMissingTimezoneIdError(): Throwable = {
+    new SparkIllegalStateException(errorClass = "INTERNAL_ERROR",
+      messageParameters = Array("Missing timezoneId where it is mandatory."))
+  }
+
   def logicalHintOperatorNotRemovedDuringAnalysisError(): Throwable = {
     new SparkIllegalStateException(errorClass = "INTERNAL_ERROR",
       messageParameters = Array(
@@ -1614,15 +1619,6 @@ object QueryExecutionErrors extends QueryErrorsBase {
     new SparkException(s"Can not load in UserDefinedType ${name} for user 
class ${userClass}.")
   }
 
-  def timeZoneIdNotSpecifiedForTimestampTypeError(): Throwable = {
-    new SparkUnsupportedOperationException(
-      errorClass = "UNSUPPORTED_OPERATION",
-      messageParameters = Array(
-        s"${toSQLType(TimestampType)} must supply timeZoneId parameter " +
-          s"while converting to the arrow timestamp type.")
-    )
-  }
-
   def notPublicClassError(name: String): Throwable = {
     new UnsupportedOperationException(
       s"$name is not a public class. Only public classes are supported.")
@@ -1936,18 +1932,18 @@ object QueryExecutionErrors extends QueryErrorsBase {
 
   def cannotConvertOrcTimestampToTimestampNTZError(): Throwable = {
     new SparkUnsupportedOperationException(
-      errorClass = "UNSUPPORTED_OPERATION",
-      messageParameters = Array(
-        s"Unable to convert ${toSQLType(TimestampType)} of Orc to " +
-        s"data type ${toSQLType(TimestampNTZType)}."))
+      errorClass = "UNSUPPORTED_FEATURE",
+      messageParameters = Array("ORC_TYPE_CAST",
+        toSQLType(TimestampType),
+        toSQLType(TimestampNTZType)))
   }
 
   def cannotConvertOrcTimestampNTZToTimestampLTZError(): Throwable = {
     new SparkUnsupportedOperationException(
-      errorClass = "UNSUPPORTED_OPERATION",
-      messageParameters = Array(
-        s"Unable to convert ${toSQLType(TimestampNTZType)} of Orc to " +
-        s"data type ${toSQLType(TimestampType)}."))
+      errorClass = "UNSUPPORTED_FEATURE",
+      messageParameters = Array("ORC_TYPE_CAST",
+        toSQLType(TimestampNTZType),
+        toSQLType(TimestampType)))
   }
 
   def writePartitionExceedConfigSizeWhenDynamicPartitionError(
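A hedged sketch of what callers now observe from the renamed factories (getErrorClass comes from Spark's SparkThrowable interface; it is assumed here and is not part of this diff):

    import org.apache.spark.SparkUnsupportedOperationException
    import org.apache.spark.sql.errors.QueryExecutionErrors

    try {
      throw QueryExecutionErrors.cannotConvertOrcTimestampToTimestampNTZError()
    } catch {
      case e: SparkUnsupportedOperationException =>
        // The error class is now machine-readable rather than baked into
        // free-form message text.
        assert(e.getErrorClass == "UNSUPPORTED_FEATURE")
    }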
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/util/ArrowUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/util/ArrowUtils.scala
index 4254c045ca6..8fa1879548c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/util/ArrowUtils.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/util/ArrowUtils.scala
@@ -49,7 +49,7 @@ private[sql] object ArrowUtils {
     case DateType => new ArrowType.Date(DateUnit.DAY)
     case TimestampType =>
       if (timeZoneId == null) {
-        throw QueryExecutionErrors.timeZoneIdNotSpecifiedForTimestampTypeError()
+        throw QueryExecutionErrors.internalMissingTimezoneIdError()
       } else {
         new ArrowType.Timestamp(TimeUnit.MICROSECOND, timeZoneId)
       }
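To see the Arrow-side change end to end, a minimal sketch mirroring the updated test below:

    import org.apache.spark.sql.types.{StructType, TimestampType}
    import org.apache.spark.sql.util.ArrowUtils

    // Passing a null timeZoneId for TimestampType now raises INTERNAL_ERROR
    // (SparkIllegalStateException) instead of UNSUPPORTED_OPERATION:
    ArrowUtils.toArrowSchema(new StructType().add("value", TimestampType), null)
    // throws with message: "Missing timezoneId where it is mandatory."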
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/util/ArrowUtilsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/util/ArrowUtilsSuite.scala
index 642b387b88e..03178f2b761 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/util/ArrowUtilsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/util/ArrowUtilsSuite.scala
@@ -50,10 +50,10 @@ class ArrowUtilsSuite extends SparkFunSuite {
     roundtrip(DateType)
     roundtrip(YearMonthIntervalType())
     roundtrip(DayTimeIntervalType())
-    val tsExMsg = intercept[UnsupportedOperationException] {
+    val tsExMsg = intercept[org.apache.spark.SparkIllegalStateException] {
       roundtrip(TimestampType)
     }
-    assert(tsExMsg.getMessage.contains("timeZoneId"))
+    assert(tsExMsg.getMessage.contains("timezoneId"))
   }
 
   test("timestamp") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
index 1105503e138..338da79674e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
@@ -261,17 +261,16 @@ class QueryExecutionErrorsSuite
     }
   }
 
-  test("UNSUPPORTED_OPERATION: timeZoneId not specified while converting 
TimestampType to Arrow") {
+  test("INTERNAL_ERROR: timeZoneId not specified while converting 
TimestampType to Arrow") {
     checkErrorClass(
-      exception = intercept[SparkUnsupportedOperationException] {
+      exception = intercept[SparkIllegalStateException] {
        ArrowUtils.toArrowSchema(new StructType().add("value", TimestampType), null)
       },
-      errorClass = "UNSUPPORTED_OPERATION",
-      msg = "The operation is not supported: \"TIMESTAMP\" must supply 
timeZoneId " +
-        "parameter while converting to the arrow timestamp type.")
+      errorClass = "INTERNAL_ERROR",
+      msg = "Missing timezoneId where it is mandatory.")
   }
 
-  test("UNSUPPORTED_OPERATION - SPARK-36346: can't read Timestamp as 
TimestampNTZ") {
+  test("UNSUPPORTED_FEATURE - SPARK-36346: can't read Timestamp as 
TimestampNTZ") {
     withTempPath { file =>
       sql("select timestamp_ltz'2019-03-21 
00:02:03'").write.orc(file.getCanonicalPath)
       withAllNativeOrcReaders {
@@ -279,14 +278,15 @@ class QueryExecutionErrorsSuite
           exception = intercept[SparkException] {
             spark.read.schema("time 
timestamp_ntz").orc(file.getCanonicalPath).collect()
           }.getCause.asInstanceOf[SparkUnsupportedOperationException],
-          errorClass = "UNSUPPORTED_OPERATION",
-          msg = "The operation is not supported: " +
+          errorClass = "UNSUPPORTED_FEATURE",
+          errorSubClass = Some("ORC_TYPE_CAST"),
+          msg = "The feature is not supported: " +
             "Unable to convert \"TIMESTAMP\" of Orc to data type 
\"TIMESTAMP_NTZ\".")
       }
     }
   }
 
-  test("UNSUPPORTED_OPERATION - SPARK-38504: can't read TimestampNTZ as 
TimestampLTZ") {
+  test("UNSUPPORTED_FEATURE - SPARK-38504: can't read TimestampNTZ as 
TimestampLTZ") {
     withTempPath { file =>
       sql("select timestamp_ntz'2019-03-21 
00:02:03'").write.orc(file.getCanonicalPath)
       withAllNativeOrcReaders {
@@ -294,8 +294,9 @@ class QueryExecutionErrorsSuite
           exception = intercept[SparkException] {
             spark.read.schema("time 
timestamp_ltz").orc(file.getCanonicalPath).collect()
           }.getCause.asInstanceOf[SparkUnsupportedOperationException],
-          errorClass = "UNSUPPORTED_OPERATION",
-          msg = "The operation is not supported: " +
+          errorClass = "UNSUPPORTED_FEATURE",
+          errorSubClass = Some("ORC_TYPE_CAST"),
+          msg = "The feature is not supported: " +
             "Unable to convert \"TIMESTAMP_NTZ\" of Orc to data type 
\"TIMESTAMP\".")
       }
     }

