This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 4ec27c3801a [SPARK-44555][SQL] Use checkError() to check Exception in command Suite & assign some error class names
4ec27c3801a is described below

commit 4ec27c3801aaa0cbba3e086c278a0ff96260b84a
Author: panbingkun <pbk1...@gmail.com>
AuthorDate: Wed Aug 2 10:51:16 2023 +0500

    [SPARK-44555][SQL] Use checkError() to check Exception in command Suite & assign some error class names
    
    ### What changes were proposed in this pull request?
    The PR aims to:
    1. Use `checkError()` to check exceptions in the `command` test suites; a before/after sketch is shown below.
    2. Assign names to some error classes, namely: `UNSUPPORTED_FEATURE.PURGE_PARTITION` and `UNSUPPORTED_FEATURE.PURGE_TABLE`.
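
    A minimal before/after sketch of the test-side change, mirroring the `DropTableSuite` hunk below (`checkError` is the assertion helper from `SparkFunSuite`):

    ```scala
    // Before: brittle substring matching on the exception message.
    val errMsg = intercept[UnsupportedOperationException] {
      sql(s"DROP TABLE $catalog.ns.tbl PURGE")
    }.getMessage
    assert(errMsg.contains("Purge table is not supported"))

    // After: assert on the structured error class and its message parameters.
    checkError(
      exception = intercept[SparkUnsupportedOperationException] {
        sql(s"DROP TABLE $catalog.ns.tbl PURGE")
      },
      errorClass = "UNSUPPORTED_FEATURE.PURGE_TABLE",
      parameters = Map.empty)
    ```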
    
    ### Why are the changes needed?
    The changes improve the error framework: the purge errors now carry stable, named error classes, and the tests verify the error class and message parameters via `checkError()` instead of matching message substrings.
    
    ### Does this PR introduce _any_ user-facing change?
    No.
    
    ### How was this patch tested?
    - Manually tested.
    - Passed GA.
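
    As an illustration, the touched suites can also be run locally with commands along these lines (a sketch; exact module and suite selectors may vary):

    ```
    build/sbt "catalyst/testOnly org.apache.spark.sql.connector.catalog.SupportsPartitionManagementSuite"
    build/sbt "sql/testOnly org.apache.spark.sql.execution.command.v2.DropTableSuite"
    ```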
    
    Closes #42169 from panbingkun/checkError_for_command.
    
    Authored-by: panbingkun <pbk1...@gmail.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 .../src/main/resources/error/error-classes.json    | 10 ++++++++
 ...r-conditions-unsupported-feature-error-class.md |  8 ++++++
 .../catalog/SupportsAtomicPartitionManagement.java |  3 ++-
 .../catalog/SupportsPartitionManagement.java       |  3 ++-
 .../spark/sql/connector/catalog/TableCatalog.java  |  3 ++-
 .../spark/sql/errors/QueryExecutionErrors.scala    | 12 +++++++++
 .../SupportsAtomicPartitionManagementSuite.scala   | 13 ++++++----
 .../catalog/SupportsPartitionManagementSuite.scala | 13 ++++++----
 .../command/v1/AlterTableAddPartitionSuite.scala   | 14 ++++++----
 .../command/v1/AlterTableDropPartitionSuite.scala  | 12 +++++----
 .../command/v1/AlterTableRenameSuite.scala         | 11 +++++---
 .../command/v1/AlterTableSetLocationSuite.scala    | 11 +++++---
 .../command/v1/ShowCreateTableSuite.scala          | 12 +++++----
 .../sql/execution/command/v1/ShowTablesSuite.scala | 22 ++++++++++------
 .../execution/command/v1/TruncateTableSuite.scala  | 11 +++++---
 .../command/v2/AlterTableDropPartitionSuite.scala  | 12 ++++++---
 .../v2/AlterTableRecoverPartitionsSuite.scala      | 11 +++++---
 .../command/v2/AlterTableSetLocationSuite.scala    | 12 +++++----
 .../sql/execution/command/v2/DropTableSuite.scala  | 12 ++++++---
 .../command/v2/MsckRepairTableSuite.scala          | 11 +++++---
 .../sql/execution/command/v2/ShowTablesSuite.scala | 11 +++++---
 .../execution/command/ShowCreateTableSuite.scala   | 30 +++++++++++++---------
 22 files changed, 172 insertions(+), 85 deletions(-)

diff --git a/common/utils/src/main/resources/error/error-classes.json b/common/utils/src/main/resources/error/error-classes.json
index 7012c66c895..06350522834 100644
--- a/common/utils/src/main/resources/error/error-classes.json
+++ b/common/utils/src/main/resources/error/error-classes.json
@@ -3020,6 +3020,16 @@
           "Pivoting by the value '<value>' of the column data type <type>."
         ]
       },
+      "PURGE_PARTITION" : {
+        "message" : [
+          "Partition purge."
+        ]
+      },
+      "PURGE_TABLE" : {
+        "message" : [
+          "Purge table."
+        ]
+      },
       "PYTHON_UDF_IN_ON_CLAUSE" : {
         "message" : [
           "Python UDF in the ON clause of a <joinType> JOIN. In case of an 
INNNER JOIN consider rewriting to a CROSS JOIN with a WHERE clause."
diff --git a/docs/sql-error-conditions-unsupported-feature-error-class.md b/docs/sql-error-conditions-unsupported-feature-error-class.md
index aa1c622c458..7a60dc76fa6 100644
--- a/docs/sql-error-conditions-unsupported-feature-error-class.md
+++ b/docs/sql-error-conditions-unsupported-feature-error-class.md
@@ -141,6 +141,14 @@ PIVOT clause following a GROUP BY clause. Consider pushing the GROUP BY into a s
 
 Pivoting by the value '`<value>`' of the column data type `<type>`.
 
+## PURGE_PARTITION
+
+Partition purge.
+
+## PURGE_TABLE
+
+Purge table.
+
 ## PYTHON_UDF_IN_ON_CLAUSE
 
 Python UDF in the ON clause of a `<joinType>` JOIN. In case of an INNNER JOIN consider rewriting to a CROSS JOIN with a WHERE clause.
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagement.java b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagement.java
index 3eb9bf9f913..48c6392d2b8 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagement.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagement.java
@@ -23,6 +23,7 @@ import org.apache.spark.annotation.Experimental;
 import org.apache.spark.sql.catalyst.InternalRow;
 import org.apache.spark.sql.catalyst.analysis.NoSuchPartitionException;
 import org.apache.spark.sql.catalyst.analysis.PartitionsAlreadyExistException;
+import org.apache.spark.sql.errors.QueryExecutionErrors;
 
 /**
  * An atomic partition interface of {@link Table} to operate multiple partitions atomically.
@@ -107,7 +108,7 @@ public interface SupportsAtomicPartitionManagement extends SupportsPartitionMana
    */
   default boolean purgePartitions(InternalRow[] idents)
     throws NoSuchPartitionException, UnsupportedOperationException {
-    throw new UnsupportedOperationException("Partition purge is not supported");
+    throw QueryExecutionErrors.unsupportedPurgePartitionError();
   }
 
   /**
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsPartitionManagement.java b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsPartitionManagement.java
index 4830e193222..e7a2af29a00 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsPartitionManagement.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsPartitionManagement.java
@@ -23,6 +23,7 @@ import org.apache.spark.annotation.Experimental;
 import org.apache.spark.sql.catalyst.InternalRow;
 import org.apache.spark.sql.catalyst.analysis.NoSuchPartitionException;
 import org.apache.spark.sql.catalyst.analysis.PartitionsAlreadyExistException;
+import org.apache.spark.sql.errors.QueryExecutionErrors;
 import org.apache.spark.sql.types.StructType;
 
 /**
@@ -88,7 +89,7 @@ public interface SupportsPartitionManagement extends Table {
      */
     default boolean purgePartition(InternalRow ident)
       throws NoSuchPartitionException, UnsupportedOperationException {
-      throw new UnsupportedOperationException("Partition purge is not supported");
+      throw QueryExecutionErrors.unsupportedPurgePartitionError();
     }
 
     /**
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java
index 6cfd5ab1b6b..d99e7e14b01 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java
@@ -23,6 +23,7 @@ import org.apache.spark.sql.catalyst.analysis.NoSuchNamespaceException;
 import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
 import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException;
 import org.apache.spark.sql.errors.QueryCompilationErrors;
+import org.apache.spark.sql.errors.QueryExecutionErrors;
 import org.apache.spark.sql.types.StructType;
 
 import java.util.Collections;
@@ -256,7 +257,7 @@ public interface TableCatalog extends CatalogPlugin {
    * @since 3.1.0
    */
   default boolean purgeTable(Identifier ident) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("Purge table is not supported.");
+    throw QueryExecutionErrors.unsupportedPurgeTableError();
   }
 
   /**
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
index 43b4e7a5449..3622ffebb74 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
@@ -2795,4 +2795,16 @@ private[sql] object QueryExecutionErrors extends QueryErrorsBase with ExecutionE
       errorClass = "MERGE_CARDINALITY_VIOLATION",
       messageParameters = Map.empty)
   }
+
+  def unsupportedPurgePartitionError(): SparkUnsupportedOperationException = {
+    new SparkUnsupportedOperationException(
+      errorClass = "UNSUPPORTED_FEATURE.PURGE_PARTITION",
+      messageParameters = Map.empty)
+  }
+
+  def unsupportedPurgeTableError(): SparkUnsupportedOperationException = {
+    new SparkUnsupportedOperationException(
+      errorClass = "UNSUPPORTED_FEATURE.PURGE_TABLE",
+      messageParameters = Map.empty)
+  }
 }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagementSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagementSuite.scala
index 90ed106d8ed..4d25fda92ec 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagementSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagementSuite.scala
@@ -19,7 +19,7 @@ package org.apache.spark.sql.connector.catalog
 
 import java.util
 
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{SparkFunSuite, SparkUnsupportedOperationException}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, PartitionsAlreadyExistException}
 import org.apache.spark.sql.connector.expressions.{LogicalExpressions, NamedReference, Transform}
@@ -117,10 +117,13 @@ class SupportsAtomicPartitionManagementSuite extends SparkFunSuite {
     partTable.createPartitions(
       partIdents,
       Array(new util.HashMap[String, String](), new util.HashMap[String, String]()))
-    val errMsg = intercept[UnsupportedOperationException] {
-      partTable.purgePartitions(partIdents)
-    }.getMessage
-    assert(errMsg.contains("purge is not supported"))
+    checkError(
+      exception = intercept[SparkUnsupportedOperationException] {
+        partTable.purgePartitions(partIdents)
+      },
+      errorClass = "UNSUPPORTED_FEATURE.PURGE_PARTITION",
+      parameters = Map.empty
+    )
   }
 
   test("dropPartitions failed if partition not exists") {
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsPartitionManagementSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsPartitionManagementSuite.scala
index 40114d063aa..501f363d7dc 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsPartitionManagementSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsPartitionManagementSuite.scala
@@ -21,7 +21,7 @@ import java.util
 
 import scala.collection.JavaConverters._
 
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{SparkFunSuite, SparkUnsupportedOperationException}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, PartitionsAlreadyExistException}
 import org.apache.spark.sql.connector.expressions.{LogicalExpressions, NamedReference, Transform}
@@ -89,10 +89,13 @@ class SupportsPartitionManagementSuite extends SparkFunSuite {
     val table = catalog.loadTable(ident)
     val partTable = new InMemoryPartitionTable(
       table.name(), table.schema(), table.partitioning(), table.properties())
-    val errMsg = intercept[UnsupportedOperationException] {
-      partTable.purgePartition(InternalRow.apply("3"))
-    }.getMessage
-    assert(errMsg.contains("purge is not supported"))
+    checkError(
+      exception = intercept[SparkUnsupportedOperationException] {
+        partTable.purgePartition(InternalRow.apply("3"))
+      },
+      errorClass = "UNSUPPORTED_FEATURE.PURGE_PARTITION",
+      parameters = Map.empty
+    )
   }
 
   test("replacePartitionMetadata") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala
index d41fd6b00f8..71f04159638 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala
@@ -39,11 +39,15 @@ trait AlterTableAddPartitionSuiteBase extends command.AlterTableAddPartitionSuit
   test("empty string as partition value") {
     withNamespaceAndTable("ns", "tbl") { t =>
       sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY 
(p1)")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"ALTER TABLE $t ADD PARTITION (p1 = '')")
-      }.getMessage
-      assert(errMsg.contains("Partition spec is invalid. " +
-        "The spec ([p1=]) contains an empty partition column value"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"ALTER TABLE $t ADD PARTITION (p1 = '')")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1076",
+        parameters = Map(
+          "details" -> "The spec ([p1=]) contains an empty partition column 
value"
+        )
+      )
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala
index cc57e10a168..8d403429ca5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala
@@ -79,11 +79,13 @@ class AlterTableDropPartitionSuite
   test("empty string as partition value") {
     withNamespaceAndTable("ns", "tbl") { t =>
       sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY 
(p1)")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"ALTER TABLE $t DROP PARTITION (p1 = '')")
-      }.getMessage
-      assert(errMsg.contains("Partition spec is invalid. " +
-        "The spec ([p1=]) contains an empty partition column value"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"ALTER TABLE $t DROP PARTITION (p1 = '')")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1076",
+        parameters = Map("details" -> "The spec ([p1=]) contains an empty 
partition column value")
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenameSuite.scala
index 3efd6d8a957..dfbdc6a4ca7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenameSuite.scala
@@ -37,10 +37,13 @@ trait AlterTableRenameSuiteBase extends command.AlterTableRenameSuiteBase with Q
         sql(s"CREATE NAMESPACE $catalog.src_ns")
         val src = dst.replace("dst", "src")
         sql(s"CREATE TABLE $src (c0 INT) $defaultUsing")
-        val errMsg = intercept[AnalysisException] {
-          sql(s"ALTER TABLE $src RENAME TO dst_ns.dst_tbl")
-        }.getMessage
-        assert(errMsg.contains("source and destination databases do not 
match"))
+        checkError(
+          exception = intercept[AnalysisException] {
+            sql(s"ALTER TABLE $src RENAME TO dst_ns.dst_tbl")
+          },
+          errorClass = "_LEGACY_ERROR_TEMP_1073",
+          parameters = Map("db" -> "src_ns", "newDb" -> "dst_ns")
+        )
       }
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableSetLocationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableSetLocationSuite.scala
index d0f1a835942..53b9853f36c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableSetLocationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableSetLocationSuite.scala
@@ -89,10 +89,13 @@ trait AlterTableSetLocationSuiteBase extends command.AlterTableSetLocationSuiteB
         checkLocation(tableIdent, new URI("/path/to/part/ways2"), Some(partSpec))
       }
       withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
-        val e = intercept[AnalysisException] {
-          sql(s"ALTER TABLE $t PARTITION (A='1', B='2') SET LOCATION 
'/path/to/part/ways3'")
-        }.getMessage
-        assert(e.contains("not a valid partition column"))
+        checkError(
+          exception = intercept[AnalysisException] {
+            sql(s"ALTER TABLE $t PARTITION (A='1', B='2') SET LOCATION 
'/path/to/part/ways3'")
+          },
+          errorClass = "_LEGACY_ERROR_TEMP_1231",
+          parameters = Map("key" -> "A", "tblName" -> 
"`spark_catalog`.`ns`.`tbl`")
+        )
       }
 
       sessionCatalog.setCurrentDatabase("ns")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala
index b9fcf76ad7c..36fde23db5c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala
@@ -158,11 +158,13 @@ trait ShowCreateTableSuiteBase extends command.ShowCreateTableSuiteBase
          """.stripMargin
       )
 
-      val cause = intercept[AnalysisException] {
-        getShowCreateDDL(t, true)
-      }
-
-      assert(cause.getMessage.contains("Use `SHOW CREATE TABLE` without `AS 
SERDE` instead"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          getShowCreateDDL(t, true)
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1274",
+        parameters = Map("table" -> "`spark_catalog`.`ns1`.`tbl`")
+      )
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala
index 4db42f1d720..5bda7d002dc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala
@@ -53,10 +53,13 @@ trait ShowTablesSuiteBase extends command.ShowTablesSuiteBase with command.Tests
   }
 
   test("only support single-level namespace") {
-    val errMsg = intercept[AnalysisException] {
-      runShowTablesSql("SHOW TABLES FROM a.b", Seq())
-    }.getMessage
-    assert(errMsg.contains("Nested databases are not supported by v1 session 
catalog: a.b"))
+    checkError(
+      exception = intercept[AnalysisException] {
+        runShowTablesSql("SHOW TABLES FROM a.b", Seq())
+      },
+      errorClass = "_LEGACY_ERROR_TEMP_1126",
+      parameters = Map("catalog" -> "a.b")
+    )
   }
 
   test("SHOW TABLE EXTENDED from default") {
@@ -96,10 +99,13 @@ trait ShowTablesSuiteBase extends command.ShowTablesSuiteBase with command.Tests
     Seq(
       s"SHOW TABLES IN $catalog",
       s"SHOW TABLE EXTENDED IN $catalog LIKE '*tbl'").foreach { showTableCmd =>
-      val errMsg = intercept[AnalysisException] {
-        sql(showTableCmd)
-      }.getMessage
-      assert(errMsg.contains("Database from v1 session catalog is not 
specified"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(showTableCmd)
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1125",
+        parameters = Map.empty
+      )
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/TruncateTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/TruncateTableSuite.scala
index 7da03db6f73..cd0a0572847 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/TruncateTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/TruncateTableSuite.scala
@@ -195,10 +195,13 @@ class TruncateTableSuite extends TruncateTableSuiteBase with CommandSuiteBase {
       withNamespaceAndTable("ns", "tbl") { t =>
         (("a", "b") :: Nil).toDF().write.parquet(tempDir.getCanonicalPath)
         sql(s"CREATE TABLE $t $defaultUsing LOCATION '${tempDir.toURI}'")
-        val errMsg = intercept[AnalysisException] {
-          sql(s"TRUNCATE TABLE $t")
-        }.getMessage
-        assert(errMsg.contains("Operation not allowed: TRUNCATE TABLE on 
external tables"))
+        checkError(
+          exception = intercept[AnalysisException] {
+            sql(s"TRUNCATE TABLE $t")
+          },
+          errorClass = "_LEGACY_ERROR_TEMP_1266",
+          parameters = Map("tableIdentWithDB" -> "`spark_catalog`.`ns`.`tbl`")
+        )
       }
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala
index f2d90990257..2df7eebaecc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql.execution.command.v2
 
+import org.apache.spark.SparkUnsupportedOperationException
 import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
 import org.apache.spark.sql.catalyst.util.quoteIdentifier
@@ -56,10 +57,13 @@ class AlterTableDropPartitionSuite
       sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED 
BY (id)")
       sql(s"ALTER TABLE $t ADD PARTITION (id=1)")
       try {
-        val errMsg = intercept[UnsupportedOperationException] {
-          sql(s"ALTER TABLE $t DROP PARTITION (id=1) PURGE")
-        }.getMessage
-        assert(errMsg.contains("purge is not supported"))
+        checkError(
+          exception = intercept[SparkUnsupportedOperationException] {
+            sql(s"ALTER TABLE $t DROP PARTITION (id=1) PURGE")
+          },
+          errorClass = "UNSUPPORTED_FEATURE.PURGE_PARTITION",
+          parameters = Map.empty
+        )
       } finally {
         sql(s"ALTER TABLE $t DROP PARTITION (id=1)")
       }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRecoverPartitionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRecoverPartitionsSuite.scala
index a44e346d034..ff6ff0df530 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRecoverPartitionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRecoverPartitionsSuite.scala
@@ -31,10 +31,13 @@ class AlterTableRecoverPartitionsSuite
   test("partition recovering of v2 tables is not supported") {
     withNamespaceAndTable("ns", "tbl") { t =>
       spark.sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"ALTER TABLE $t RECOVER PARTITIONS")
-      }.getMessage
-      assert(errMsg.contains("ALTER TABLE ... RECOVER PARTITIONS is not 
supported for v2 tables"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"ALTER TABLE $t RECOVER PARTITIONS")
+        },
+        errorClass = "NOT_SUPPORTED_COMMAND_FOR_V2_TABLE",
+        parameters = Map("cmd" -> "ALTER TABLE ... RECOVER PARTITIONS")
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableSetLocationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableSetLocationSuite.scala
index babd3bb3714..0ac35452b60 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableSetLocationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableSetLocationSuite.scala
@@ -56,11 +56,13 @@ class AlterTableSetLocationSuite
     withNamespaceAndTable("ns", "tbl") { t =>
       sql(s"CREATE TABLE $t (id int) USING foo")
 
-      val e = intercept[AnalysisException] {
-        sql(s"ALTER TABLE $t PARTITION(ds='2017-06-10') SET LOCATION 
's3://bucket/path'")
-      }
-      assert(e.getMessage.contains(
-        "ALTER TABLE SET LOCATION does not support partition for v2 tables"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"ALTER TABLE $t PARTITION(ds='2017-06-10') SET LOCATION 
's3://bucket/path'")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1045",
+        parameters = Map.empty
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala
index 9c9b7d3049c..83bded7ab4f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql.execution.command.v2
 
+import org.apache.spark.SparkUnsupportedOperationException
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.connector.InMemoryTableSessionCatalog
 import org.apache.spark.sql.execution.command
@@ -29,11 +30,14 @@ class DropTableSuite extends command.DropTableSuiteBase with CommandSuiteBase {
   test("purge option") {
     withNamespaceAndTable("ns", "tbl") { t =>
       createTable(t)
-      val errMsg = intercept[UnsupportedOperationException] {
-        sql(s"DROP TABLE $catalog.ns.tbl PURGE")
-      }.getMessage
       // The default TableCatalog.purgeTable implementation throws an exception.
-      assert(errMsg.contains("Purge table is not supported"))
+      checkError(
+        exception = intercept[SparkUnsupportedOperationException] {
+          sql(s"DROP TABLE $catalog.ns.tbl PURGE")
+        },
+        errorClass = "UNSUPPORTED_FEATURE.PURGE_TABLE",
+        parameters = Map.empty
+      )
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/MsckRepairTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/MsckRepairTableSuite.scala
index d4b23e50786..381e55b4939 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/MsckRepairTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/MsckRepairTableSuite.scala
@@ -32,10 +32,13 @@ class MsckRepairTableSuite
   test("repairing of v2 tables is not supported") {
     withNamespaceAndTable("ns", "tbl") { t =>
       spark.sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"MSCK REPAIR TABLE $t")
-      }.getMessage
-      assert(errMsg.contains("MSCK REPAIR TABLE is not supported for v2 
tables"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"MSCK REPAIR TABLE $t")
+        },
+        errorClass = "NOT_SUPPORTED_COMMAND_FOR_V2_TABLE",
+        parameters = Map("cmd" -> "MSCK REPAIR TABLE")
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala
index e7e5c71c9ef..9a67eab055e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala
@@ -84,10 +84,13 @@ class ShowTablesSuite extends command.ShowTablesSuiteBase with CommandSuiteBase
     val table = "people"
     withTable(s"$catalog.$table") {
       sql(s"CREATE TABLE $catalog.$table (name STRING, id INT) $defaultUsing")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"SHOW TABLE EXTENDED FROM $catalog LIKE '*$table*'").collect()
-      }.getMessage
-      assert(errMsg.contains("SHOW TABLE EXTENDED is not supported for v2 
tables"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"SHOW TABLE EXTENDED FROM $catalog LIKE '*$table*'").collect()
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1200",
+        parameters = Map("name" -> "SHOW TABLE EXTENDED")
+      )
     }
   }
 
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala
index 55a27f336db..5f8f250f8e9 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala
@@ -357,11 +357,17 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
          """.stripMargin
       )
 
-      val cause = intercept[AnalysisException] {
-        checkCreateSparkTableAsHive("t1")
-      }
-
-      assert(cause.getMessage.contains("unsupported serde configuration"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          checkCreateSparkTableAsHive("t1")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1273",
+        parameters = Map(
+          "table" -> "t1",
+          "configs" -> (" SERDE: 
org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe " +
+            "INPUTFORMAT: org.apache.hadoop.hive.ql.io.RCFileInputFormat " +
+            "OUTPUTFORMAT: org.apache.hadoop.hive.ql.io.RCFileOutputFormat"))
+      )
     }
   }
 
@@ -423,13 +429,13 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
          """.stripMargin
       )
 
-
-      val cause = intercept[AnalysisException] {
-        sql("SHOW CREATE TABLE t1")
-      }
-
-      assert(cause.getMessage.contains(
-        "SHOW CREATE TABLE doesn't support transactional Hive table"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql("SHOW CREATE TABLE t1")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1272",
+        parameters = Map("table" -> "`spark_catalog`.`default`.`t1`")
+      )
     }
   }
 }

