This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new c91dbde2f94 [SPARK-33393][SQL] Support SHOW TABLE EXTENDED in v2
c91dbde2f94 is described below

commit c91dbde2f94c15d2007dccaeec5d72a159e9f4e2
Author: panbingkun <pbk1...@gmail.com>
AuthorDate: Thu Nov 16 13:15:27 2023 +0800

    [SPARK-33393][SQL] Support SHOW TABLE EXTENDED in v2
    
    ### What changes were proposed in this pull request?
    This PR implements the v2 version of the SHOW TABLE EXTENDED command via the
    new physical plans `ShowTablesExtendedExec` and `ShowTablePartitionExec`.
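    
    For example (the catalog and namespace names below are illustrative), both forms
    of the command are now planned and executed natively against v2 catalogs:
    ```sql
    -- listing form: planned as ShowTablesExtended, executed by ShowTablesExtendedExec
    SHOW TABLE EXTENDED IN testcat.ns LIKE 'tbl*';
    -- partition form: planned as ShowTablePartition, executed by ShowTablePartitionExec
    SHOW TABLE EXTENDED IN testcat.ns LIKE 'tbl' PARTITION (id = 1);
    ```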
    
    ### Why are the changes needed?
    To reach feature parity with the v1 data source implementation.
    
    ### Does this PR introduce _any_ user-facing change?
    Yes, SHOW TABLE EXTENDED is now supported for v2 catalogs.
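    
    As a rough sketch of the user-visible behavior (table and view names are
    illustrative), matching temporary views are listed as well, with
    `isTemporary = true`:
    ```sql
    CREATE TABLE tbl (data STRING, id BIGINT) USING parquet PARTITIONED BY (id);
    CREATE TEMPORARY VIEW tbl_view AS SELECT data, id FROM tbl;
    -- output columns: namespace, tableName, isTemporary, information
    SHOW TABLE EXTENDED LIKE 'tbl*';
    ```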
    
    ### How was this patch tested?
    Added new unit tests, and ran the unified test suites for the v1 and v2
    implementations:
    ```
    $ build/sbt -Phive-2.3 -Phive-thriftserver "test:testOnly *ShowTablesSuite"
    $ build/sbt "test:testOnly *ShowTablesSuite"
    ```
    
    Closes #37588 from panbingkun/v2_SHOW_TABLE_EXTENDED.
    
    Authored-by: panbingkun <pbk1...@gmail.com>
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
---
 .../src/main/resources/error/error-classes.json    |   5 -
 .../sql/catalyst/analysis/CheckAnalysis.scala      |   4 -
 .../sql/catalyst/analysis/ResolveCatalogs.scala    |   2 +-
 .../sql/catalyst/catalog/SessionCatalog.scala      |  19 ++
 .../spark/sql/catalyst/parser/AstBuilder.scala     |  30 +-
 .../sql/catalyst/plans/logical/v2Commands.scala    |  23 +-
 .../spark/sql/errors/QueryCompilationErrors.scala  |  19 +-
 .../datasources/v2/DataSourceV2Implicits.scala     |   5 +
 .../catalyst/analysis/ResolveSessionCatalog.scala  |  22 +-
 .../datasources/v2/DataSourceV2Strategy.scala      |  10 +
 .../datasources/v2/ShowTablesExtendedExec.scala    | 189 ++++++++++++
 .../sql-tests/analyzer-results/show-tables.sql.out |  13 +-
 .../sql-tests/results/show-tables.sql.out          |  13 +-
 .../execution/command/ShowTablesParserSuite.scala  |  34 +--
 .../execution/command/ShowTablesSuiteBase.scala    | 334 ++++++++++++++++++++-
 .../sql/execution/command/v1/ShowTablesSuite.scala |  70 ++++-
 .../sql/execution/command/v2/ShowTablesSuite.scala |  71 ++---
 .../hive/execution/command/ShowTablesSuite.scala   |  77 +++++
 18 files changed, 815 insertions(+), 125 deletions(-)

diff --git a/common/utils/src/main/resources/error/error-classes.json b/common/utils/src/main/resources/error/error-classes.json
index b60d70cefc9..afcd841a2ce 100644
--- a/common/utils/src/main/resources/error/error-classes.json
+++ b/common/utils/src/main/resources/error/error-classes.json
@@ -4776,11 +4776,6 @@
       "Invalid bound function '<bound>: there are <argsLen> arguments but 
<inputTypesLen> parameters returned from 'inputTypes()'."
     ]
   },
-  "_LEGACY_ERROR_TEMP_1200" : {
-    "message" : [
-      "<name> is not supported for v2 tables."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_1201" : {
     "message" : [
       "Cannot resolve column name \"<colName>\" among (<fieldNames>)."
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
index 176a45a6f8e..3843901a2e0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
@@ -271,10 +271,6 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog with QueryErrorsB
           case _ =>
         }
 
-      // `ShowTableExtended` should have been converted to the v1 command if the table is v1.
-      case _: ShowTableExtended =>
-        throw QueryCompilationErrors.commandUnsupportedInV2TableError("SHOW TABLE EXTENDED")
-
       case operator: LogicalPlan =>
         operator transformExpressionsDown {
          // Check argument data types of higher-order functions downwards first.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveCatalogs.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveCatalogs.scala
index 253c8eb190f..b0df76068ff 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveCatalogs.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveCatalogs.scala
@@ -50,7 +50,7 @@ class ResolveCatalogs(val catalogManager: CatalogManager)
     case s @ ShowTables(UnresolvedNamespace(Seq()), _, _) =>
       s.copy(namespace = ResolvedNamespace(currentCatalog,
         catalogManager.currentNamespace.toImmutableArraySeq))
-    case s @ ShowTableExtended(UnresolvedNamespace(Seq()), _, _, _) =>
+    case s @ ShowTablesExtended(UnresolvedNamespace(Seq()), _, _) =>
       s.copy(namespace = ResolvedNamespace(currentCatalog,
         catalogManager.currentNamespace.toImmutableArraySeq))
     case s @ ShowViews(UnresolvedNamespace(Seq()), _, _) =>
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
index e71865df94d..e9a02a243aa 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
@@ -1090,6 +1090,25 @@ class SessionCatalog(
     dbViews ++ listLocalTempViews(pattern)
   }
 
+  /**
+   * List all matching temp views in the specified database, including global/local temporary views.
+   */
+  def listTempViews(db: String, pattern: String): Seq[CatalogTable] = {
+    val globalTempViews = if (format(db) == globalTempViewManager.database) {
+      globalTempViewManager.listViewNames(pattern).flatMap { viewName =>
+        globalTempViewManager.get(viewName).map(_.tableMeta)
+      }
+    } else {
+      Seq.empty
+    }
+
+    val localTempViews = listLocalTempViews(pattern).flatMap { viewIndent =>
+      tempViews.get(viewIndent.table).map(_.tableMeta)
+    }
+
+    globalTempViews ++ localTempViews
+  }
+
   /**
    * List all matching local temporary views.
    */
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index ddc91db5967..09161d8f8da 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -4086,19 +4086,31 @@ class AstBuilder extends DataTypeAstBuilder with SQLConfHelper with Logging {
   }
 
   /**
-   * Create a [[ShowTableExtended]] command.
+   * Create a [[ShowTablesExtended]] or [[ShowTablePartition]] command.
    */
   override def visitShowTableExtended(
       ctx: ShowTableExtendedContext): LogicalPlan = withOrigin(ctx) {
-    val partitionKeys = Option(ctx.partitionSpec).map { specCtx =>
-      UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(specCtx), None)
-    }
-    val ns = if (ctx.identifierReference() != null) {
-      withIdentClause(ctx.identifierReference, UnresolvedNamespace(_))
-    } else {
-      UnresolvedNamespace(Seq.empty[String])
+    Option(ctx.partitionSpec).map { spec =>
+      val table = withOrigin(ctx.pattern) {
+        if (ctx.identifierReference() != null) {
+          withIdentClause(ctx.identifierReference(), ns => {
+            val names = ns :+ string(visitStringLit(ctx.pattern))
+            UnresolvedTable(names, "SHOW TABLE EXTENDED ... PARTITION ...")
+          })
+        } else {
+          val names = Seq.empty[String] :+ string(visitStringLit(ctx.pattern))
+          UnresolvedTable(names, "SHOW TABLE EXTENDED ... PARTITION ...")
+        }
+      }
+      ShowTablePartition(table, UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(spec)))
+    }.getOrElse {
+      val ns = if (ctx.identifierReference() != null) {
+        withIdentClause(ctx.identifierReference, UnresolvedNamespace)
+      } else {
+        UnresolvedNamespace(Seq.empty[String])
+      }
+      ShowTablesExtended(ns, string(visitStringLit(ctx.pattern)))
     }
-    ShowTableExtended(ns, string(visitStringLit(ctx.pattern)), partitionKeys)
   }
 
   /**
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/v2Commands.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/v2Commands.scala
index 55d71ff6c24..0a18532b134 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/v2Commands.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/v2Commands.scala
@@ -886,19 +886,18 @@ object ShowTables {
 }
 
 /**
- * The logical plan of the SHOW TABLE EXTENDED command.
+ * The logical plan of the SHOW TABLE EXTENDED (without PARTITION) command.
  */
-case class ShowTableExtended(
+case class ShowTablesExtended(
     namespace: LogicalPlan,
     pattern: String,
-    partitionSpec: Option[PartitionSpec],
-    override val output: Seq[Attribute] = ShowTableExtended.getOutputAttrs) extends UnaryCommand {
+    override val output: Seq[Attribute] = ShowTablesUtils.getOutputAttrs) extends UnaryCommand {
   override def child: LogicalPlan = namespace
-  override protected def withNewChildInternal(newChild: LogicalPlan): ShowTableExtended =
+  override protected def withNewChildInternal(newChild: LogicalPlan): ShowTablesExtended =
     copy(namespace = newChild)
 }
 
-object ShowTableExtended {
+object ShowTablesUtils {
   def getOutputAttrs: Seq[Attribute] = Seq(
     AttributeReference("namespace", StringType, nullable = false)(),
     AttributeReference("tableName", StringType, nullable = false)(),
@@ -906,6 +905,18 @@ object ShowTableExtended {
     AttributeReference("information", StringType, nullable = false)())
 }
 
+/**
+ * The logical plan of the SHOW TABLE EXTENDED ... PARTITION ... command.
+ */
+case class ShowTablePartition(
+    table: LogicalPlan,
+    partitionSpec: PartitionSpec,
+    override val output: Seq[Attribute] = ShowTablesUtils.getOutputAttrs)
+  extends V2PartitionCommand {
+  override protected def withNewChildInternal(newChild: LogicalPlan): ShowTablePartition =
+    copy(table = newChild)
+}
+
 /**
  * The logical plan of the SHOW VIEWS command.
  *
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index 9cc99e9bfa3..603de520b18 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.Path
 
 import org.apache.spark.{SPARK_DOC_ROOT, SparkException, SparkThrowable, SparkThrowableHelper, SparkUnsupportedOperationException}
 import org.apache.spark.sql.AnalysisException
-import org.apache.spark.sql.catalyst.{ExtendedAnalysisException, FunctionIdentifier, QualifiedTableName, TableIdentifier}
+import org.apache.spark.sql.catalyst.{ExtendedAnalysisException, FunctionIdentifier, InternalRow, QualifiedTableName, TableIdentifier}
 import org.apache.spark.sql.catalyst.analysis.{CannotReplaceMissingTableException, FunctionAlreadyExistsException, NamespaceAlreadyExistsException, NoSuchFunctionException, NoSuchNamespaceException, NoSuchPartitionException, NoSuchTableException, ResolvedTable, Star, TableAlreadyExistsException, UnresolvedRegex}
 import org.apache.spark.sql.catalyst.catalog.{CatalogTable, InvalidUDFClassException}
 import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
@@ -2145,12 +2145,6 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase with Compilat
         "inputTypesLen" -> bound.inputTypes().length.toString))
   }
 
-  def commandUnsupportedInV2TableError(name: String): Throwable = {
-    new AnalysisException(
-      errorClass = "_LEGACY_ERROR_TEMP_1200",
-      messageParameters = Map("name" -> name))
-  }
-
   def cannotResolveColumnNameAmongAttributesError(
       colName: String, fieldNames: String): Throwable = {
     new AnalysisException(
@@ -2477,7 +2471,7 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase with Compilat
       errorClass = "_LEGACY_ERROR_TEMP_1231",
       messageParameters = Map(
         "key" -> key,
-        "tblName" -> tblName))
+        "tblName" -> toSQLId(tblName)))
   }
 
   def invalidPartitionSpecError(
@@ -2489,7 +2483,7 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase with Compilat
       messageParameters = Map(
         "specKeys" -> specKeys,
         "partitionColumnNames" -> partitionColumnNames.mkString(", "),
-        "tableName" -> tableName))
+        "tableName" -> toSQLId(tableName)))
   }
 
   def columnAlreadyExistsError(columnName: String): Throwable = {
@@ -2547,6 +2541,13 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase with Compilat
     new NoSuchPartitionException(db, table, partition)
   }
 
+  def notExistPartitionError(
+      table: Identifier,
+      partitionIdent: InternalRow,
+      partitionSchema: StructType): Throwable = {
+    new NoSuchPartitionException(table.toString, partitionIdent, partitionSchema)
+  }
+
   def analyzingColumnStatisticsNotSupportedForColumnTypeError(
       name: String,
       dataType: DataType): Throwable = {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Implicits.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Implicits.scala
index 795778f9869..6b884713bd5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Implicits.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Implicits.scala
@@ -65,6 +65,11 @@ object DataSourceV2Implicits {
       }
     }
 
+    def supportsPartitions: Boolean = table match {
+      case _: SupportsPartitionManagement => true
+      case _ => false
+    }
+
     def asPartitionable: SupportsPartitionManagement = {
       table match {
         case support: SupportsPartitionManagement =>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala
index 078b988eaf0..5fab89f4879 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala
@@ -241,19 +241,31 @@ class ResolveSessionCatalog(val catalogManager: CatalogManager)
     case ShowTables(DatabaseInSessionCatalog(db), pattern, output) if conf.useV1Command =>
       ShowTablesCommand(Some(db), pattern, output)
 
-    case ShowTableExtended(
+    case ShowTablesExtended(
         DatabaseInSessionCatalog(db),
         pattern,
-        partitionSpec @ (None | Some(UnresolvedPartitionSpec(_, _))),
         output) =>
      val newOutput = if (conf.getConf(SQLConf.LEGACY_KEEP_COMMAND_OUTPUT_SCHEMA)) {
-        assert(output.length == 4)
         output.head.withName("database") +: output.tail
       } else {
         output
       }
-      val tablePartitionSpec = partitionSpec.map(_.asInstanceOf[UnresolvedPartitionSpec].spec)
-      ShowTablesCommand(Some(db), Some(pattern), newOutput, true, tablePartitionSpec)
+      ShowTablesCommand(Some(db), Some(pattern), newOutput, isExtended = true)
+
+    case ShowTablePartition(
+        ResolvedTable(catalog, _, table: V1Table, _),
+        partitionSpec,
+        output) if isSessionCatalog(catalog) =>
+      val newOutput = if (conf.getConf(SQLConf.LEGACY_KEEP_COMMAND_OUTPUT_SCHEMA)) {
+        output.head.withName("database") +: output.tail
+      } else {
+        output
+      }
+      val tablePartitionSpec = Option(partitionSpec).map(
+        _.asInstanceOf[UnresolvedPartitionSpec].spec)
+      ShowTablesCommand(table.catalogTable.identifier.database,
+        Some(table.catalogTable.identifier.table), newOutput,
+        isExtended = true, tablePartitionSpec)
 
     // ANALYZE TABLE works on permanent views if the views are cached.
    case AnalyzeTable(ResolvedV1TableOrViewIdentifier(ident), partitionSpec, noScan) =>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Strategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Strategy.scala
index e6bce7a0990..3f0dab11cda 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Strategy.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Strategy.scala
@@ -406,6 +406,16 @@ class DataSourceV2Strategy(session: SparkSession) extends Strategy with Predicat
     case ShowTables(ResolvedNamespace(catalog, ns), pattern, output) =>
       ShowTablesExec(output, catalog.asTableCatalog, ns, pattern) :: Nil
 
+    case ShowTablesExtended(
+        ResolvedNamespace(catalog, ns),
+        pattern,
+        output) =>
+      ShowTablesExtendedExec(output, catalog.asTableCatalog, ns, pattern) :: Nil
+
+    case ShowTablePartition(r: ResolvedTable, part, output) =>
+      ShowTablePartitionExec(output, r.catalog, r.identifier,
+        r.table.asPartitionable, Seq(part).asResolvedPartitionSpecs.head) :: Nil
+
     case SetCatalogAndNamespace(ResolvedNamespace(catalog, ns)) =>
       val catalogManager = session.sessionState.catalogManager
       val namespace = if (ns.nonEmpty) Some(ns) else None
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablesExtendedExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablesExtendedExec.scala
new file mode 100644
index 00000000000..0b2d11a597d
--- /dev/null
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablesExtendedExec.scala
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.datasources.v2
+
+import scala.collection.mutable
+import scala.collection.mutable.ArrayBuffer
+import scala.jdk.CollectionConverters._
+
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.analysis.ResolvedPartitionSpec
+import org.apache.spark.sql.catalyst.catalog.CatalogTableType
+import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.escapePathName
+import org.apache.spark.sql.catalyst.expressions.{Attribute, Literal, ToPrettyString}
+import org.apache.spark.sql.catalyst.util.{quoteIdentifier, StringUtils}
+import org.apache.spark.sql.connector.catalog.{CatalogV2Util, Identifier, SupportsPartitionManagement, Table, TableCatalog}
+import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
+import org.apache.spark.sql.errors.QueryCompilationErrors
+import org.apache.spark.sql.execution.LeafExecNode
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits.TableHelper
+
+/**
+ * Physical plan node for showing tables without a partition spec; shows the information of tables.
+ */
+case class ShowTablesExtendedExec(
+    output: Seq[Attribute],
+    catalog: TableCatalog,
+    namespace: Seq[String],
+    pattern: String) extends V2CommandExec with LeafExecNode {
+  override protected def run(): Seq[InternalRow] = {
+    val rows = new ArrayBuffer[InternalRow]()
+
+    // fetch tables
+    // TODO We need a new listTable overload that takes a pattern string.
+    val tables = catalog.listTables(namespace.toArray)
+    tables.map { tableIdent =>
+      if (StringUtils.filterPattern(Seq(tableIdent.name()), pattern).nonEmpty) {
+        val table = catalog.loadTable(tableIdent)
+        val information = getTableDetails(catalog.name, tableIdent, table)
+        rows += toCatalystRow(tableIdent.namespace().quoted, tableIdent.name(), false,
+          s"$information\n")
+      }
+    }
+
+    // fetch temp views, including both global and local temporary views
+    val sessionCatalog = session.sessionState.catalog
+    val db = namespace match {
+      case Seq(db) => Some(db)
+      case _ => None
+    }
+    val tempViews = sessionCatalog.listTempViews(db.getOrElse(""), pattern)
+    tempViews.map { tempView =>
+      val database = tempView.identifier.database.getOrElse("")
+      val tableName = tempView.identifier.table
+      val information = tempView.simpleString
+      rows += toCatalystRow(database, tableName, true, s"$information\n")
+    }
+
+    rows.toSeq
+  }
+
+  private def getTableDetails(
+      catalogName: String,
+      identifier: Identifier,
+      table: Table): String = {
+    val results = new mutable.LinkedHashMap[String, String]()
+
+    results.put("Catalog", catalogName)
+    results.put("Namespace", identifier.namespace().quoted)
+    results.put("Table", identifier.name())
+    val tableType = if (table.properties().containsKey(TableCatalog.PROP_EXTERNAL)) {
+      CatalogTableType.EXTERNAL
+    } else {
+      CatalogTableType.MANAGED
+    }
+    results.put("Type", tableType.name)
+
+    CatalogV2Util.TABLE_RESERVED_PROPERTIES
+      .filterNot(_ == TableCatalog.PROP_EXTERNAL)
+      .foreach(propKey => {
+        if (table.properties.containsKey(propKey)) {
+          results.put(propKey.capitalize, table.properties.get(propKey))
+        }
+      })
+
+    val properties =
+      conf.redactOptions(table.properties.asScala.toMap).toList
+        .filter(kv => !CatalogV2Util.TABLE_RESERVED_PROPERTIES.contains(kv._1))
+        .sortBy(_._1).map {
+        case (key, value) => key + "=" + value
+      }
+    if (!table.properties().isEmpty) {
+      results.put("Table Properties", properties.mkString("[", ", ", "]"))
+    }
+
+    // Partition Provider & Partition Columns
+    if (table.supportsPartitions && table.asPartitionable.partitionSchema().nonEmpty) {
+      results.put("Partition Provider", "Catalog")
+      results.put("Partition Columns", 
table.asPartitionable.partitionSchema().map(
+        field => quoteIdentifier(field.name)).mkString("[", ", ", "]"))
+    }
+
+    if (table.schema().nonEmpty) {
+      results.put("Schema", table.schema().treeString)
+    }
+
+    results.map { case (key, value) =>
+      if (value.isEmpty) key else s"$key: $value"
+    }.mkString("", "\n", "")
+  }
+}
+
+/**
+ * Physical plan node for showing tables with a partition spec; shows the information of partitions.
+ */
+case class ShowTablePartitionExec(
+    output: Seq[Attribute],
+    catalog: TableCatalog,
+    tableIndent: Identifier,
+    table: SupportsPartitionManagement,
+    partSpec: ResolvedPartitionSpec) extends V2CommandExec with LeafExecNode {
+  override protected def run(): Seq[InternalRow] = {
+    val rows = new ArrayBuffer[InternalRow]()
+    val information = getTablePartitionDetails(tableIndent,
+      table, partSpec)
+    rows += toCatalystRow(tableIndent.namespace.quoted,
+      tableIndent.name(), false, s"$information\n")
+
+    rows.toSeq
+  }
+
+  private def getTablePartitionDetails(
+      tableIdent: Identifier,
+      partitionTable: SupportsPartitionManagement,
+      partSpec: ResolvedPartitionSpec): String = {
+    val results = new mutable.LinkedHashMap[String, String]()
+
+    // "Partition Values"
+    val partitionSchema = partitionTable.partitionSchema()
+    val (names, ident) = (partSpec.names, partSpec.ident)
+    val partitionIdentifiers = partitionTable.listPartitionIdentifiers(names.toArray, ident)
+    if (partitionIdentifiers.isEmpty) {
+      throw QueryCompilationErrors.notExistPartitionError(tableIdent, ident, partitionSchema)
+    }
+    assert(partitionIdentifiers.length == 1)
+    val row = partitionIdentifiers.head
+    val len = partitionSchema.length
+    val partitions = new Array[String](len)
+    val timeZoneId = conf.sessionLocalTimeZone
+    for (i <- 0 until len) {
+      val dataType = partitionSchema(i).dataType
+      val partValueUTF8String = ToPrettyString(Literal(row.get(i, dataType), dataType),
+        Some(timeZoneId)).eval(null)
+      val partValueStr = if (partValueUTF8String == null) "null" else partValueUTF8String.toString
+      partitions(i) = escapePathName(partitionSchema(i).name) + "=" + escapePathName(partValueStr)
+    }
+    val partitionValues = partitions.mkString("[", ", ", "]")
+    results.put("Partition Values", s"$partitionValues")
+
+    // "Partition Parameters"
+    val metadata = partitionTable.loadPartitionMetadata(ident)
+    if (!metadata.isEmpty) {
+      val metadataValues = metadata.asScala.map { case (key, value) =>
+        if (value.isEmpty) key else s"$key: $value"
+      }.mkString("{", ", ", "}")
+      results.put("Partition Parameters", metadataValues)
+    }
+
+    // TODO "Created Time", "Last Access", "Partition Statistics"
+
+    results.map { case (key, value) =>
+      if (value.isEmpty) key else s"$key: $value"
+    }.mkString("", "\n", "")
+  }
+}
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/show-tables.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/show-tables.sql.out
index 167e2f8622b..ce5f7995f5d 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/show-tables.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/show-tables.sql.out
@@ -130,13 +130,20 @@ org.apache.spark.sql.catalyst.parser.ParseException
 -- !query
 SHOW TABLE EXTENDED LIKE 'show_t*' PARTITION(c='Us', d=1)
 -- !query analysis
-org.apache.spark.sql.catalyst.analysis.NoSuchTableException
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
 {
   "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
   "sqlState" : "42P01",
   "messageParameters" : {
-    "relationName" : "`showdb`.`show_t*`"
-  }
+    "relationName" : "`show_t*`"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 26,
+    "stopIndex" : 34,
+    "fragment" : "'show_t*'"
+  } ]
 }
 
 
diff --git a/sql/core/src/test/resources/sql-tests/results/show-tables.sql.out b/sql/core/src/test/resources/sql-tests/results/show-tables.sql.out
index a37cf630969..442f0fe5d5f 100644
--- a/sql/core/src/test/resources/sql-tests/results/show-tables.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/show-tables.sql.out
@@ -208,13 +208,20 @@ SHOW TABLE EXTENDED LIKE 'show_t*' PARTITION(c='Us', d=1)
 -- !query schema
 struct<>
 -- !query output
-org.apache.spark.sql.catalyst.analysis.NoSuchTableException
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
 {
   "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
   "sqlState" : "42P01",
   "messageParameters" : {
-    "relationName" : "`showdb`.`show_t*`"
-  }
+    "relationName" : "`show_t*`"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 26,
+    "stopIndex" : 34,
+    "fragment" : "'show_t*'"
+  } ]
 }
 
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesParserSuite.scala
index d68e1233f7a..d70853b6360 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesParserSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesParserSuite.scala
@@ -17,9 +17,9 @@
 
 package org.apache.spark.sql.execution.command
 
-import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedNamespace, UnresolvedPartitionSpec}
+import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedNamespace, UnresolvedPartitionSpec, UnresolvedTable}
 import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan
-import org.apache.spark.sql.catalyst.plans.logical.{ShowTableExtended, ShowTables}
+import org.apache.spark.sql.catalyst.plans.logical.{ShowTablePartition, ShowTables, ShowTablesExtended}
 import org.apache.spark.sql.test.SharedSparkSession
 
 class ShowTablesParserSuite extends AnalysisTest with SharedSparkSession {
@@ -52,32 +52,32 @@ class ShowTablesParserSuite extends AnalysisTest with SharedSparkSession {
   test("show table extended") {
     comparePlans(
       parsePlan("SHOW TABLE EXTENDED LIKE '*test*'"),
-      ShowTableExtended(UnresolvedNamespace(Seq.empty[String]), "*test*", None))
+      ShowTablesExtended(UnresolvedNamespace(Seq.empty[String]), "*test*"))
     comparePlans(
       parsePlan(s"SHOW TABLE EXTENDED FROM $catalog.ns1.ns2 LIKE '*test*'"),
-      ShowTableExtended(UnresolvedNamespace(Seq(catalog, "ns1", "ns2")), "*test*", None))
+      ShowTablesExtended(UnresolvedNamespace(Seq(catalog, "ns1", "ns2")), "*test*"))
     comparePlans(
       parsePlan(s"SHOW TABLE EXTENDED IN $catalog.ns1.ns2 LIKE '*test*'"),
-      ShowTableExtended(UnresolvedNamespace(Seq(catalog, "ns1", "ns2")), "*test*", None))
+      ShowTablesExtended(UnresolvedNamespace(Seq(catalog, "ns1", "ns2")), "*test*"))
+
     comparePlans(
       parsePlan("SHOW TABLE EXTENDED LIKE '*test*' PARTITION(ds='2008-04-09', 
hr=11)"),
-      ShowTableExtended(
-        UnresolvedNamespace(Seq.empty[String]),
-        "*test*",
-        Some(UnresolvedPartitionSpec(Map("ds" -> "2008-04-09", "hr" -> 
"11")))))
+      ShowTablePartition(
+        UnresolvedTable(Seq("*test*"), "SHOW TABLE EXTENDED ... PARTITION 
..."),
+        UnresolvedPartitionSpec(Map("ds" -> "2008-04-09", "hr" -> "11"))))
     comparePlans(
       parsePlan(s"SHOW TABLE EXTENDED FROM $catalog.ns1.ns2 LIKE '*test*' " +
         "PARTITION(ds='2008-04-09')"),
-      ShowTableExtended(
-        UnresolvedNamespace(Seq(catalog, "ns1", "ns2")),
-        "*test*",
-        Some(UnresolvedPartitionSpec(Map("ds" -> "2008-04-09")))))
+      ShowTablePartition(
+        UnresolvedTable(Seq(catalog, "ns1", "ns2", "*test*"),
+          "SHOW TABLE EXTENDED ... PARTITION ..."),
+        UnresolvedPartitionSpec(Map("ds" -> "2008-04-09"))))
     comparePlans(
       parsePlan(s"SHOW TABLE EXTENDED IN $catalog.ns1.ns2 LIKE '*test*' " +
         "PARTITION(ds='2008-04-09')"),
-      ShowTableExtended(
-        UnresolvedNamespace(Seq(catalog, "ns1", "ns2")),
-        "*test*",
-        Some(UnresolvedPartitionSpec(Map("ds" -> "2008-04-09")))))
+      ShowTablePartition(
+        UnresolvedTable(Seq(catalog, "ns1", "ns2", "*test*"),
+          "SHOW TABLE EXTENDED ... PARTITION ..."),
+        UnresolvedPartitionSpec(Map("ds" -> "2008-04-09"))))
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesSuiteBase.scala
index 5f56b91db8f..c88217221ab 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesSuiteBase.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesSuiteBase.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.execution.command
 
-import org.apache.spark.sql.{QueryTest, Row}
+import org.apache.spark.sql.{AnalysisException, QueryTest, Row}
 import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
 import org.apache.spark.sql.internal.SQLConf
 
@@ -40,6 +40,42 @@ trait ShowTablesSuiteBase extends QueryTest with DDLCommandTestUtils {
     checkAnswer(df, expected)
   }
 
+  // the error class & error parameters of
+  // `SHOW TABLE EXTENDED ... PARTITION ... in non-partitioned table`
+  protected def extendedPartInNonPartedTableError(
+      catalog: String,
+      namespace: String,
+      table: String): (String, Map[String, String])
+
+  protected def extendedPartExpectedResult: String =
+    "Partition Values: [id1=1, id2=2]"
+
+  protected def namespaceKey: String = "Database"
+
+  protected def extendedTableInfo: String
+
+  protected def extendedTableSchema: String =
+    s"""Schema: root
+       | |-- data: string (nullable = true)
+       | |-- id: long (nullable = true)""".stripMargin
+
+  private def extendedTableExpectedResult(
+      catalog: String,
+      namespace: String,
+      table: String,
+      dataColName: String,
+      partColName: String): String = {
+    s"""Catalog: $catalog
+       |$namespaceKey: $namespace
+       |Table: $table
+       |$extendedTableInfo
+       |Partition Provider: Catalog
+       |Partition Columns: [`$partColName`]
+       |Schema: root
+       | |-- $dataColName: string (nullable = true)
+       | |-- $partColName: long (nullable = true)""".stripMargin
+  }
+
   test("show an existing table") {
     withNamespaceAndTable("ns", "table") { t =>
       sql(s"CREATE TABLE $t (name STRING, id INT) $defaultUsing")
@@ -126,4 +162,300 @@ trait ShowTablesSuiteBase extends QueryTest with DDLCommandTestUtils {
       }
     }
   }
+
+  test("show table in a not existing namespace") {
+    checkError(
+      exception = intercept[AnalysisException] {
+        sql(s"SHOW TABLES IN $catalog.nonexist")
+      },
+      errorClass = "SCHEMA_NOT_FOUND",
+      parameters = Map("schemaName" -> "`nonexist`"))
+  }
+
+  test("show table extended in a not existing namespace") {
+    checkError(
+      exception = intercept[AnalysisException] {
+        sql(s"SHOW TABLE EXTENDED IN $catalog.nonexist LIKE '*tbl*'")
+      },
+      errorClass = "SCHEMA_NOT_FOUND",
+      parameters = Map("schemaName" -> "`nonexist`"))
+  }
+
+  test("show table extended with no matching table") {
+    val namespace = "ns1"
+    val table = "nonexist"
+    withNamespaceAndTable(namespace, table, catalog) { _ =>
+      val result = sql(s"SHOW TABLE EXTENDED IN $catalog.$namespace LIKE 
'*$table*'")
+      assert(result.schema.fieldNames ===
+        Seq("namespace", "tableName", "isTemporary", "information"))
+      assert(result.collect().isEmpty)
+    }
+  }
+
+  test("show table extended with a not existing partition") {
+    val namespace = "ns1"
+    val table = "tbl"
+    withNamespaceAndTable(namespace, table, catalog) { tbl =>
+      sql(s"CREATE TABLE $tbl (data string, id bigint) $defaultUsing 
PARTITIONED BY (id)")
+      sql(s"ALTER TABLE $tbl ADD PARTITION (id = 1)")
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"SHOW TABLE EXTENDED IN $catalog.$namespace LIKE '$table' 
PARTITION(id = 2)")
+        },
+        errorClass = "PARTITIONS_NOT_FOUND",
+        parameters = Map(
+          "partitionList" -> "PARTITION (`id` = 2)",
+          "tableName" -> "`ns1`.`tbl`"
+        )
+      )
+    }
+  }
+
+  test("show table extended in non-partitioned table") {
+    val namespace = "ns1"
+    val table = "tbl"
+    withNamespaceAndTable(namespace, table, catalog) { tbl =>
+      sql(s"CREATE TABLE $tbl (data string, id bigint) $defaultUsing")
+      val e = intercept[AnalysisException] {
+        sql(s"SHOW TABLE EXTENDED IN $catalog.$namespace LIKE '$table' 
PARTITION(id = 1)")
+      }
+      val (errorClass, parameters) = extendedPartInNonPartedTableError(catalog, namespace, table)
+      checkError(exception = e, errorClass = errorClass, parameters = parameters)
+    }
+  }
+
+  test("show table extended in multi partition key - " +
+    "the command's partition parameters are complete") {
+    val namespace = "ns1"
+    val table = "tbl"
+    withNamespaceAndTable(namespace, table, catalog) { tbl =>
+      sql(s"CREATE TABLE $tbl (data string, id1 bigint, id2 bigint) " +
+        s"$defaultUsing PARTITIONED BY (id1, id2)")
+      sql(s"ALTER TABLE $tbl ADD PARTITION (id1 = 1, id2 = 2)")
+
+      val result = sql(s"SHOW TABLE EXTENDED FROM $catalog.$namespace " +
+        s"LIKE '$table' PARTITION(id1 = 1, id2 = 2)")
+      assert(result.schema.fieldNames ===
+        Seq("namespace", "tableName", "isTemporary", "information"))
+      val resultCollect = result.collect()
+      assert(resultCollect(0).length == 4)
+      assert(resultCollect(0)(0) === namespace)
+      assert(resultCollect(0)(1) === table)
+      assert(resultCollect(0)(2) === false)
+      val actualResult = replace(resultCollect(0)(3).toString)
+      assert(actualResult === extendedPartExpectedResult)
+    }
+  }
+
+  test("show table extended in multi partition key - " +
+    "the command's partition parameters are incomplete") {
+    val namespace = "ns1"
+    val table = "tbl"
+    withNamespaceAndTable(namespace, table, catalog) { tbl =>
+      sql(s"CREATE TABLE $tbl (data string, id1 bigint, id2 bigint) " +
+        s"$defaultUsing PARTITIONED BY (id1, id2)")
+      sql(s"ALTER TABLE $tbl ADD PARTITION (id1 = 1, id2 = 2)")
+
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"SHOW TABLE EXTENDED IN $catalog.$namespace " +
+            s"LIKE '$table' PARTITION(id1 = 1)")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1232",
+        parameters = Map(
+          "specKeys" -> "id1",
+          "partitionColumnNames" -> "id1, id2",
+          "tableName" -> s"`$catalog`.`$namespace`.`$table`")
+      )
+    }
+  }
+
+  test("show table extended in multi tables") {
+    val namespace = "ns1"
+    val table = "tbl"
+    withNamespaceAndTable(namespace, table, catalog) { _ =>
+      sql(s"CREATE TABLE $catalog.$namespace.$table (data string, id bigint) " 
+
+        s"$defaultUsing PARTITIONED BY (id)")
+      val table1 = "tbl1"
+      val table2 = "tbl2"
+      withTable(table1, table2) {
+        sql(s"CREATE TABLE $catalog.$namespace.$table1 (data1 string, id1 
bigint) " +
+          s"$defaultUsing PARTITIONED BY (id1)")
+        sql(s"CREATE TABLE $catalog.$namespace.$table2 (data2 string, id2 
bigint) " +
+          s"$defaultUsing PARTITIONED BY (id2)")
+
+        val result = sql(s"SHOW TABLE EXTENDED FROM $catalog.$namespace LIKE 
'$table*'")
+          .sort("tableName")
+        assert(result.schema.fieldNames ===
+          Seq("namespace", "tableName", "isTemporary", "information"))
+        val resultCollect = result.collect()
+        assert(resultCollect.length == 3)
+
+        assert(resultCollect(0).length == 4)
+        assert(resultCollect(0)(1) === table)
+        assert(resultCollect(0)(2) === false)
+        // replace "Created Time", "Last Access", "Created By", "Location"
+        val actualResult_0_3 = replace(resultCollect(0)(3).toString)
+        val expectedResult_0_3 = extendedTableExpectedResult(
+          catalog, namespace, table, "data", "id")
+        assert(actualResult_0_3 === expectedResult_0_3)
+
+        assert(resultCollect(1).length == 4)
+        assert(resultCollect(1)(1) === table1)
+        assert(resultCollect(1)(2) === false)
+        val actualResult_1_3 = replace(resultCollect(1)(3).toString)
+        // replace "Table Properties"
+        val expectedResult_1_3 = extendedTableExpectedResult(
+          catalog, namespace, table1, "data1", "id1")
+        assert(actualResult_1_3 === expectedResult_1_3)
+
+        assert(resultCollect(2).length == 4)
+        assert(resultCollect(2)(1) === table2)
+        assert(resultCollect(2)(2) === false)
+        val actualResult_2_3 = replace(resultCollect(2)(3).toString)
+        // replace "Table Properties"
+        val expectedResult_2_3 = extendedTableExpectedResult(
+          catalog, namespace, table2, "data2", "id2")
+        assert(actualResult_2_3 === expectedResult_2_3)
+      }
+    }
+  }
+
+  test("show table extended with temp views") {
+    val namespace = "ns"
+    val table = "tbl"
+    withNamespaceAndTable(namespace, table, catalog) { t =>
+      sql(s"CREATE TABLE $t (id int) $defaultUsing")
+      val viewName = table + "_view"
+      val localTmpViewName = viewName + "_local_tmp"
+      val globalTmpViewName = viewName + "_global_tmp"
+      val globalNamespace = "global_temp"
+      withView(localTmpViewName, globalNamespace + "." + globalTmpViewName) {
+        sql(s"CREATE TEMPORARY VIEW $localTmpViewName AS SELECT id FROM $t")
+        sql(s"CREATE GLOBAL TEMPORARY VIEW $globalTmpViewName AS SELECT id 
FROM $t")
+
+        // temp local view
+        val localResult = sql(s"SHOW TABLE EXTENDED LIKE 
'$viewName*'").sort("tableName")
+        assert(localResult.schema.fieldNames ===
+          Seq("namespace", "tableName", "isTemporary", "information"))
+        val localResultCollect = localResult.collect()
+        assert(localResultCollect.length == 1)
+        assert(localResultCollect(0).length == 4)
+        assert(localResultCollect(0)(1) === localTmpViewName)
+        assert(localResultCollect(0)(2) === true)
+        val actualLocalResult = replace(localResultCollect(0)(3).toString)
+        val expectedLocalResult =
+          s"""Table: $localTmpViewName
+             |Created Time: <created time>
+             |Last Access: <last access>
+             |Created By: <created by>
+             |Type: VIEW
+             |View Text: SELECT id FROM $catalog.$namespace.$table
+             |View Catalog and Namespace: spark_catalog.default
+             |View Query Output Columns: [id]
+             |Schema: root
+             | |-- id: integer (nullable = true)""".stripMargin
+        assert(actualLocalResult === expectedLocalResult)
+
+        // temp global view
+        val globalResult = sql(s"SHOW TABLE EXTENDED IN global_temp LIKE 
'$viewName*'").
+          sort("tableName")
+        assert(globalResult.schema.fieldNames ===
+          Seq("namespace", "tableName", "isTemporary", "information"))
+        val globalResultCollect = globalResult.collect()
+        assert(globalResultCollect.length == 2)
+
+        assert(globalResultCollect(0).length == 4)
+        assert(globalResultCollect(0)(1) === globalTmpViewName)
+        assert(globalResultCollect(0)(2) === true)
+        val actualGlobalResult1 = replace(globalResultCollect(0)(3).toString)
+        val expectedGlobalResult1 =
+          s"""Database: $globalNamespace
+             |Table: $globalTmpViewName
+             |Created Time: <created time>
+             |Last Access: <last access>
+             |Created By: <created by>
+             |Type: VIEW
+             |View Text: SELECT id FROM $catalog.$namespace.$table
+             |View Catalog and Namespace: spark_catalog.default
+             |View Query Output Columns: [id]
+             |Schema: root
+             | |-- id: integer (nullable = true)""".stripMargin
+        assert(actualGlobalResult1 === expectedGlobalResult1)
+
+        assert(globalResultCollect(1).length == 4)
+        assert(globalResultCollect(1)(1) === localTmpViewName)
+        assert(globalResultCollect(1)(2) === true)
+        val actualLocalResult2 = replace(globalResultCollect(1)(3).toString)
+        val expectedLocalResult2 =
+          s"""Table: $localTmpViewName
+             |Created Time: <created time>
+             |Last Access: <last access>
+             |Created By: <created by>
+             |Type: VIEW
+             |View Text: SELECT id FROM $catalog.$namespace.$table
+             |View Catalog and Namespace: spark_catalog.default
+             |View Query Output Columns: [id]
+             |Schema: root
+             | |-- id: integer (nullable = true)""".stripMargin
+        assert(actualLocalResult2 === expectedLocalResult2)
+      }
+    }
+  }
+
+  // Replace some non-deterministic values with deterministic value
+  // for easy comparison of results, such as `Created Time`, etc
+  protected def replace(text: String): String = {
+    text.split("\n").map {
+      case s"Created Time:$_" => "Created Time: <created time>"
+      case s"Last Access:$_" => "Last Access: <last access>"
+      case s"Created By:$_" => "Created By: <created by>"
+      case s"Location:$_" => "Location: <location>"
+      case s"Table Properties:$_" => "Table Properties: <table properties>"
+      case s"Partition Parameters:$_" => "Partition Parameters: <partition 
parameters>"
+      case other => other
+    }.mkString("\n")
+  }
+
+  /**
+   * - V1: `show extended` and `select *` display the schema;
+   *     the partition columns are always displayed at the end.
+   * - V2: `show extended` and `select *` display the schema;
+   *     the column order respects the original table schema.
+   */
+  protected def selectCommandSchema: Array[String] = Array("data", "id")
+  test("show table extended: the display order of the columns is different in 
v1 and v2") {
+    val namespace = "ns1"
+    val table = "tbl"
+    withNamespaceAndTable(namespace, table, catalog) { _ =>
+      sql(s"CREATE TABLE $catalog.$namespace.$table (data string, id bigint) " 
+
+        s"$defaultUsing PARTITIONED BY (id)")
+      sql(s"INSERT INTO $catalog.$namespace.$table PARTITION (id = 1) (data) 
VALUES ('data1')")
+      val result = sql(s"SELECT * FROM $catalog.$namespace.$table")
+      assert(result.schema.fieldNames === Array("data", "id"))
+
+      val table1 = "tbl1"
+      withTable(table1) {
+        sql(s"CREATE TABLE $catalog.$namespace.$table1 (id bigint, data 
string) " +
+          s"$defaultUsing PARTITIONED BY (id)")
+        sql(s"INSERT INTO $catalog.$namespace.$table1 PARTITION (id = 1) 
(data) VALUES ('data2')")
+
+        val result1 = sql(s"SELECT * FROM $catalog.$namespace.$table1")
+        assert(result1.schema.fieldNames === selectCommandSchema)
+
+        val extendedResult = sql(s"SHOW TABLE EXTENDED IN $catalog.$namespace 
LIKE '$table*'").
+          sort("tableName")
+        val extendedResultCollect = extendedResult.collect()
+
+        assert(extendedResultCollect(0)(1) === table)
+        assert(extendedResultCollect(0)(3).toString.contains(
+          s"""Schema: root
+             | |-- data: string (nullable = true)
+             | |-- id: long (nullable = true)""".stripMargin))
+
+        assert(extendedResultCollect(1)(1) === table1)
+        assert(extendedResultCollect(1)(3).toString.contains(extendedTableSchema))
+      }
+    }
+  }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala
index 5bda7d002dc..4b4742910bd 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala
@@ -18,7 +18,6 @@
 package org.apache.spark.sql.execution.command.v1
 
 import org.apache.spark.sql.{AnalysisException, Row, SaveMode}
-import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException
 import org.apache.spark.sql.execution.command
 import org.apache.spark.sql.internal.SQLConf
 
@@ -134,16 +133,6 @@ trait ShowTablesSuiteBase extends command.ShowTablesSuiteBase with command.Tests
       }
     }
   }
-
-  test("show table in a not existing namespace") {
-    val e = intercept[NoSuchDatabaseException] {
-      runShowTablesSql(s"SHOW TABLES IN $catalog.unknown", Seq())
-    }
-    checkError(e,
-      errorClass = "SCHEMA_NOT_FOUND",
-      parameters = Map("schemaName" -> "`unknown`"))
-  }
-
 }
 
 /**
@@ -165,4 +154,63 @@ class ShowTablesSuite extends ShowTablesSuiteBase with CommandSuiteBase {
       }
     }
   }
+
+  override protected def extendedPartInNonPartedTableError(
+      catalog: String,
+      namespace: String,
+      table: String): (String, Map[String, String]) = {
+    ("_LEGACY_ERROR_TEMP_1251",
+      Map("action" -> "SHOW TABLE EXTENDED", "tableName" -> table))
+  }
+
+  protected override def extendedPartExpectedResult: String =
+    super.extendedPartExpectedResult +
+    """
+      |Location: <location>
+      |Created Time: <created time>
+      |Last Access: <last access>""".stripMargin
+
+  protected override def extendedTableInfo: String =
+    """Created Time: <created time>
+      |Last Access: <last access>
+      |Created By: <created by>
+      |Type: MANAGED
+      |Provider: parquet
+      |Location: <location>""".stripMargin
+
+  test("show table extended in permanent view") {
+    val namespace = "ns"
+    val table = "tbl"
+    withNamespaceAndTable(namespace, table, catalog) { t =>
+      sql(s"CREATE TABLE $t (id int) $defaultUsing")
+      val viewName = table + "_view"
+      withView(viewName) {
+        sql(s"CREATE VIEW $catalog.$namespace.$viewName AS SELECT id FROM $t")
+        val result = sql(s"SHOW TABLE EXTENDED in $namespace LIKE 
'$viewName*'").sort("tableName")
+        assert(result.schema.fieldNames ===
+          Seq("namespace", "tableName", "isTemporary", "information"))
+        val resultCollect = result.collect()
+        assert(resultCollect.length == 1)
+        assert(resultCollect(0).length == 4)
+        assert(resultCollect(0)(1) === viewName)
+        assert(resultCollect(0)(2) === false)
+        val actualResult = replace(resultCollect(0)(3).toString)
+        val expectedResult =
+          s"""Catalog: $catalog
+             |Database: $namespace
+             |Table: $viewName
+             |Created Time: <created time>
+             |Last Access: <last access>
+             |Created By: <created by>
+             |Type: VIEW
+             |View Text: SELECT id FROM $catalog.$namespace.$table
+             |View Original Text: SELECT id FROM $catalog.$namespace.$table
+             |View Catalog and Namespace: $catalog.$namespace
+             |View Query Output Columns: [id]
+             |Schema: root
+             | |-- id: integer (nullable = true)""".stripMargin
+        assert(actualResult === expectedResult)
+      }
+    }
+  }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala
index 9a67eab055e..d66dca20d77 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala
@@ -17,9 +17,9 @@
 
 package org.apache.spark.sql.execution.command.v2
 
-import org.apache.spark.sql.{AnalysisException, Row}
-import org.apache.spark.sql.catalyst.analysis.NoSuchNamespaceException
+import org.apache.spark.sql.Row
 import org.apache.spark.sql.execution.command
+import org.apache.spark.util.Utils
 
 /**
 * The class contains tests for the `SHOW TABLES` command to check V2 table catalogs.
@@ -49,57 +49,26 @@ class ShowTablesSuite extends command.ShowTablesSuiteBase with CommandSuiteBase
     }
   }
 
-  // The test fails for V1 catalog with the error:
-  // org.apache.spark.sql.AnalysisException:
-  //   The namespace in session catalog must have exactly one name part: spark_catalog.ns1.ns2.tbl
-  test("SHOW TABLE EXTENDED not valid v1 database") {
-    def testV1CommandNamespace(sqlCommand: String, namespace: String): Unit = {
-      val e = intercept[AnalysisException] {
-        sql(sqlCommand)
-      }
-      assert(e.message.contains(s"SHOW TABLE EXTENDED is not supported for v2 tables"))
-    }
+  override protected def extendedPartInNonPartedTableError(
+      catalog: String,
+      namespace: String,
+      table: String): (String, Map[String, String]) = {
+    ("_LEGACY_ERROR_TEMP_1231",
+      Map("key" -> "id", "tblName" -> s"`$catalog`.`$namespace`.`$table`"))
+  }
 
-    val namespace = s"$catalog.ns1.ns2"
-    val table = "tbl"
-    withTable(s"$namespace.$table") {
-      sql(s"CREATE TABLE $namespace.$table (id bigint, data string) " +
-        s"$defaultUsing PARTITIONED BY (id)")
+  protected override def namespaceKey: String = "Namespace"
 
-      testV1CommandNamespace(s"SHOW TABLE EXTENDED FROM $namespace LIKE 'tb*'",
-        namespace)
-      testV1CommandNamespace(s"SHOW TABLE EXTENDED IN $namespace LIKE 'tb*'",
-        namespace)
-      testV1CommandNamespace("SHOW TABLE EXTENDED " +
-        s"FROM $namespace LIKE 'tb*' PARTITION(id=1)",
-        namespace)
-      testV1CommandNamespace("SHOW TABLE EXTENDED " +
-        s"IN $namespace LIKE 'tb*' PARTITION(id=1)",
-        namespace)
-    }
-  }
+  protected override def extendedTableInfo: String =
+    s"""Type: MANAGED
+       |Provider: _
+       |Owner: ${Utils.getCurrentUserName()}
+       |Table Properties: <table properties>""".stripMargin
 
-  // TODO(SPARK-33393): Support SHOW TABLE EXTENDED in DSv2
-  test("SHOW TABLE EXTENDED: an existing table") {
-    val table = "people"
-    withTable(s"$catalog.$table") {
-      sql(s"CREATE TABLE $catalog.$table (name STRING, id INT) $defaultUsing")
-      checkError(
-        exception = intercept[AnalysisException] {
-          sql(s"SHOW TABLE EXTENDED FROM $catalog LIKE '*$table*'").collect()
-        },
-        errorClass = "_LEGACY_ERROR_TEMP_1200",
-        parameters = Map("name" -> "SHOW TABLE EXTENDED")
-      )
-    }
-  }
+  protected override def extendedTableSchema: String =
+    s"""Schema: root
+       | |-- id: long (nullable = true)
+       | |-- data: string (nullable = true)""".stripMargin
 
-  test("show table in a not existing namespace") {
-    val e = intercept[NoSuchNamespaceException] {
-      runShowTablesSql(s"SHOW TABLES IN $catalog.unknown", Seq())
-    }
-    checkError(e,
-      errorClass = "SCHEMA_NOT_FOUND",
-      parameters = Map("schemaName" -> "`unknown`"))
-  }
+  protected override def selectCommandSchema: Array[String] = Array("id", "data")
 }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowTablesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowTablesSuite.scala
index 653a157e762..79b1eb6c096 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowTablesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowTablesSuite.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql.hive.execution.command
 
 import org.apache.spark.sql.execution.command.v1
+import org.apache.spark.util.Utils
 
 /**
 * The class contains tests for the `SHOW TABLES` command to check V1 Hive external table catalog.
@@ -33,4 +34,80 @@ class ShowTablesSuite extends v1.ShowTablesSuiteBase with CommandSuiteBase {
       }
     }
   }
+
+  override protected def extendedPartInNonPartedTableError(
+      catalog: String,
+      namespace: String,
+      table: String): (String, Map[String, String]) = {
+    ("_LEGACY_ERROR_TEMP_1231",
+      Map("key" -> "id", "tblName" -> s"`$catalog`.`$namespace`.`$table`"))
+  }
+
+  protected override def extendedPartExpectedResult: String =
+    super.extendedPartExpectedResult +
+    """
+      |Location: <location>
+      |Serde Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      |InputFormat: org.apache.hadoop.mapred.TextInputFormat
+      |OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+      |Storage Properties: [serialization.format=1]
+      |Partition Parameters: <partition parameters>
+      |Created Time: <created time>
+      |Last Access: <last access>""".stripMargin
+
+  protected override def extendedTableInfo: String =
+    s"""Owner: ${Utils.getCurrentUserName()}
+       |Created Time: <created time>
+       |Last Access: <last access>
+       |Created By: <created by>
+       |Type: MANAGED
+       |Provider: hive
+       |Table Properties: <table properties>
+       |Location: <location>
+       |Serde Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+       |InputFormat: org.apache.hadoop.mapred.TextInputFormat
+       |OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+       |Storage Properties: [serialization.format=1]""".stripMargin
+
+  test("show table extended in permanent view") {
+    val namespace = "ns"
+    val table = "tbl"
+    withNamespaceAndTable(namespace, table, catalog) { t =>
+      sql(s"CREATE TABLE $t (id int) $defaultUsing")
+      val viewName = table + "_view"
+      withView(viewName) {
+        sql(s"CREATE VIEW $catalog.$namespace.$viewName AS SELECT id FROM $t")
+        val result = sql(s"SHOW TABLE EXTENDED in $namespace LIKE 
'$viewName*'").sort("tableName")
+        assert(result.schema.fieldNames ===
+          Seq("namespace", "tableName", "isTemporary", "information"))
+        val resultCollect = result.collect()
+        assert(resultCollect.length == 1)
+        assert(resultCollect(0).length == 4)
+        assert(resultCollect(0)(1) === viewName)
+        assert(resultCollect(0)(2) === false)
+        val actualResult = replace(resultCollect(0)(3).toString)
+        val expectedResult =
+          s"""Catalog: $catalog
+             |Database: $namespace
+             |Table: $viewName
+             |Owner: ${Utils.getCurrentUserName()}
+             |Created Time: <created time>
+             |Last Access: <last access>
+             |Created By: <created by>
+             |Type: VIEW
+             |View Text: SELECT id FROM $catalog.$namespace.$table
+             |View Original Text: SELECT id FROM $catalog.$namespace.$table
+             |View Catalog and Namespace: $catalog.$namespace
+             |View Query Output Columns: [id]
+             |Table Properties: <table properties>
+             |Serde Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+             |InputFormat: org.apache.hadoop.mapred.SequenceFileInputFormat
+             |OutputFormat: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+             |Storage Properties: [serialization.format=1]
+             |Schema: root
+             | |-- id: integer (nullable = true)""".stripMargin
+        assert(actualResult === expectedResult)
+      }
+    }
+  }
 }

