This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 9555658  [SPARK-33670][SQL][3.0] Verify the partition provider is Hive in v1 SHOW TABLE EXTENDED
9555658 is described below

commit 955565849593b521b801f4166f8bbac9e411b0e1
Author: Max Gekk <max.g...@gmail.com>
AuthorDate: Mon Dec 7 19:19:46 2020 +0900

    [SPARK-33670][SQL][3.0] Verify the partition provider is Hive in v1 SHOW TABLE EXTENDED
    
    ### What changes were proposed in this pull request?
    Invoke the check `DDLUtils.verifyPartitionProviderIsHive()` from the V1 implementation of `SHOW TABLE EXTENDED` when partition specs are specified.
    
    This PR is a follow-up to https://github.com/apache/spark/pull/16373 and https://github.com/apache/spark/pull/15515.
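    
    For context, a simplified sketch of the kind of validation the invoked check performs (an approximation only; the exact logic and error wording live in `org.apache.spark.sql.execution.command.DDLUtils`):
    ```
    // Hypothetically placed alongside Spark's own command code so that the
    // package-private AnalysisException constructor is accessible.
    package org.apache.spark.sql.execution.command

    import org.apache.spark.sql.{AnalysisException, SparkSession}
    import org.apache.spark.sql.catalyst.catalog.CatalogTable
    import org.apache.spark.sql.internal.SQLConf

    // Rough sketch: reject the command on datasource (non-Hive-provider) tables whose
    // partitions are either unmanaged (conf disabled) or not tracked in the metastore.
    def verifyPartitionProviderIsHive(
        spark: SparkSession, table: CatalogTable, action: String): Unit = {
      val tableName = table.identifier.table
      val isDatasourceTable = table.provider.exists(_.toLowerCase != "hive")
      if (isDatasourceTable &&
          !spark.sessionState.conf.getConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS)) {
        throw new AnalysisException(
          s"$action is not allowed on $tableName since filesource partition management is disabled")
      }
      if (isDatasourceTable && !table.tracksPartitionsInCatalog) {
        throw new AnalysisException(
          s"$action is not allowed on $tableName since its partition metadata is not stored in " +
            s"the Hive metastore. To import this information into the metastore, run " +
            s"`msck repair table $tableName`")
      }
    }
    ```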
    
    ### Why are the changes needed?
    To output a user-friendly error with a recommendation like
    **"
    ... partition metadata is not stored in the Hive metastore. To import this information into the metastore, run `msck repair table tableName`
    "**
    instead of silently outputting an empty result.
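    
    For illustration, a hypothetical `spark-shell` session (the table name, data, and abbreviated error text below are examples, not taken verbatim from this patch):
    ```
    // Disable filesource partition management so the partitioned datasource table
    // below does not get its partition metadata registered with the Hive metastore.
    spark.conf.set("spark.sql.hive.manageFilesourcePartitions", "false")
    spark.range(3).selectExpr("id", "id AS part").write
      .partitionBy("part").format("parquet").saveAsTable("tbl")

    // Before this change: silently returned an empty result.
    // After this change: throws an AnalysisException along the lines of
    //   "SHOW TABLE EXTENDED is not allowed on tbl since filesource partition
    //    management is disabled ..."
    spark.sql("SHOW TABLE EXTENDED LIKE 'tbl' PARTITION (part = 1)").show()
    ```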
    
    ### Does this PR introduce _any_ user-facing change?
    Yes. For partitioned datasource tables whose partition metadata is not stored in the Hive metastore, `SHOW TABLE EXTENDED ... PARTITION(...)` now fails with the error above instead of returning an empty result.
    
    ### How was this patch tested?
    By running the affected test suites, in particular:
    ```
    $ build/sbt -Phive-2.3 -Phive-thriftserver "test:testOnly *HiveCatalogedDDLSuite"
    $ build/sbt -Phive-2.3 -Phive-thriftserver "hive/test:testOnly *PartitionProviderCompatibilitySuite"
    ```
    
    Authored-by: Max Gekk <max.gekk@gmail.com>
    Signed-off-by: HyukjinKwon <gurwls223@apache.org>
    (cherry picked from commit 29096a8869c95221dc75ce7fd3d098680bef4f55)
    Signed-off-by: Max Gekk <max.gekk@gmail.com>
    
    Closes #30640 from MaxGekk/show-table-extended-verifyPartitionProviderIsHive-3.0.
    
    Authored-by: Max Gekk <max.g...@gmail.com>
    Signed-off-by: HyukjinKwon <gurwls...@apache.org>
---
 .../org/apache/spark/sql/execution/command/tables.scala    |  3 +++
 .../org/apache/spark/sql/execution/command/DDLSuite.scala  | 10 ++++++++++
 .../sql/hive/PartitionProviderCompatibilitySuite.scala     | 14 ++++++++++----
 3 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
index d8efc8b..d550683 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
@@ -885,6 +885,9 @@ case class ShowTablesCommand(
       // Note: tableIdentifierPattern should be non-empty, otherwise a [[ParseException]]
       // should have been thrown by the sql parser.
       val table = catalog.getTableMetadata(TableIdentifier(tableIdentifierPattern.get, Some(db)))
+
+      DDLUtils.verifyPartitionProviderIsHive(sparkSession, table, "SHOW TABLE EXTENDED")
+
       val tableIdent = table.identifier
       val normalizedSpec = PartitioningUtils.normalizePartitionSpec(
         partitionSpec.get,
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index 64a706d..cf464d1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -3073,6 +3073,16 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
       }
     }
   }
+
+  test("SPARK-33670: show partitions from a datasource table") {
+    import testImplicits._
+    val t = "part_datasrc"
+    withTable(t) {
+      val df = (1 to 3).map(i => (i, s"val_$i", i * 2)).toDF("a", "b", "c")
+      df.write.partitionBy("a").format("parquet").mode(SaveMode.Overwrite).saveAsTable(t)
+      assert(sql(s"SHOW TABLE EXTENDED LIKE '$t' PARTITION(a = 1)").count() === 1)
+    }
+  }
 }
 
 object FakeLocalFsFileSystem {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala
index 80afc9d..e1b0637 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala
@@ -53,7 +53,8 @@ class PartitionProviderCompatibilitySuite
       s"ALTER TABLE $tableName PARTITION (partCol=1) SET LOCATION '/foo'",
       s"ALTER TABLE $tableName DROP PARTITION (partCol=1)",
       s"DESCRIBE $tableName PARTITION (partCol=1)",
-      s"SHOW PARTITIONS $tableName")
+      s"SHOW PARTITIONS $tableName",
+      s"SHOW TABLE EXTENDED LIKE '$tableName' PARTITION (partCol=1)")
 
     withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true") {
       for (cmd <- unsupportedCommands) {
@@ -124,10 +125,15 @@ class PartitionProviderCompatibilitySuite
         }
         // disabled
         withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "false") {
-          val e = intercept[AnalysisException] {
-            spark.sql(s"show partitions test")
+          Seq(
+            "SHOW PARTITIONS test",
+            "SHOW TABLE EXTENDED LIKE 'test' PARTITION (partCol=1)"
+          ).foreach { showPartitions =>
+            val e = intercept[AnalysisException] {
+              spark.sql(showPartitions)
+            }
+            assert(e.getMessage.contains("filesource partition management is disabled"))
           }
-          assert(e.getMessage.contains("filesource partition management is disabled"))
           spark.sql("refresh table test")
           assert(spark.sql("select * from test").count() == 5)
         }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
