This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 1318be7  [SPARK-34267][SQL] Remove `refreshTable()` from `SessionState`
1318be7 is described below

commit 1318be7ee94a40289b3d584261c9d38d66398fec
Author: Max Gekk <[email protected]>
AuthorDate: Wed Jan 27 09:43:59 2021 -0800

    [SPARK-34267][SQL] Remove `refreshTable()` from `SessionState`
    
    ### What changes were proposed in this pull request?
    Remove `SessionState.refreshTable()` and modify the tests where the method 
is used.
    
    ### Why are the changes needed?
    There are already two methods with the same name in:
    - `SessionCatalog`
    - `CatalogImpl`
    
    One more method in `SessionState` does not provide any benefit. By removing 
it, we can improve code maintainability.
    
    ### Does this PR introduce _any_ user-facing change?
    It should not, because `SessionState` is an internal class.
    
    ### How was this patch tested?
    By running the modified test suites:
    ```
    $ build/sbt -Phive -Phive-thriftserver "test:testOnly 
*MetastoreDataSourcesSuite"
    $ build/sbt -Phive -Phive-thriftserver "test:testOnly *HiveOrcQuerySuite"
    $ build/sbt -Phive -Phive-thriftserver "test:testOnly 
*HiveParquetMetastoreSuite"
    ```
    
    Closes #31366 from MaxGekk/remove-refreshTable-from-SessionState.
    
    Authored-by: Max Gekk <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 .../org/apache/spark/sql/internal/SessionState.scala     |  4 ----
 .../spark/sql/hive/HiveParquetMetastoreSuite.scala       |  6 +++---
 .../spark/sql/hive/MetastoreDataSourcesSuite.scala       | 16 ++++++++--------
 .../apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala    |  1 -
 4 files changed, 11 insertions(+), 16 deletions(-)

diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala
index 60ca06d..258c9bb 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala
@@ -119,10 +119,6 @@ private[sql] class SessionState(
   // ------------------------------------------------------
 
   def executePlan(plan: LogicalPlan): QueryExecution = 
createQueryExecution(plan)
-
-  def refreshTable(tableName: String): Unit = {
-    catalog.refreshTable(sqlParser.parseTableIdentifier(tableName))
-  }
 }
 
 private[sql] object SessionState {
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetMetastoreSuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetMetastoreSuite.scala
index 0bdaa0c..0351754 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetMetastoreSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetMetastoreSuite.scala
@@ -473,7 +473,7 @@ class HiveParquetMetastoreSuite extends 
ParquetPartitioningTest {
     checkCached(tableIdentifier)
     // For insert into non-partitioned table, we will do the conversion,
     // so the converted test_insert_parquet should be cached.
-    sessionState.refreshTable("test_insert_parquet")
+    spark.catalog.refreshTable("test_insert_parquet")
     assert(getCachedDataSourceTable(tableIdentifier) === null)
     sql(
       """
@@ -486,7 +486,7 @@ class HiveParquetMetastoreSuite extends 
ParquetPartitioningTest {
       sql("select * from test_insert_parquet"),
       sql("select a, b from jt").collect())
     // Invalidate the cache.
-    sessionState.refreshTable("test_insert_parquet")
+    spark.catalog.refreshTable("test_insert_parquet")
     assert(getCachedDataSourceTable(tableIdentifier) === null)
 
     // Create a partitioned table.
@@ -536,7 +536,7 @@ class HiveParquetMetastoreSuite extends 
ParquetPartitioningTest {
           |select b, '2015-04-02', a FROM jt
         """.stripMargin).collect())
 
-    sessionState.refreshTable("test_parquet_partitioned_cache_test")
+    spark.catalog.refreshTable("test_parquet_partitioned_cache_test")
     assert(getCachedDataSourceTable(tableIdentifier) === null)
 
     dropTables("test_insert_parquet", "test_parquet_partitioned_cache_test")
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index ecbb104..ba44192 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -256,13 +256,13 @@ class MetastoreDataSourcesSuite extends QueryTest with 
SQLTestUtils with TestHiv
           sql("SELECT `c_!@(3)` FROM expectedJsonTable").collect().toSeq)
 
         // Discard the cached relation.
-        sessionState.refreshTable("jsonTable")
+        spark.catalog.refreshTable("jsonTable")
 
         checkAnswer(
           sql("SELECT * FROM jsonTable"),
           sql("SELECT `c_!@(3)` FROM expectedJsonTable").collect().toSeq)
 
-        sessionState.refreshTable("jsonTable")
+        spark.catalog.refreshTable("jsonTable")
         val expectedSchema = StructType(StructField("c_!@(3)", IntegerType, 
true) :: Nil)
 
         assert(expectedSchema === table("jsonTable").schema)
@@ -350,7 +350,7 @@ class MetastoreDataSourcesSuite extends QueryTest with 
SQLTestUtils with TestHiv
            """.stripMargin)
 
         // Discard the cached relation.
-        sessionState.refreshTable("ctasJsonTable")
+        spark.catalog.refreshTable("ctasJsonTable")
 
         // Schema should not be changed.
         assert(table("ctasJsonTable").schema === table("jsonTable").schema)
@@ -438,7 +438,7 @@ class MetastoreDataSourcesSuite extends QueryTest with 
SQLTestUtils with TestHiv
         sql("SELECT * FROM savedJsonTable tmp where tmp.a > 5"),
         (6 to 10).map(i => Row(i, s"str$i")))
 
-      sessionState.refreshTable("savedJsonTable")
+      spark.catalog.refreshTable("savedJsonTable")
 
       checkAnswer(
         sql("SELECT * FROM savedJsonTable where savedJsonTable.a < 5"),
@@ -718,7 +718,7 @@ class MetastoreDataSourcesSuite extends QueryTest with 
SQLTestUtils with TestHiv
         )
         spark.sessionState.catalog.createTable(tableDesc, ignoreIfExists = 
false)
 
-        sessionState.refreshTable("wide_schema")
+        spark.catalog.refreshTable("wide_schema")
 
         val actualSchema = table("wide_schema").schema
         assert(schema === actualSchema)
@@ -751,7 +751,7 @@ class MetastoreDataSourcesSuite extends QueryTest with 
SQLTestUtils with TestHiv
 
       hiveClient.createTable(hiveTable, ignoreIfExists = false)
 
-      sessionState.refreshTable(tableName)
+      spark.catalog.refreshTable(tableName)
       val actualSchema = table(tableName).schema
       assert(schema === actualSchema)
     }
@@ -763,7 +763,7 @@ class MetastoreDataSourcesSuite extends QueryTest with 
SQLTestUtils with TestHiv
 
     withTable(tableName) {
       df.write.format("parquet").partitionBy("d", "b").saveAsTable(tableName)
-      sessionState.refreshTable(tableName)
+      spark.catalog.refreshTable(tableName)
       val metastoreTable = hiveClient.getTable("default", tableName)
       val expectedPartitionColumns = StructType(df.schema("d") :: 
df.schema("b") :: Nil)
 
@@ -798,7 +798,7 @@ class MetastoreDataSourcesSuite extends QueryTest with 
SQLTestUtils with TestHiv
         .bucketBy(8, "d", "b")
         .sortBy("c")
         .saveAsTable(tableName)
-      sessionState.refreshTable(tableName)
+      spark.catalog.refreshTable(tableName)
       val metastoreTable = hiveClient.getTable("default", tableName)
       val expectedBucketByColumns = StructType(df.schema("d") :: 
df.schema("b") :: Nil)
       val expectedSortByColumns = StructType(df.schema("c") :: Nil)
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala
index 45a5b3d..280ffb0 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala
@@ -255,7 +255,6 @@ class HiveOrcQuerySuite extends OrcQueryTest with 
TestHiveSingleton {
         withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> "true",
           HiveUtils.CONVERT_INSERTING_PARTITIONED_TABLE.key -> conversion) {
           withTable("dummy_orc_partitioned") {
-            spark.sessionState.refreshTable("dummy_orc_partitioned")
             spark.sql(
               s"""
                  |CREATE TABLE dummy_orc_partitioned(key INT, value STRING)


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to