This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 07427b854be [SPARK-41154][SQL] Incorrect relation caching for queries with time travel spec
07427b854be is described below

commit 07427b854be58810bd485c00c5e5c576d5aa404e
Author: ulysses-you <ulyssesyo...@gmail.com>
AuthorDate: Mon Nov 21 18:19:10 2022 +0800

    [SPARK-41154][SQL] Incorrect relation caching for queries with time travel spec
    
    ### What changes were proposed in this pull request?
    
    Add TimeTravelSpec to the key of relation cache in AnalysisContext.
    
    ### Why are the changes needed?
    
    Correct the relation resolution for the same table but different TimeTravelSpec.
    
    ### Does this PR introduce _any_ user-facing change?
    
    yes, bug fix
    
    ### How was this patch tested?
    
    add test
    
    Closes #38687 from ulysses-you/time-travel-spec.
    
    Authored-by: ulysses-you <ulyssesyo...@gmail.com>
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
---
 .../apache/spark/sql/catalyst/analysis/Analyzer.scala   | 11 ++++++-----
 .../spark/sql/connector/DataSourceV2SQLSuite.scala      | 17 +++++++++++++++++
 2 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 104c5c1e080..8cdf83f4be7 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -113,9 +113,9 @@ object FakeV2SessionCatalog extends TableCatalog with FunctionCatalog {
  * @param nestedViewDepth The nested depth in the view resolution, this enables us to limit the
  *                        depth of nested views.
  * @param maxNestedViewDepth The maximum allowed depth of nested view resolution.
- * @param relationCache A mapping from qualified table names to resolved relations. This can ensure
- *                      that the table is resolved only once if a table is used multiple times
- *                      in a query.
+ * @param relationCache A mapping from qualified table names and time travel spec to resolved
+ *                      relations. This can ensure that the table is resolved only once if a table
+ *                      is used multiple times in a query.
  * @param referredTempViewNames All the temp view names referred by the current view we are
  *                              resolving. It's used to make sure the relation resolution is
  *                              consistent between view creation and view resolution. For example,
@@ -129,7 +129,8 @@ case class AnalysisContext(
     catalogAndNamespace: Seq[String] = Nil,
     nestedViewDepth: Int = 0,
     maxNestedViewDepth: Int = -1,
-    relationCache: mutable.Map[Seq[String], LogicalPlan] = mutable.Map.empty,
+    relationCache: mutable.Map[(Seq[String], Option[TimeTravelSpec]), LogicalPlan] =
+      mutable.Map.empty,
     referredTempViewNames: Seq[Seq[String]] = Seq.empty,
     // 1. If we are resolving a view, this field will be restored from the view metadata,
     //    by calling `AnalysisContext.withAnalysisContext(viewDesc)`.
@@ -1239,7 +1240,7 @@ class Analyzer(override val catalogManager: CatalogManager)
       resolveTempView(u.multipartIdentifier, u.isStreaming, timeTravelSpec.isDefined).orElse {
         expandIdentifier(u.multipartIdentifier) match {
           case CatalogAndIdentifier(catalog, ident) =>
-            val key = catalog.name +: ident.namespace :+ ident.name
+            val key = ((catalog.name +: ident.namespace :+ ident.name).toSeq, timeTravelSpec)
             AnalysisContext.get.relationCache.get(key).map(_.transform {
               case multi: MultiInstanceRelation =>
                 val newRelation = multi.newInstance()
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
index de8612c3348..ea93367b3db 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
@@ -2775,6 +2775,23 @@ class DataSourceV2SQLSuiteV1Filter extends DataSourceV2SQLSuite with AlterTableT
     }
   }
 
+  test("SPARK-41154: Incorrect relation caching for queries with time travel spec") {
+    sql("use testcat")
+    val t1 = "testcat.t1"
+    val t2 = "testcat.t2"
+    withTable(t1, t2) {
+      sql(s"CREATE TABLE $t1 USING foo AS SELECT 1 as c")
+      sql(s"CREATE TABLE $t2 USING foo AS SELECT 2 as c")
+      assert(
+        sql("""
+              |SELECT * FROM t VERSION AS OF '1'
+              |UNION ALL
+              |SELECT * FROM t VERSION AS OF '2'
+              |""".stripMargin
+        ).collect() === Array(Row(1), Row(2)))
+    }
+  }
+
   private def testNotSupportedV2Command(sqlCommand: String, sqlParams: String): Unit = {
     checkError(
       exception = intercept[AnalysisException] {


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to