This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 1591f7461e68 [SPARK-46446][SQL][FOLLOW-UP] Fix sql tests for subqueries with correlated OFFSET
1591f7461e68 is described below

commit 1591f7461e68887d22cf613ee37b6015cd960d72
Author: Jack Chen <jack.c...@databricks.com>
AuthorDate: Wed Dec 20 13:42:20 2023 +0800

    [SPARK-46446][SQL][FOLLOW-UP] Fix sql tests for subqueries with correlated OFFSET
    
    ### What changes were proposed in this pull request?
    Follow-up PR for https://github.com/apache/spark/pull/44401: some small test fixes that didn't get included in that PR.
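    
    In short, the updated test setup follows this pattern (paraphrased from the input file in the diff below; table names and data come from the test itself):
    
    ```sql
    -- Create the fixture tables as JSON data source tables so the suite runs
    -- without Hive support, and clear any state left over from earlier runs.
    drop table if exists x;
    drop table if exists y;
    
    create table x(x1 int, x2 int) using json;
    insert into x values (1, 1), (2, 2);
    
    create table y(y1 int, y2 int) using json;
    insert into y values (1, 1), (1, 2), (2, 4);
    
    -- ... the correlated-OFFSET queries under test ...
    
    drop table x;
    drop table y;
    ```
    
    The rest of the golden-file churn follows from this: the correlated-OFFSET queries now fail analysis with UNSUPPORTED_SUBQUERY_EXPRESSION_CATEGORY.ACCESSING_OUTER_QUERY_COLUMN_IS_NOT_ALLOWED rather than TABLE_OR_VIEW_NOT_FOUND, and the uncorrelated ones return real results.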
    
    ### Why are the changes needed?
    Fix tests
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Tests themselves
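    (For anyone reproducing locally: this suite's golden files are typically regenerated with something like `SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/testOnly *SQLQueryTestSuite -- -z subquery-offset.sql"`; the exact invocation is documented in SQLQueryTestSuite and may differ by branch.)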
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No
    
    Closes #44415 from jchen5/offset-tests.
    
    Authored-by: Jack Chen <jack.c...@databricks.com>
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
---
 .../subquery/subquery-offset.sql.out               | 147 +++++++++------------
 .../sql-tests/inputs/subquery/subquery-offset.sql  |  10 +-
 .../results/subquery/subquery-offset.sql.out       | 140 ++++++++------------
 3 files changed, 132 insertions(+), 165 deletions(-)

diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/subquery/subquery-offset.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/subquery/subquery-offset.sql.out
index cee1de55aa4a..f49ee3178a0e 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/subquery/subquery-offset.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/subquery/subquery-offset.sql.out
@@ -1,68 +1,44 @@
 -- Automatically generated by SQLQueryTestSuite
 -- !query
-create table x(x1 int, x2 int)
+drop table if exists x
 -- !query analysis
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "NOT_SUPPORTED_COMMAND_WITHOUT_HIVE_SUPPORT",
-  "sqlState" : "0A000",
-  "messageParameters" : {
-    "cmd" : "CREATE Hive TABLE (AS SELECT)"
-  }
-}
+DropTable true, false
++- ResolvedIdentifier V2SessionCatalog(spark_catalog), default.x
+
+
+-- !query
+drop table if exists y
+-- !query analysis
+DropTable true, false
++- ResolvedIdentifier V2SessionCatalog(spark_catalog), default.y
+
+
+-- !query
+create table x(x1 int, x2 int) using json
+-- !query analysis
+CreateDataSourceTableCommand `spark_catalog`.`default`.`x`, false
 
 
 -- !query
 insert into x values (1, 1), (2, 2)
 -- !query analysis
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
-  "messageParameters" : {
-    "relationName" : "`x`"
-  },
-  "queryContext" : [ {
-    "objectType" : "",
-    "objectName" : "",
-    "startIndex" : 13,
-    "stopIndex" : 13,
-    "fragment" : "x"
-  } ]
-}
+InsertIntoHadoopFsRelationCommand file:[not included in comparison]/{warehouse_dir}/x, false, JSON, [path=file:[not included in comparison]/{warehouse_dir}/x], Append, `spark_catalog`.`default`.`x`, org.apache.spark.sql.execution.datasources.InMemoryFileIndex(file:[not included in comparison]/{warehouse_dir}/x), [x1, x2]
++- Project [cast(col1#x as int) AS x1#x, cast(col2#x as int) AS x2#x]
+   +- LocalRelation [col1#x, col2#x]
 
 
 -- !query
-create table y(y1 int, y2 int)
+create table y(y1 int, y2 int) using json
 -- !query analysis
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "NOT_SUPPORTED_COMMAND_WITHOUT_HIVE_SUPPORT",
-  "sqlState" : "0A000",
-  "messageParameters" : {
-    "cmd" : "CREATE Hive TABLE (AS SELECT)"
-  }
-}
+CreateDataSourceTableCommand `spark_catalog`.`default`.`y`, false
 
 
 -- !query
 insert into y values (1, 1), (1, 2), (2, 4)
 -- !query analysis
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
-  "messageParameters" : {
-    "relationName" : "`y`"
-  },
-  "queryContext" : [ {
-    "objectType" : "",
-    "objectName" : "",
-    "startIndex" : 13,
-    "stopIndex" : 13,
-    "fragment" : "y"
-  } ]
-}
+InsertIntoHadoopFsRelationCommand file:[not included in comparison]/{warehouse_dir}/y, false, JSON, [path=file:[not included in comparison]/{warehouse_dir}/y], Append, `spark_catalog`.`default`.`y`, org.apache.spark.sql.execution.datasources.InMemoryFileIndex(file:[not included in comparison]/{warehouse_dir}/y), [y1, y2]
++- Project [cast(col1#x as int) AS y1#x, cast(col2#x as int) AS y2#x]
+   +- LocalRelation [col1#x, col2#x]
 
 
 -- !query
@@ -70,17 +46,17 @@ select * from x where exists (select * from y where x1 = y1 limit 1 offset 2)
 -- !query analysis
 org.apache.spark.sql.catalyst.ExtendedAnalysisException
 {
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
+  "errorClass" : 
"UNSUPPORTED_SUBQUERY_EXPRESSION_CATEGORY.ACCESSING_OUTER_QUERY_COLUMN_IS_NOT_ALLOWED",
+  "sqlState" : "0A000",
   "messageParameters" : {
-    "relationName" : "`x`"
+    "treeNode" : "Filter (outer(x1#x) = y1#x)\n+- SubqueryAlias 
spark_catalog.default.y\n   +- Relation spark_catalog.default.y[y1#x,y2#x] 
json\n"
   },
   "queryContext" : [ {
     "objectType" : "",
     "objectName" : "",
-    "startIndex" : 15,
-    "stopIndex" : 15,
-    "fragment" : "x"
+    "startIndex" : 31,
+    "stopIndex" : 59,
+    "fragment" : "select * from y where x1 = y1"
   } ]
 }
 
@@ -90,17 +66,17 @@ select * from x join lateral (select * from y where x1 = y1 limit 1 offset 2)
 -- !query analysis
 org.apache.spark.sql.catalyst.ExtendedAnalysisException
 {
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
+  "errorClass" : 
"UNSUPPORTED_SUBQUERY_EXPRESSION_CATEGORY.ACCESSING_OUTER_QUERY_COLUMN_IS_NOT_ALLOWED",
+  "sqlState" : "0A000",
   "messageParameters" : {
-    "relationName" : "`x`"
+    "treeNode" : "Filter (outer(x1#x) = y1#x)\n+- SubqueryAlias 
spark_catalog.default.y\n   +- Relation spark_catalog.default.y[y1#x,y2#x] 
json\n"
   },
   "queryContext" : [ {
     "objectType" : "",
     "objectName" : "",
-    "startIndex" : 15,
-    "stopIndex" : 15,
-    "fragment" : "x"
+    "startIndex" : 31,
+    "stopIndex" : 59,
+    "fragment" : "select * from y where x1 = y1"
   } ]
 }
 
@@ -108,21 +84,16 @@ org.apache.spark.sql.catalyst.ExtendedAnalysisException
 -- !query
 select * from x where x1 in (select y1 from y limit 1 offset 2)
 -- !query analysis
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
-  "messageParameters" : {
-    "relationName" : "`x`"
-  },
-  "queryContext" : [ {
-    "objectType" : "",
-    "objectName" : "",
-    "startIndex" : 15,
-    "stopIndex" : 15,
-    "fragment" : "x"
-  } ]
-}
+Project [x1#x, x2#x]
++- Filter x1#x IN (list#x [])
+   :  +- GlobalLimit 1
+   :     +- LocalLimit 1
+   :        +- Offset 2
+   :           +- Project [y1#x]
+   :              +- SubqueryAlias spark_catalog.default.y
+   :                 +- Relation spark_catalog.default.y[y1#x,y2#x] json
+   +- SubqueryAlias spark_catalog.default.x
+      +- Relation spark_catalog.default.x[x1#x,x2#x] json
 
 
 -- !query
@@ -130,17 +101,17 @@ select * from x where (select sum(y2) from y where x1 = y1 limit 1 offset 2) > 2
 -- !query analysis
 org.apache.spark.sql.catalyst.ExtendedAnalysisException
 {
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
+  "errorClass" : 
"UNSUPPORTED_SUBQUERY_EXPRESSION_CATEGORY.ACCESSING_OUTER_QUERY_COLUMN_IS_NOT_ALLOWED",
+  "sqlState" : "0A000",
   "messageParameters" : {
-    "relationName" : "`x`"
+    "treeNode" : "Filter (outer(x1#x) = y1#x)\n+- SubqueryAlias 
spark_catalog.default.y\n   +- Relation spark_catalog.default.y[y1#x,y2#x] 
json\n"
   },
   "queryContext" : [ {
     "objectType" : "",
     "objectName" : "",
-    "startIndex" : 15,
-    "stopIndex" : 15,
-    "fragment" : "x"
+    "startIndex" : 24,
+    "stopIndex" : 58,
+    "fragment" : "select sum(y2) from y where x1 = y1"
   } ]
 }
 
@@ -251,3 +222,17 @@ org.apache.spark.sql.catalyst.ExtendedAnalysisException
     "fragment" : "SELECT max(dept.dept_id) a\n                   FROM   dept\n 
                  WHERE  dept.dept_id = emp.dept_id\n                   GROUP  
BY state"
   } ]
 }
+
+
+-- !query
+drop table x
+-- !query analysis
+DropTable false, false
++- ResolvedIdentifier V2SessionCatalog(spark_catalog), default.x
+
+
+-- !query
+drop table y
+-- !query analysis
+DropTable false, false
++- ResolvedIdentifier V2SessionCatalog(spark_catalog), default.y
diff --git a/sql/core/src/test/resources/sql-tests/inputs/subquery/subquery-offset.sql b/sql/core/src/test/resources/sql-tests/inputs/subquery/subquery-offset.sql
index 80ba45a3a579..9b61d1b26270 100644
--- a/sql/core/src/test/resources/sql-tests/inputs/subquery/subquery-offset.sql
+++ b/sql/core/src/test/resources/sql-tests/inputs/subquery/subquery-offset.sql
@@ -1,6 +1,9 @@
-create table x(x1 int, x2 int);
+drop table if exists x;
+drop table if exists y;
+
+create table x(x1 int, x2 int) using json;
 insert into x values (1, 1), (2, 2);
-create table y(y1 int, y2 int);
+create table y(y1 int, y2 int) using json;
 insert into y values (1, 1), (1, 2), (2, 4);
 
 select * from x where exists (select * from y where x1 = y1 limit 1 offset 2);
@@ -48,3 +51,6 @@ JOIN LATERAL (SELECT max(dept.dept_id) a
                    ORDER  BY state
                    LIMIT 2
                    OFFSET 1);
+
+drop table x;
+drop table y;
diff --git a/sql/core/src/test/resources/sql-tests/results/subquery/subquery-offset.sql.out b/sql/core/src/test/resources/sql-tests/results/subquery/subquery-offset.sql.out
index 7736305b9cfa..fd7fa4830b46 100644
--- a/sql/core/src/test/resources/sql-tests/results/subquery/subquery-offset.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/subquery/subquery-offset.sql.out
@@ -1,17 +1,26 @@
 -- Automatically generated by SQLQueryTestSuite
 -- !query
-create table x(x1 int, x2 int)
+drop table if exists x
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+drop table if exists y
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+create table x(x1 int, x2 int) using json
 -- !query schema
 struct<>
 -- !query output
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "NOT_SUPPORTED_COMMAND_WITHOUT_HIVE_SUPPORT",
-  "sqlState" : "0A000",
-  "messageParameters" : {
-    "cmd" : "CREATE Hive TABLE (AS SELECT)"
-  }
-}
+
 
 
 -- !query
@@ -19,36 +28,15 @@ insert into x values (1, 1), (2, 2)
 -- !query schema
 struct<>
 -- !query output
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
-  "messageParameters" : {
-    "relationName" : "`x`"
-  },
-  "queryContext" : [ {
-    "objectType" : "",
-    "objectName" : "",
-    "startIndex" : 13,
-    "stopIndex" : 13,
-    "fragment" : "x"
-  } ]
-}
+
 
 
 -- !query
-create table y(y1 int, y2 int)
+create table y(y1 int, y2 int) using json
 -- !query schema
 struct<>
 -- !query output
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "NOT_SUPPORTED_COMMAND_WITHOUT_HIVE_SUPPORT",
-  "sqlState" : "0A000",
-  "messageParameters" : {
-    "cmd" : "CREATE Hive TABLE (AS SELECT)"
-  }
-}
+
 
 
 -- !query
@@ -56,21 +44,7 @@ insert into y values (1, 1), (1, 2), (2, 4)
 -- !query schema
 struct<>
 -- !query output
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
-  "messageParameters" : {
-    "relationName" : "`y`"
-  },
-  "queryContext" : [ {
-    "objectType" : "",
-    "objectName" : "",
-    "startIndex" : 13,
-    "stopIndex" : 13,
-    "fragment" : "y"
-  } ]
-}
+
 
 
 -- !query
@@ -80,17 +54,17 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.ExtendedAnalysisException
 {
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
+  "errorClass" : 
"UNSUPPORTED_SUBQUERY_EXPRESSION_CATEGORY.ACCESSING_OUTER_QUERY_COLUMN_IS_NOT_ALLOWED",
+  "sqlState" : "0A000",
   "messageParameters" : {
-    "relationName" : "`x`"
+    "treeNode" : "Filter (outer(x1#x) = y1#x)\n+- SubqueryAlias 
spark_catalog.default.y\n   +- Relation spark_catalog.default.y[y1#x,y2#x] 
json\n"
   },
   "queryContext" : [ {
     "objectType" : "",
     "objectName" : "",
-    "startIndex" : 15,
-    "stopIndex" : 15,
-    "fragment" : "x"
+    "startIndex" : 31,
+    "stopIndex" : 59,
+    "fragment" : "select * from y where x1 = y1"
   } ]
 }
 
@@ -102,17 +76,17 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.ExtendedAnalysisException
 {
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
+  "errorClass" : 
"UNSUPPORTED_SUBQUERY_EXPRESSION_CATEGORY.ACCESSING_OUTER_QUERY_COLUMN_IS_NOT_ALLOWED",
+  "sqlState" : "0A000",
   "messageParameters" : {
-    "relationName" : "`x`"
+    "treeNode" : "Filter (outer(x1#x) = y1#x)\n+- SubqueryAlias 
spark_catalog.default.y\n   +- Relation spark_catalog.default.y[y1#x,y2#x] 
json\n"
   },
   "queryContext" : [ {
     "objectType" : "",
     "objectName" : "",
-    "startIndex" : 15,
-    "stopIndex" : 15,
-    "fragment" : "x"
+    "startIndex" : 31,
+    "stopIndex" : 59,
+    "fragment" : "select * from y where x1 = y1"
   } ]
 }
 
@@ -120,23 +94,9 @@ org.apache.spark.sql.catalyst.ExtendedAnalysisException
 -- !query
 select * from x where x1 in (select y1 from y limit 1 offset 2)
 -- !query schema
-struct<>
+struct<x1:int,x2:int>
 -- !query output
-org.apache.spark.sql.catalyst.ExtendedAnalysisException
-{
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
-  "messageParameters" : {
-    "relationName" : "`x`"
-  },
-  "queryContext" : [ {
-    "objectType" : "",
-    "objectName" : "",
-    "startIndex" : 15,
-    "stopIndex" : 15,
-    "fragment" : "x"
-  } ]
-}
+1      1
 
 
 -- !query
@@ -146,17 +106,17 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.ExtendedAnalysisException
 {
-  "errorClass" : "TABLE_OR_VIEW_NOT_FOUND",
-  "sqlState" : "42P01",
+  "errorClass" : 
"UNSUPPORTED_SUBQUERY_EXPRESSION_CATEGORY.ACCESSING_OUTER_QUERY_COLUMN_IS_NOT_ALLOWED",
+  "sqlState" : "0A000",
   "messageParameters" : {
-    "relationName" : "`x`"
+    "treeNode" : "Filter (outer(x1#x) = y1#x)\n+- SubqueryAlias 
spark_catalog.default.y\n   +- Relation spark_catalog.default.y[y1#x,y2#x] 
json\n"
   },
   "queryContext" : [ {
     "objectType" : "",
     "objectName" : "",
-    "startIndex" : 15,
-    "stopIndex" : 15,
-    "fragment" : "x"
+    "startIndex" : 24,
+    "stopIndex" : 58,
+    "fragment" : "select sum(y2) from y where x1 = y1"
   } ]
 }
 
@@ -252,3 +212,19 @@ org.apache.spark.sql.catalyst.ExtendedAnalysisException
     "fragment" : "SELECT max(dept.dept_id) a\n                   FROM   dept\n 
                  WHERE  dept.dept_id = emp.dept_id\n                   GROUP  
BY state"
   } ]
 }
+
+
+-- !query
+drop table x
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+drop table y
+-- !query schema
+struct<>
+-- !query output
+


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
