This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch branch-3.5
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.5 by this push:
     new 043944e1b549 [SPARK-48273][SQL][FOLLOWUP] Explicitly create non-Hive 
table in identifier-clause.sql
043944e1b549 is described below

commit 043944e1b54902f6d8204a5610e8eb780f1fe753
Author: Wenchen Fan <wenc...@databricks.com>
AuthorDate: Wed May 29 13:35:01 2024 -0700

    [SPARK-48273][SQL][FOLLOWUP] Explicitly create non-Hive table in 
identifier-clause.sql
    
    ### What changes were proposed in this pull request?
    
    A follow-up of https://github.com/apache/spark/pull/46580 . It's better to 
create non-Hive tables in the tests, so that the change is backport-safe, as old 
branches create Hive tables by default.
    
    ### Why are the changes needed?
    
    fix branch-3.5 CI
    
    ### Does this PR introduce _any_ user-facing change?
    
    no
    
    ### How was this patch tested?
    
    N/A
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    no
    
    Closes #46794 from cloud-fan/test.
    
    Authored-by: Wenchen Fan <wenc...@databricks.com>
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
    (cherry picked from commit cf47293b5fc7c80d19e50fda44a01f91d5e34530)
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
---
 .../sql-tests/analyzer-results/identifier-clause.sql.out          | 8 ++++----
 .../src/test/resources/sql-tests/inputs/identifier-clause.sql     | 6 +++---
 .../test/resources/sql-tests/results/identifier-clause.sql.out    | 6 +++---
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git 
a/sql/core/src/test/resources/sql-tests/analyzer-results/identifier-clause.sql.out
 
b/sql/core/src/test/resources/sql-tests/analyzer-results/identifier-clause.sql.out
index 823ce43247a7..9b56a172e59d 100644
--- 
a/sql/core/src/test/resources/sql-tests/analyzer-results/identifier-clause.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/analyzer-results/identifier-clause.sql.out
@@ -687,7 +687,7 @@ org.apache.spark.sql.AnalysisException
 
 
 -- !query
-CREATE TABLE IDENTIFIER(1)(c1 INT)
+CREATE TABLE IDENTIFIER(1)(c1 INT) USING csv
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
@@ -709,7 +709,7 @@ org.apache.spark.sql.AnalysisException
 
 
 -- !query
-CREATE TABLE IDENTIFIER('a.b.c')(c1 INT)
+CREATE TABLE IDENTIFIER('a.b.c')(c1 INT) USING csv
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
@@ -902,7 +902,7 @@ CacheTableAsSelect t1, (select my_col from (values (1), 
(2), (1) as (my_col)) gr
 
 
 -- !query
-create table identifier('t2') as (select my_col from (values (1), (2), (1) as 
(my_col)) group by 1)
+create table identifier('t2') using csv as (select my_col from (values (1), 
(2), (1) as (my_col)) group by 1)
 -- !query analysis
 CreateDataSourceTableAsSelectCommand `spark_catalog`.`default`.`t2`, 
ErrorIfExists, [my_col]
    +- Aggregate [my_col#x], [my_col#x]
@@ -914,7 +914,7 @@ CreateDataSourceTableAsSelectCommand 
`spark_catalog`.`default`.`t2`, ErrorIfExis
 -- !query
 insert into identifier('t2') select my_col from (values (3) as (my_col)) group 
by 1
 -- !query analysis
-InsertIntoHadoopFsRelationCommand file:[not included in 
comparison]/{warehouse_dir}/t2, false, Parquet, [path=file:[not included in 
comparison]/{warehouse_dir}/t2], Append, `spark_catalog`.`default`.`t2`, 
org.apache.spark.sql.execution.datasources.InMemoryFileIndex(file:[not included 
in comparison]/{warehouse_dir}/t2), [my_col]
+InsertIntoHadoopFsRelationCommand file:[not included in 
comparison]/{warehouse_dir}/t2, false, CSV, [path=file:[not included in 
comparison]/{warehouse_dir}/t2], Append, `spark_catalog`.`default`.`t2`, 
org.apache.spark.sql.execution.datasources.InMemoryFileIndex(file:[not included 
in comparison]/{warehouse_dir}/t2), [my_col]
 +- Aggregate [my_col#x], [my_col#x]
    +- SubqueryAlias __auto_generated_subquery_name
       +- SubqueryAlias as
diff --git a/sql/core/src/test/resources/sql-tests/inputs/identifier-clause.sql 
b/sql/core/src/test/resources/sql-tests/inputs/identifier-clause.sql
index 9e6314202b5f..e85fdf7b5da3 100644
--- a/sql/core/src/test/resources/sql-tests/inputs/identifier-clause.sql
+++ b/sql/core/src/test/resources/sql-tests/inputs/identifier-clause.sql
@@ -109,8 +109,8 @@ VALUES(IDENTIFIER(1));
 VALUES(IDENTIFIER(SUBSTR('HELLO', 1, RAND() + 1)));
 SELECT `IDENTIFIER`('abs')(c1) FROM VALUES(-1) AS T(c1);
 
-CREATE TABLE IDENTIFIER(1)(c1 INT);
-CREATE TABLE IDENTIFIER('a.b.c')(c1 INT);
+CREATE TABLE IDENTIFIER(1)(c1 INT) USING csv;
+CREATE TABLE IDENTIFIER('a.b.c')(c1 INT) USING csv;
 CREATE VIEW IDENTIFIER('a.b.c')(c1) AS VALUES(1);
 DROP TABLE IDENTIFIER('a.b.c');
 DROP VIEW IDENTIFIER('a.b.c');
@@ -125,7 +125,7 @@ CREATE TEMPORARY VIEW IDENTIFIER('default.v')(c1) AS 
VALUES(1);
 -- SPARK-48273: Aggregation operation in statements using identifier clause 
for table name
 create temporary view identifier('v1') as (select my_col from (values (1), 
(2), (1) as (my_col)) group by 1);
 cache table identifier('t1') as (select my_col from (values (1), (2), (1) as 
(my_col)) group by 1);
-create table identifier('t2') as (select my_col from (values (1), (2), (1) as 
(my_col)) group by 1);
+create table identifier('t2') using csv as (select my_col from (values (1), 
(2), (1) as (my_col)) group by 1);
 insert into identifier('t2') select my_col from (values (3) as (my_col)) group 
by 1;
 drop view v1;
 drop table t1;
diff --git 
a/sql/core/src/test/resources/sql-tests/results/identifier-clause.sql.out 
b/sql/core/src/test/resources/sql-tests/results/identifier-clause.sql.out
index 4d62c371a171..62f43152c48d 100644
--- a/sql/core/src/test/resources/sql-tests/results/identifier-clause.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/identifier-clause.sql.out
@@ -794,7 +794,7 @@ org.apache.spark.sql.AnalysisException
 
 
 -- !query
-CREATE TABLE IDENTIFIER(1)(c1 INT)
+CREATE TABLE IDENTIFIER(1)(c1 INT) USING csv
 -- !query schema
 struct<>
 -- !query output
@@ -818,7 +818,7 @@ org.apache.spark.sql.AnalysisException
 
 
 -- !query
-CREATE TABLE IDENTIFIER('a.b.c')(c1 INT)
+CREATE TABLE IDENTIFIER('a.b.c')(c1 INT) USING csv
 -- !query schema
 struct<>
 -- !query output
@@ -1027,7 +1027,7 @@ struct<>
 
 
 -- !query
-create table identifier('t2') as (select my_col from (values (1), (2), (1) as 
(my_col)) group by 1)
+create table identifier('t2') using csv as (select my_col from (values (1), 
(2), (1) as (my_col)) group by 1)
 -- !query schema
 struct<>
 -- !query output


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to