This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 201df0d7ac81 [MINOR][PYTHON][TESTS] Move a test out of parity tests
201df0d7ac81 is described below

commit 201df0d7ac81f6bd5c39f513b0a06cb659dc9a3f
Author: Ruifeng Zheng <ruife...@apache.org>
AuthorDate: Sat Jun 8 07:49:15 2024 +0800

    [MINOR][PYTHON][TESTS] Move a test out of parity tests
    
    ### What changes were proposed in this pull request?
    Move a test out of parity tests
    
    ### Why are the changes needed?
    It is not tested in Spark Classic, so it is not a parity test.
    
    ### Does this PR introduce _any_ user-facing change?
    no
    
    ### How was this patch tested?
    ci
    
    ### Was this patch authored or co-authored using generative AI tooling?
    no
    
    Closes #46914 from zhengruifeng/move_a_non_parity_test.
    
    Authored-by: Ruifeng Zheng <ruife...@apache.org>
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
---
 .../connect/test_connect_dataframe_property.py     | 23 +++++++++++++++++++++
 .../sql/tests/connect/test_parity_dataframe.py     | 24 ----------------------
 2 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/python/pyspark/sql/tests/connect/test_connect_dataframe_property.py b/python/pyspark/sql/tests/connect/test_connect_dataframe_property.py
index f80f4509a7ce..c87c44760256 100644
--- a/python/pyspark/sql/tests/connect/test_connect_dataframe_property.py
+++ b/python/pyspark/sql/tests/connect/test_connect_dataframe_property.py
@@ -37,6 +37,29 @@ if have_pandas:
 
 
 class SparkConnectDataFramePropertyTests(SparkConnectSQLTestCase):
+    def test_cached_property_is_copied(self):
+        schema = StructType(
+            [
+                StructField("id", IntegerType(), True),
+                StructField("name", StringType(), True),
+                StructField("age", IntegerType(), True),
+                StructField("city", StringType(), True),
+            ]
+        )
+        # Create some dummy data
+        data = [
+            (1, "Alice", 30, "New York"),
+            (2, "Bob", 25, "San Francisco"),
+            (3, "Cathy", 29, "Los Angeles"),
+            (4, "David", 35, "Chicago"),
+        ]
+        df = self.spark.createDataFrame(data, schema)
+        df_columns = df.columns
+        assert len(df.columns) == 4
+        for col in ["id", "name"]:
+            df_columns.remove(col)
+        assert len(df.columns) == 4
+
     def test_cached_schema_to(self):
         cdf = self.connect.read.table(self.tbl_name)
         sdf = self.spark.read.table(self.tbl_name)
diff --git a/python/pyspark/sql/tests/connect/test_parity_dataframe.py b/python/pyspark/sql/tests/connect/test_parity_dataframe.py
index c9888a6a8f1a..343f485553a9 100644
--- a/python/pyspark/sql/tests/connect/test_parity_dataframe.py
+++ b/python/pyspark/sql/tests/connect/test_parity_dataframe.py
@@ -19,7 +19,6 @@ import unittest
 
 from pyspark.sql.tests.test_dataframe import DataFrameTestsMixin
 from pyspark.testing.connectutils import ReusedConnectTestCase
-from pyspark.sql.types import StructType, StructField, IntegerType, StringType
 
 
 class DataFrameParityTests(DataFrameTestsMixin, ReusedConnectTestCase):
@@ -27,29 +26,6 @@ class DataFrameParityTests(DataFrameTestsMixin, ReusedConnectTestCase):
         df = self.spark.createDataFrame(data=[{"foo": "bar"}, {"foo": "baz"}])
         super().check_help_command(df)
 
-    def test_cached_property_is_copied(self):
-        schema = StructType(
-            [
-                StructField("id", IntegerType(), True),
-                StructField("name", StringType(), True),
-                StructField("age", IntegerType(), True),
-                StructField("city", StringType(), True),
-            ]
-        )
-        # Create some dummy data
-        data = [
-            (1, "Alice", 30, "New York"),
-            (2, "Bob", 25, "San Francisco"),
-            (3, "Cathy", 29, "Los Angeles"),
-            (4, "David", 35, "Chicago"),
-        ]
-        df = self.spark.createDataFrame(data, schema)
-        df_columns = df.columns
-        assert len(df.columns) == 4
-        for col in ["id", "name"]:
-            df_columns.remove(col)
-        assert len(df.columns) == 4
-
     @unittest.skip("Spark Connect does not support RDD but the tests depend on them.")
     def test_toDF_with_schema_string(self):
         super().test_toDF_with_schema_string()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to