This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 82a22606c59 [SPARK-41697][CONNECT][TESTS] Enable test_df_show, 
test_drop, test_dropna, test_toDF_with_schema_string and 
test_with_columns_renamed
82a22606c59 is described below

commit 82a22606c5951e8d0a9c270595d63d6836f2d51b
Author: Hyukjin Kwon <gurwls...@apache.org>
AuthorDate: Fri Dec 23 21:52:58 2022 +0900

    [SPARK-41697][CONNECT][TESTS] Enable test_df_show, test_drop, test_dropna, 
test_toDF_with_schema_string and test_with_columns_renamed
    
    ### What changes were proposed in this pull request?
    
    This PR enables the reused PySpark tests in Spark Connect that pass now.
    
    ### Why are the changes needed?
    
    To make sure of the test coverage.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, test-only.
    
    ### How was this patch tested?
    
    Manually ran it locally.
    
    Closes #39193 from HyukjinKwon/SPARK-41697.
    
    Authored-by: Hyukjin Kwon <gurwls...@apache.org>
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
---
 .../sql/tests/connect/test_parity_dataframe.py     | 24 ----------------------
 1 file changed, 24 deletions(-)

diff --git a/python/pyspark/sql/tests/connect/test_parity_dataframe.py 
b/python/pyspark/sql/tests/connect/test_parity_dataframe.py
index ccb5dd45b54..7dfdc8de751 100644
--- a/python/pyspark/sql/tests/connect/test_parity_dataframe.py
+++ b/python/pyspark/sql/tests/connect/test_parity_dataframe.py
@@ -71,22 +71,10 @@ class DataFrameParityTests(DataFrameTestsMixin, 
ReusedSQLTestCase):
     def test_create_nan_decimal_dataframe(self):
         super().test_create_nan_decimal_dataframe()
 
-    @unittest.skip("Fails in Spark Connect, should enable.")
-    def test_df_show(self):
-        super().test_df_show()
-
-    @unittest.skip("Fails in Spark Connect, should enable.")
-    def test_drop(self):
-        super().test_drop()
-
     @unittest.skip("Fails in Spark Connect, should enable.")
     def test_drop_duplicates(self):
         super().test_drop_duplicates()
 
-    @unittest.skip("Fails in Spark Connect, should enable.")
-    def test_dropna(self):
-        super().test_dropna()
-
     @unittest.skip("Fails in Spark Connect, should enable.")
     def test_duplicated_column_names(self):
         super().test_duplicated_column_names()
@@ -99,10 +87,6 @@ class DataFrameParityTests(DataFrameTestsMixin, 
ReusedSQLTestCase):
     def test_fillna(self):
         super().test_fillna()
 
-    @unittest.skip("Fails in Spark Connect, should enable.")
-    def test_freqItems(self):
-        super().test_freqItems()
-
     @unittest.skip("Fails in Spark Connect, should enable.")
     def test_generic_hints(self):
         super().test_generic_hints()
@@ -163,10 +147,6 @@ class DataFrameParityTests(DataFrameTestsMixin, 
ReusedSQLTestCase):
     def test_to(self):
         super().test_to()
 
-    @unittest.skip("Fails in Spark Connect, should enable.")
-    def test_toDF_with_schema_string(self):
-        super().test_toDF_with_schema_string()
-
     @unittest.skip("Fails in Spark Connect, should enable.")
     def test_to_local_iterator(self):
         super().test_to_local_iterator()
@@ -219,10 +199,6 @@ class DataFrameParityTests(DataFrameTestsMixin, 
ReusedSQLTestCase):
     def test_unpivot(self):
         super().test_unpivot()
 
-    @unittest.skip("Fails in Spark Connect, should enable.")
-    def test_with_columns_renamed(self):
-        super().test_with_columns_renamed()
-
 
 if __name__ == "__main__":
     import unittest


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to