This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 9b40c0cdc5c [SPARK-43611][PS][CONNECT][TESTS][FOLLOWUPS] Enable more tests
9b40c0cdc5c is described below

commit 9b40c0cdc5cf8e24e66f7ee8e122702d3f157291
Author: Ruifeng Zheng <ruife...@apache.org>
AuthorDate: Thu Jul 27 16:30:47 2023 +0800

    [SPARK-43611][PS][CONNECT][TESTS][FOLLOWUPS] Enable more tests
    
    ### What changes were proposed in this pull request?
    Enable more tests; they were excluded from https://github.com/apache/spark/pull/42086 due to flaky CI issues.
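    
    As a minimal illustrative sketch of the pattern this diff applies (the class names below are hypothetical, not taken verbatim from the changed files), re-enabling one of these tests means deleting the `@unittest.skip` override so the test inherited from the shared mixin runs against Spark Connect again:
    
    ```python
    import unittest
    
    
    class ParityMixinExample(unittest.TestCase):
        """Stand-in for a shared test mixin such as FrameComputeMixin."""
    
        def test_diff(self):
            self.assertEqual(1 + 1, 2)  # placeholder for the real parity check
    
    
    class BeforeThisCommit(ParityMixinExample):
        # Previously, the Connect subclass overrode the inherited test only to skip it.
        @unittest.skip(
            "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark Connect client."
        )
        def test_diff(self):
            super().test_diff()
    
    
    class AfterThisCommit(ParityMixinExample):
        # With the override removed, the inherited test_diff runs unchanged.
        pass
    
    
    if __name__ == "__main__":
        unittest.main()
    ```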
    
    ### Why are the changes needed?
    for test parity
    
    ### Does this PR introduce _any_ user-facing change?
    no, test-only
    
    ### How was this patch tested?
    Enabled the tests.
    
    Closes #42182 from zhengruifeng/spark_43611_followup.
    
    Authored-by: Ruifeng Zheng <ruife...@apache.org>
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
---
 .../connect/computation/test_parity_compute.py     | 12 ------
 .../connect/computation/test_parity_cumulative.py  | 48 ----------------------
 .../diff_frames_ops/test_parity_basic_slow.py      | 18 +-------
 .../tests/connect/frame/test_parity_time_series.py |  6 ---
 .../connect/groupby/test_parity_cumulative.py      | 30 +-------------
 .../tests/connect/groupby/test_parity_groupby.py   | 18 +-------
 .../connect/groupby/test_parity_missing_data.py    | 18 +-------
 .../tests/connect/indexes/test_parity_base.py      |  6 ---
 .../connect/indexes/test_parity_reset_index.py     |  6 ---
 .../tests/connect/test_parity_default_index.py     |  6 ---
 .../tests/connect/test_parity_generic_functions.py |  6 +--
 ...st_parity_ops_on_diff_frames_groupby_rolling.py | 42 +------------------
 12 files changed, 6 insertions(+), 210 deletions(-)

diff --git a/python/pyspark/pandas/tests/connect/computation/test_parity_compute.py b/python/pyspark/pandas/tests/connect/computation/test_parity_compute.py
index 88eeb735d46..e2b92190b6e 100644
--- a/python/pyspark/pandas/tests/connect/computation/test_parity_compute.py
+++ b/python/pyspark/pandas/tests/connect/computation/test_parity_compute.py
@@ -27,22 +27,10 @@ class FrameParityComputeTests(FrameComputeMixin, PandasOnSparkTestUtils, ReusedC
     def psdf(self):
         return ps.from_pandas(self.pdf)
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_diff(self):
-        super().test_diff()
-
     @unittest.skip("Spark Connect does not support RDD but the tests depend on 
them.")
     def test_mode(self):
         super().test_mode()
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_pct_change(self):
-        super().test_pct_change()
-
     @unittest.skip("TODO(SPARK-43618): Fix pyspark.sq.column._unary_op to work 
with Spark Connect.")
     def test_rank(self):
         super().test_rank()
diff --git a/python/pyspark/pandas/tests/connect/computation/test_parity_cumulative.py b/python/pyspark/pandas/tests/connect/computation/test_parity_cumulative.py
index 8015d90aaa5..e14d296749c 100644
--- a/python/pyspark/pandas/tests/connect/computation/test_parity_cumulative.py
+++ b/python/pyspark/pandas/tests/connect/computation/test_parity_cumulative.py
@@ -29,54 +29,6 @@ class FrameParityCumulativeTests(
     def psdf(self):
         return ps.from_pandas(self.pdf)
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummax(self):
-        super().test_cummax()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummax_multiindex_columns(self):
-        super().test_cummax_multiindex_columns()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummin(self):
-        super().test_cummin()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummin_multiindex_columns(self):
-        super().test_cummin_multiindex_columns()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumprod(self):
-        super().test_cumprod()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumprod_multiindex_columns(self):
-        super().test_cumprod_multiindex_columns()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumsum(self):
-        super().test_cumsum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumsum_multiindex_columns(self):
-        super().test_cumsum_multiindex_columns()
-
 
 if __name__ == "__main__":
     from pyspark.pandas.tests.connect.computation.test_parity_cumulative import *  # noqa: F401
diff --git a/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_basic_slow.py b/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_basic_slow.py
index 926caf56979..ef84a8931d3 100644
--- a/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_basic_slow.py
+++ b/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_basic_slow.py
@@ -24,23 +24,7 @@ from pyspark.testing.pandasutils import PandasOnSparkTestUtils
 class DiffFramesParityBasicSlowTests(
     DiffFramesBasicSlowMixin, PandasOnSparkTestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_diff(self):
-        super().test_diff()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rank(self):
-        super().test_rank()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_shift(self):
-        super().test_shift()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/frame/test_parity_time_series.py b/python/pyspark/pandas/tests/connect/frame/test_parity_time_series.py
index ae289edbc85..6b8a93f895e 100644
--- a/python/pyspark/pandas/tests/connect/frame/test_parity_time_series.py
+++ b/python/pyspark/pandas/tests/connect/frame/test_parity_time_series.py
@@ -29,12 +29,6 @@ class FrameParityTimeSeriesTests(
     def psdf(self):
         return ps.from_pandas(self.pdf)
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_shift(self):
-        super().test_shift()
-
 
 if __name__ == "__main__":
     from pyspark.pandas.tests.connect.frame.test_parity_time_series import *  # noqa: F401
diff --git a/python/pyspark/pandas/tests/connect/groupby/test_parity_cumulative.py b/python/pyspark/pandas/tests/connect/groupby/test_parity_cumulative.py
index aaa799bc996..696c283b648 100644
--- a/python/pyspark/pandas/tests/connect/groupby/test_parity_cumulative.py
+++ b/python/pyspark/pandas/tests/connect/groupby/test_parity_cumulative.py
@@ -24,35 +24,7 @@ from pyspark.testing.pandasutils import PandasOnSparkTestUtils
 class GroupbyParityCumulativeTests(
     GroupbyCumulativeMixin, PandasOnSparkTestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumcount(self):
-        super().test_cumcount()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummax(self):
-        super().test_cummax()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummin(self):
-        super().test_cummin()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumprod(self):
-        super().test_cumprod()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumsum(self):
-        super().test_cumsum()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/groupby/test_parity_groupby.py b/python/pyspark/pandas/tests/connect/groupby/test_parity_groupby.py
index 4e9f5108fd9..8293652b476 100644
--- a/python/pyspark/pandas/tests/connect/groupby/test_parity_groupby.py
+++ b/python/pyspark/pandas/tests/connect/groupby/test_parity_groupby.py
@@ -24,23 +24,7 @@ from pyspark.testing.pandasutils import PandasOnSparkTestUtils, TestUtils
 class GroupByParityTests(
     GroupByTestsMixin, PandasOnSparkTestUtils, TestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_shift(self):
-        super().test_shift()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_diff(self):
-        super().test_diff()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rank(self):
-        super().test_rank()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/groupby/test_parity_missing_data.py b/python/pyspark/pandas/tests/connect/groupby/test_parity_missing_data.py
index 1ca101ef545..752e8568fbd 100644
--- a/python/pyspark/pandas/tests/connect/groupby/test_parity_missing_data.py
+++ b/python/pyspark/pandas/tests/connect/groupby/test_parity_missing_data.py
@@ -24,23 +24,7 @@ from pyspark.testing.pandasutils import PandasOnSparkTestUtils
 class GroupbyParityMissingDataTests(
     GroupbyMissingDataMixin, PandasOnSparkTestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_bfill(self):
-        super().test_bfill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_ffill(self):
-        super().test_ffill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_fillna(self):
-        super().test_fillna()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py b/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py
index b1e185389f3..d5dec01bde5 100644
--- a/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py
+++ b/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py
@@ -35,12 +35,6 @@ class IndexesParityTests(
     def test_append(self):
         super().test_append()
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_monotonic(self):
-        super().test_monotonic()
-
     @unittest.skip("TODO(SPARK-43620): Support `Column` for 
SparkConnectColumn.__getitem__.")
     def test_factorize(self):
         super().test_factorize()
diff --git a/python/pyspark/pandas/tests/connect/indexes/test_parity_reset_index.py b/python/pyspark/pandas/tests/connect/indexes/test_parity_reset_index.py
index 6647d76735b..c19460946d1 100644
--- a/python/pyspark/pandas/tests/connect/indexes/test_parity_reset_index.py
+++ b/python/pyspark/pandas/tests/connect/indexes/test_parity_reset_index.py
@@ -29,12 +29,6 @@ class FrameParityResetIndexTests(
     def psdf(self):
         return ps.from_pandas(self.pdf)
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_reset_index_with_default_index_types(self):
-        super().test_reset_index_with_default_index_types()
-
 
 if __name__ == "__main__":
     from pyspark.pandas.tests.connect.indexes.test_parity_reset_index import *  # noqa: F401
diff --git a/python/pyspark/pandas/tests/connect/test_parity_default_index.py b/python/pyspark/pandas/tests/connect/test_parity_default_index.py
index c5410e6dd58..1e95fac9285 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_default_index.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_default_index.py
@@ -24,12 +24,6 @@ from pyspark.testing.pandasutils import PandasOnSparkTestUtils
 class DefaultIndexParityTests(
     DefaultIndexTestsMixin, PandasOnSparkTestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_default_index_sequence(self):
-        super().test_default_index_sequence()
-
     @unittest.skip(
         "TODO(SPARK-43623): Enable 
DefaultIndexParityTests.test_index_distributed_sequence_cleanup."
     )
diff --git a/python/pyspark/pandas/tests/connect/test_parity_generic_functions.py b/python/pyspark/pandas/tests/connect/test_parity_generic_functions.py
index 1bf2650d874..158215073ad 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_generic_functions.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_generic_functions.py
@@ -24,11 +24,7 @@ from pyspark.testing.pandasutils import PandasOnSparkTestUtils, TestUtils
 class GenericFunctionsParityTests(
     GenericFunctionsTestsMixin, TestUtils, PandasOnSparkTestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_interpolate(self):
-        super().test_interpolate()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_rolling.py b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_rolling.py
index dd82e443256..4a52bb0748f 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_rolling.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_rolling.py
@@ -29,47 +29,7 @@ class OpsOnDiffFramesGroupByRollingParityTests(
     TestUtils,
     ReusedConnectTestCase,
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_count(self):
-        super().test_groupby_rolling_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_min(self):
-        super().test_groupby_rolling_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_max(self):
-        super().test_groupby_rolling_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_mean(self):
-        super().test_groupby_rolling_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_sum(self):
-        super().test_groupby_rolling_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_std(self):
-        super().test_groupby_rolling_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_var(self):
-        super().test_groupby_rolling_var()
+    pass
 
 
 if __name__ == "__main__":

