This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 185a0a5a239 [SPARK-43611][SQL][PS][CONNECT] Make 
`ExtractWindowExpressions` retain the `PLAN_ID_TAG`
185a0a5a239 is described below

commit 185a0a5a23958676e4236eaf9e4d78cdfd2dd2d7
Author: Ruifeng Zheng <ruife...@apache.org>
AuthorDate: Thu Jul 27 11:00:18 2023 +0800

    [SPARK-43611][SQL][PS][CONNECT] Make `ExtractWindowExpressions` retain the 
`PLAN_ID_TAG`
    
    ### What changes were proposed in this pull request?
    Make rule `ExtractWindowExpressions` retain the `PLAN_ID_TAG`
    
    ### Why are the changes needed?
    In https://github.com/apache/spark/pull/39925, we introduced a new 
mechanism to resolve expression with specified plan.
    
    However, sometimes the plan ID might be discarded by some analyzer rules, 
and then some expressions cannot be correctly resolved; this issue is the main 
blocker of PS on Connect.
    
    ### Does this PR introduce _any_ user-facing change?
    yes, a lot of Pandas APIs enabled
    
    ### How was this patch tested?
    Enable UTs
    
    Closes #42086 from zhengruifeng/ps_connect_analyze_window.
    
    Authored-by: Ruifeng Zheng <ruife...@apache.org>
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
---
 .../computation/test_parity_missing_data.py        |  30 ------
 .../tests/connect/series/test_parity_compute.py    |  16 ---
 .../tests/connect/series/test_parity_cumulative.py |  25 +----
 .../tests/connect/series/test_parity_index.py      |   7 +-
 .../connect/series/test_parity_missing_data.py     |  35 +-----
 .../tests/connect/series/test_parity_stat.py       |  11 +-
 .../pandas/tests/connect/test_parity_ewm.py        |  12 +--
 .../pandas/tests/connect/test_parity_expanding.py  | 120 +--------------------
 .../test_parity_ops_on_diff_frames_groupby.py      |  48 +--------
 ..._parity_ops_on_diff_frames_groupby_expanding.py |  42 +-------
 .../pandas/tests/connect/test_parity_rolling.py    | 120 +--------------------
 .../spark/sql/catalyst/analysis/Analyzer.scala     |  12 ++-
 12 files changed, 21 insertions(+), 457 deletions(-)

diff --git 
a/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py 
b/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py
index a88c8692eca..d2ff09e5e8a 100644
--- 
a/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py
+++ 
b/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py
@@ -29,36 +29,6 @@ class FrameParityMissingDataTests(
     def psdf(self):
         return ps.from_pandas(self.pdf)
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_backfill(self):
-        super().test_backfill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_bfill(self):
-        super().test_bfill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_ffill(self):
-        super().test_ffill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_fillna(self):
-        return super().test_fillna()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_pad(self):
-        super().test_pad()
-
 
 if __name__ == "__main__":
     from pyspark.pandas.tests.connect.computation.test_parity_missing_data 
import *  # noqa: F401
diff --git a/python/pyspark/pandas/tests/connect/series/test_parity_compute.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_compute.py
index 00e35b27e8f..f757d19ca69 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_compute.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_compute.py
@@ -22,22 +22,6 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 
 
 class SeriesParityComputeTests(SeriesComputeMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_diff(self):
-        super().test_diff()
-
-    @unittest.skip("TODO(SPARK-43620): Support `Column` for 
SparkConnectColumn.__getitem__.")
-    def test_factorize(self):
-        super().test_factorize()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_shift(self):
-        super().test_shift()
-
     @unittest.skip(
         "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
     )
diff --git 
a/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py
index f7cd03e057a..c5c61e1f33b 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py
@@ -24,29 +24,8 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 class SeriesParityCumulativeTests(
     SeriesCumulativeMixin, PandasOnSparkTestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummax(self):
-        super().test_cummax()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummin(self):
-        super().test_cummin()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumprod(self):
-        super().test_cumprod()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumsum(self):
-        super().test_cumsum()
+
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/series/test_parity_index.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_index.py
index 81da3e44d6d..2b92cce61fb 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_index.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_index.py
@@ -22,11 +22,8 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 
 
 class SeriesParityIndexTests(SeriesIndexMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_reset_index_with_default_index_types(self):
-        super().test_reset_index_with_default_index_types()
+
+    pass
 
 
 if __name__ == "__main__":
diff --git 
a/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py
index e648173289c..a95b312bce6 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py
@@ -24,41 +24,8 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 class SeriesParityMissingDataTests(
     SeriesMissingDataMixin, PandasOnSparkTestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_backfill(self):
-        super().test_backfill()
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_bfill(self):
-        super().test_bfill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_ffill(self):
-        super().test_ffill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_fillna(self):
-        super().test_fillna()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_pad(self):
-        super().test_pad()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_replace(self):
-        super().test_replace()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/series/test_parity_stat.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_stat.py
index 17e83fa3b47..916e120f99d 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_stat.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_stat.py
@@ -22,15 +22,8 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 
 
 class SeriesParityStatTests(SeriesStatMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_pct_change(self):
-        super().test_pct_change()
-
-    @unittest.skip("TODO(SPARK-43618): Fix pyspark.sq.column._unary_op to work 
with Spark Connect.")
-    def test_rank(self):
-        super().test_rank()
+
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/test_parity_ewm.py 
b/python/pyspark/pandas/tests/connect/test_parity_ewm.py
index e079f847296..74872820333 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_ewm.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_ewm.py
@@ -22,17 +22,7 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils, TestUtils
 
 
 class EWMParityTests(EWMTestsMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase, TestUtils):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_ewm_mean(self):
-        super().test_ewm_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_ewm_func(self):
-        super().test_groupby_ewm_func()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/test_parity_expanding.py 
b/python/pyspark/pandas/tests/connect/test_parity_expanding.py
index a6f2cf9bc3c..7f8b1a3cac2 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_expanding.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_expanding.py
@@ -24,125 +24,7 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils, TestUtils
 class ExpandingParityTests(
     ExpandingTestsMixin, PandasOnSparkTestUtils, TestUtils, 
ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_count(self):
-        super().test_expanding_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_kurt(self):
-        super().test_expanding_kurt()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_max(self):
-        super().test_expanding_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_mean(self):
-        super().test_expanding_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_min(self):
-        super().test_expanding_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_quantile(self):
-        super().test_expanding_quantile()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_skew(self):
-        super().test_expanding_skew()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_std(self):
-        super().test_expanding_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_sum(self):
-        super().test_expanding_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_var(self):
-        super().test_expanding_var()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_count(self):
-        super().test_groupby_expanding_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_kurt(self):
-        super().test_groupby_expanding_kurt()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_max(self):
-        super().test_groupby_expanding_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_mean(self):
-        super().test_groupby_expanding_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_min(self):
-        super().test_groupby_expanding_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_quantile(self):
-        super().test_groupby_expanding_quantile()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_skew(self):
-        super().test_groupby_expanding_skew()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_std(self):
-        super().test_groupby_expanding_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_sum(self):
-        super().test_groupby_expanding_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_var(self):
-        super().test_groupby_expanding_var()
+    pass
 
 
 if __name__ == "__main__":
diff --git 
a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py 
b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py
index 5d6b6a80b9b..685ec5c45c5 100644
--- 
a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py
+++ 
b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py
@@ -24,53 +24,7 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 class OpsOnDiffFramesGroupByParityTests(
     OpsOnDiffFramesGroupByTestsMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumcount(self):
-        super().test_cumcount()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummax(self):
-        super().test_cummax()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummin(self):
-        super().test_cummin()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumprod(self):
-        super().test_cumprod()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumsum(self):
-        super().test_cumsum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_diff(self):
-        super().test_diff()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_fillna(self):
-        super().test_fillna()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_shift(self):
-        super().test_shift()
+    pass
 
 
 if __name__ == "__main__":
diff --git 
a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py
 
b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py
index 90fa36f3b98..c373268cdb2 100644
--- 
a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py
+++ 
b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py
@@ -29,47 +29,7 @@ class OpsOnDiffFramesGroupByExpandingParityTests(
     TestUtils,
     ReusedConnectTestCase,
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_count(self):
-        super().test_groupby_expanding_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_min(self):
-        super().test_groupby_expanding_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_max(self):
-        super().test_groupby_expanding_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_mean(self):
-        super().test_groupby_expanding_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_sum(self):
-        super().test_groupby_expanding_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_std(self):
-        super().test_groupby_expanding_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_var(self):
-        super().test_groupby_expanding_var()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/test_parity_rolling.py 
b/python/pyspark/pandas/tests/connect/test_parity_rolling.py
index 712c1a10df9..8318bed24f0 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_rolling.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_rolling.py
@@ -24,125 +24,7 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils, TestUtils
 class RollingParityTests(
     RollingTestsMixin, PandasOnSparkTestUtils, TestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_count(self):
-        super().test_groupby_rolling_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_kurt(self):
-        super().test_groupby_rolling_kurt()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_max(self):
-        super().test_groupby_rolling_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_mean(self):
-        super().test_groupby_rolling_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_min(self):
-        super().test_groupby_rolling_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_quantile(self):
-        super().test_groupby_rolling_quantile()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_skew(self):
-        super().test_groupby_rolling_skew()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_std(self):
-        super().test_groupby_rolling_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_sum(self):
-        super().test_groupby_rolling_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_var(self):
-        super().test_groupby_rolling_var()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_count(self):
-        super().test_rolling_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_kurt(self):
-        super().test_rolling_kurt()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_max(self):
-        super().test_rolling_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_mean(self):
-        super().test_rolling_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_min(self):
-        super().test_rolling_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_quantile(self):
-        super().test_rolling_quantile()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_skew(self):
-        super().test_rolling_skew()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_std(self):
-        super().test_rolling_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_sum(self):
-        super().test_rolling_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_var(self):
-        super().test_rolling_var()
+    pass
 
 
 if __name__ == "__main__":
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 49dea4fa03e..bcc5574dd82 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -3124,7 +3124,9 @@ class Analyzer(override val catalogManager: 
CatalogManager) extends RuleExecutor
 
         // Finally, generate output columns according to the original 
projectList.
         val finalProjectList = aggregateExprs.map(_.toAttribute)
-        Project(finalProjectList, withWindow)
+        val newProject = Project(finalProjectList, withWindow)
+        newProject.copyTagsFrom(f)
+        newProject
 
       case p: LogicalPlan if !p.childrenResolved => p
 
@@ -3142,7 +3144,9 @@ class Analyzer(override val catalogManager: 
CatalogManager) extends RuleExecutor
 
         // Finally, generate output columns according to the original 
projectList.
         val finalProjectList = aggregateExprs.map(_.toAttribute)
-        Project(finalProjectList, withWindow)
+        val newProject = Project(finalProjectList, withWindow)
+        newProject.copyTagsFrom(a)
+        newProject
 
       // We only extract Window Expressions after all expressions of the 
Project
       // have been resolved, and lateral column aliases are properly handled 
first.
@@ -3159,7 +3163,9 @@ class Analyzer(override val catalogManager: 
CatalogManager) extends RuleExecutor
 
         // Finally, generate output columns according to the original 
projectList.
         val finalProjectList = projectList.map(_.toAttribute)
-        Project(finalProjectList, withWindow)
+        val newProject = Project(finalProjectList, withWindow)
+        newProject.copyTagsFrom(p)
+        newProject
     }
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to