HyukjinKwon commented on PR #46417:
URL: https://github.com/apache/spark/pull/46417#issuecomment-2101691495

   ```
   ======================================================================
   ERROR [0.522s]: test_string_rsplit (pyspark.pandas.tests.connect.test_parity_series_string.SeriesStringParityTests)
   ----------------------------------------------------------------------
   Traceback (most recent call last):
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/tests/test_series_string.py", line 319, in test_string_rsplit
       self.check_func_on_series(lambda x: repr(x.str.rsplit()), self.pser[:-1])
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/tests/test_series_string.py", line 51, in check_func_on_series
       self.assert_eq(func(ps.from_pandas(pser)), func(pser), almost=almost)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/tests/test_series_string.py", line 319, in <lambda>
       self.check_func_on_series(lambda x: repr(x.str.rsplit()), self.pser[:-1])
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/series.py", line 7342, in __repr__
       pser = self._psdf._get_or_create_repr_pandas_cache(max_display_count)[self.name]
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/frame.py", line 13393, in _get_or_create_repr_pandas_cache
       self, "_repr_pandas_cache", {n: self.head(n + 1)._to_internal_pandas()}
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/frame.py", line 13388, in _to_internal_pandas
       return self._internal.to_pandas_frame
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/utils.py", line 600, in wrapped_lazy_property
       setattr(self, attr_name, fn(self))
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/internal.py", line 1115, in to_pandas_frame
       pdf = sdf.toPandas()
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/dataframe.py", line 1663, in toPandas
       return self._session.client.to_pandas(query)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 873, in to_pandas
       table, schema, metrics, observed_metrics, _ = self._execute_and_fetch(
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1283, in _execute_and_fetch
       for response in self._execute_and_fetch_as_iterator(req):
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1264, in _execute_and_fetch_as_iterator
       self._handle_error(error)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1503, in _handle_error
       self._handle_rpc_error(error)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1539, in _handle_rpc_error
       raise convert_exception(info, status.message) from None
   pyspark.errors.exceptions.connect.PythonException: 
     An exception was thrown from the Python worker. Please see the stack trace below.
   Traceback (most recent call last):
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1834, in main
       process()
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1826, in process
       serializer.dump_stream(out_iter, outfile)
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/sql/pandas/serializers.py", line 531, in dump_stream
       return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/sql/pandas/serializers.py", line 104, in dump_stream
       for batch in iterator:
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/sql/pandas/serializers.py", line 524, in init_stream_yield_batches
       for series in iterator:
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1734, in mapper
       result = tuple(f(*[a[o] for o in arg_offsets]) for arg_offsets, f in udfs)
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1734, in <genexpr>
       result = tuple(f(*[a[o] for o in arg_offsets]) for arg_offsets, f in udfs)
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 146, in <lambda>
       verify_result_length(verify_result_type(func(*a)), len(a[0])),
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/util.py", line 134, in wrapper
       return f(*args, **kwargs)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/strings.py", line 2177, in pudf
       return s.str.rsplit(pat, n)
     File "/usr/share/miniconda/envs/server-env/lib/python3.10/site-packages/pandas/core/strings/accessor.py", line 137, in wrapper
       return func(self, *args, **kwargs)
   TypeError: StringMethods.rsplit() takes from 1 to 2 positional arguments but 3 were given
   ```
   ```
   ======================================================================
   ERROR [0.455s]: test_string_split (pyspark.pandas.tests.connect.test_parity_series_string.SeriesStringParityTests)
   ----------------------------------------------------------------------
   Traceback (most recent call last):
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/tests/test_series_string.py", line 305, in test_string_split
       self.check_func_on_series(lambda x: repr(x.str.split()), self.pser[:-1])
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/tests/test_series_string.py", line 51, in check_func_on_series
       self.assert_eq(func(ps.from_pandas(pser)), func(pser), almost=almost)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/tests/test_series_string.py", line 305, in <lambda>
       self.check_func_on_series(lambda x: repr(x.str.split()), self.pser[:-1])
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/series.py", line 7342, in __repr__
       pser = self._psdf._get_or_create_repr_pandas_cache(max_display_count)[self.name]
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/frame.py", line 13393, in _get_or_create_repr_pandas_cache
       self, "_repr_pandas_cache", {n: self.head(n + 1)._to_internal_pandas()}
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/frame.py", line 13388, in _to_internal_pandas
       return self._internal.to_pandas_frame
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/utils.py", line 600, in wrapped_lazy_property
       setattr(self, attr_name, fn(self))
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/internal.py", line 1115, in to_pandas_frame
       pdf = sdf.toPandas()
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/dataframe.py", line 1663, in toPandas
       return self._session.client.to_pandas(query)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 873, in to_pandas
       table, schema, metrics, observed_metrics, _ = self._execute_and_fetch(
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1283, in _execute_and_fetch
       for response in self._execute_and_fetch_as_iterator(req):
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1264, in _execute_and_fetch_as_iterator
       self._handle_error(error)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1503, in _handle_error
       self._handle_rpc_error(error)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1539, in _handle_rpc_error
       raise convert_exception(info, status.message) from None
   pyspark.errors.exceptions.connect.PythonException: 
     An exception was thrown from the Python worker. Please see the stack trace below.
   Traceback (most recent call last):
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1834, in main
       process()
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1826, in process
       serializer.dump_stream(out_iter, outfile)
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/sql/pandas/serializers.py", line 531, in dump_stream
       return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/sql/pandas/serializers.py", line 104, in dump_stream
       for batch in iterator:
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/sql/pandas/serializers.py", line 524, in init_stream_yield_batches
       for series in iterator:
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1734, in mapper
       result = tuple(f(*[a[o] for o in arg_offsets]) for arg_offsets, f in udfs)
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1734, in <genexpr>
       result = tuple(f(*[a[o] for o in arg_offsets]) for arg_offsets, f in udfs)
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 146, in <lambda>
       verify_result_length(verify_result_type(func(*a)), len(a[0])),
     File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/util.py", line 134, in wrapper
       return f(*args, **kwargs)
     File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/strings.py", line 2030, in pudf
       return s.str.split(pat, n)
     File "/usr/share/miniconda/envs/server-env/lib/python3.10/site-packages/pandas/core/strings/accessor.py", line 137, in wrapper
       return func(self, *args, **kwargs)
   TypeError: StringMethods.split() takes from 1 to 2 positional arguments but 3 were given
   ----------------------------------------------------------------------
   ```
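   Both failures look like the same root cause: on the pandas version installed in this environment, `Series.str.split` / `Series.str.rsplit` accept only `pat` positionally, so the positional `n` passed from `pyspark/pandas/strings.py` (`s.str.split(pat, n)` / `s.str.rsplit(pat, n)`) trips the `TypeError` above. A minimal sketch of the behavior, assuming pandas 2.x keyword-only semantics (illustrative only, not the actual patch):
   ```
   import pandas as pd

   s = pd.Series(["a b c", "d e f"])

   # On pandas 2.x, split()/rsplit() take only `pat` positionally, so passing
   # (pat, n) positionally raises the TypeError seen in the logs:
   # s.str.rsplit(None, 2)  # TypeError: takes from 1 to 2 positional arguments but 3 were given

   # Passing `n` as a keyword is accepted by both old and new pandas.
   print(s.str.split(None, n=2))
   print(s.str.rsplit(None, n=2))
   ```
   If that holds, switching the internal calls to keyword form (`n=n`) would likely restore parity on both pandas versions.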

