[ 
https://issues.apache.org/jira/browse/SPARK-36031?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Yikun Jiang updated SPARK-36031:
--------------------------------
    Description: 
There are many Series operations that do not follow pandas behavior, such as:

=========================================================
 *>>> pser = pd.Series([1, 2, np.nan], dtype=float)*
 *>>> psser = ps.from_pandas(pser)*
 *>>> pser.astype(int)*
 Traceback (most recent call last):
 File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/IPython/core/interactiveshell.py",
 line 3343, in run_code
 exec(code_obj, self.user_global_ns, self.user_ns)
 File "<ipython-input-30-1ca2ff8756d2>", line 1, in <module>
 pser.astype(int)
 File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/generic.py", 
line 5548, in astype
 new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors,)
 File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/internals/managers.py",
 line 604, in astype
 return self.apply("astype", dtype=dtype, copy=copy, errors=errors)
 File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/internals/managers.py",
 line 409, in apply
 applied = getattr(b, f)(**kwargs)
 File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/internals/blocks.py",
 line 595, in astype
 values = astype_nansafe(vals1d, dtype, copy=True)
 File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/dtypes/cast.py",
 line 968, in astype_nansafe
 raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
 ValueError: Cannot convert non-finite values (NA or inf) to integer
 *>>> psser.astype(int)*
 0    1.0
 1    2.0
 2    NaN
 dtype: float64
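
In other words, pandas refuses to cast a Series that contains NaN to an integer dtype, while pandas-on-Spark silently keeps the float values. A minimal sketch of the kind of guard that would reproduce the pandas error on the pandas-on-Spark side (illustrative only, not the actual fix; the helper name astype_int_strict is made up):

{code:python}
# Illustrative sketch only, not the upstream fix; astype_int_strict is a made-up name.
import numpy as np
import pandas as pd
import pyspark.pandas as ps

def astype_int_strict(psser: ps.Series) -> ps.Series:
    # pandas raises for non-finite values when casting to an integer dtype;
    # emulate that by checking for missing values before the cast
    # (an additional inf check would be needed to match pandas exactly).
    if psser.isnull().any():
        raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
    return psser.astype(int)

psser = ps.from_pandas(pd.Series([1, 2, np.nan], dtype=float))
# astype_int_strict(psser)  # raises ValueError, matching the pandas behavior
{code}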

=========================================================
 *>>> pser = pd.Series([1, 2, np.nan], dtype=float)*
 *>>> psser = ps.from_pandas(pser)*
 *>>> pser ** False*
 0    1.0
 1    1.0
 2    1.0
 dtype: float64

*>>> psser ** False*
 0    1.0
 1    1.0
 2    NaN
 dtype: float64
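
Here pandas follows the convention that x ** 0 is 1.0 even when x is missing, whereas the Spark expression propagates the null. A rough sketch of the pandas semantics written with plain PySpark (column and variable names are illustrative, and this is not necessarily how the fix should be implemented):

{code:python}
# Rough PySpark sketch; names are illustrative, not the actual fix.
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1.0,), (2.0,), (None,)], ["x"])

# pandas: x ** 0 == 1.0 even for a missing value, so guard the zero exponent
# explicitly; a bare pow() would return NULL when x is NULL.
exponent = F.lit(0.0)
result = F.when(exponent == F.lit(0.0), F.lit(1.0)).otherwise(F.pow(F.col("x"), exponent))
df.select(result.alias("x_pow_false")).show()
# all three rows come back as 1.0, matching the pandas output above
{code}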

 

=========================================================

*>>> pser = pd.Series([decimal.Decimal(1.0), decimal.Decimal(2.0), decimal.Decimal(np.nan)])*
 *>>> psser = ps.from_pandas(pser)*
 *>>> pser + 1*
 0      2
 1      3
 2    NaN

*>>> psser + 1*

 *Driver stacktrace:*
 at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2259)
 at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2208)
 at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2207)
 at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
 at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
 at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
 at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2207)
 at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1084)
 at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1084)
 at scala.Option.foreach(Option.scala:407)
 at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1084)
 at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2446)
 at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2388)
 at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2377)
 at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
 at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:873)
 at org.apache.spark.SparkContext.runJob(SparkContext.scala:2208)
 at org.apache.spark.SparkContext.runJob(SparkContext.scala:2303)
 at org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$5(Dataset.scala:3648)
 at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
 at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1437)
 at org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$2(Dataset.scala:3652)
 at org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$2$adapted(Dataset.scala:3629)
 at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3706)
 at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
 at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
 at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
 at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:774)
 at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
 at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3704)
 at org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$1(Dataset.scala:3629)
 at org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$1$adapted(Dataset.scala:3628)
 at org.apache.spark.security.SocketAuthServer$.$anonfun$serveToStream$2(SocketAuthServer.scala:139)
 at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
 at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1437)
 at org.apache.spark.security.SocketAuthServer$.$anonfun$serveToStream$1(SocketAuthServer.scala:141)
 at org.apache.spark.security.SocketAuthServer$.$anonfun$serveToStream$1$adapted(SocketAuthServer.scala:136)
 at org.apache.spark.security.SocketFuncServer.handleConnection(SocketAuthServer.scala:113)
 at org.apache.spark.security.SocketFuncServer.handleConnection(SocketAuthServer.scala:107)
 at org.apache.spark.security.SocketAuthServer$$anon$1.$anonfun$run$4(SocketAuthServer.scala:68)
 at scala.util.Try$.apply(Try.scala:213)
 at org.apache.spark.security.SocketAuthServer$$anon$1.run(SocketAuthServer.scala:68)
 *Caused by: java.lang.NullPointerException*
 at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
 at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
 at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:759)
 at scala.collection.Iterator$SliceIterator.hasNext(Iterator.scala:266)
 at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
 at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:131)
 at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:60)
 at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
 at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
 at org.apache.spark.scheduler.Task.run(Task.scala:131)
 at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:498)
 at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1437)
 at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:501)
 at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
 at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
 at java.lang.Thread.run(Thread.java:748)
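
For reference, pandas treats Decimal('NaN') as a missing value and simply returns NaN from the element-wise addition, while the Spark plan hits the NullPointerException above once the value has been converted to a null in the Decimal column. A small pandas-only reproduction of the expected result (illustrative):

{code:python}
# Pandas-only reproduction of the expected behavior; illustrative sketch.
import decimal
import numpy as np
import pandas as pd

pser = pd.Series([decimal.Decimal(1.0), decimal.Decimal(2.0), decimal.Decimal(np.nan)])
print(pser + 1)
# 0      2
# 1      3
# 2    NaN
# dtype: object
#
# decimal.Decimal(np.nan) is Decimal('NaN'); pandas propagates it as NaN, and
# the expectation in this ticket is that psser + 1 does the same instead of
# failing with the NullPointerException shown above.
{code}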

 

  was:
There are many Series operations that do not follow pandas behavior, such as:

=========================================================
*>>> pser = pd.Series([1, 2, np.nan], dtype=float)*
*>>> psser = ps.from_pandas(pser)*
*>>> pser.astype(int)*
Traceback (most recent call last):
  File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/IPython/core/interactiveshell.py",
 line 3343, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-30-1ca2ff8756d2>", line 1, in <module>
    pser.astype(int)
  File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/generic.py", 
line 5548, in astype
    new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors,)
  File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/internals/managers.py",
 line 604, in astype
    return self.apply("astype", dtype=dtype, copy=copy, errors=errors)
  File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/internals/managers.py",
 line 409, in apply
    applied = getattr(b, f)(**kwargs)
  File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/internals/blocks.py",
 line 595, in astype
    values = astype_nansafe(vals1d, dtype, copy=True)
  File 
"/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/dtypes/cast.py",
 line 968, in astype_nansafe
    raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
ValueError: Cannot convert non-finite values (NA or inf) to integer
*>>> psser.astype(int)*
0    1.0
1    2.0
2    NaN
dtype: float64

=========================================================
*>>> pser = pd.Series([1, 2, np.nan], dtype=float)*
*>>> psser = ps.from_pandas(pser)*
*>>> pser ** False*
pser ** False
Out[6]: 
0    1.0
1    1.0
2    1.0
dtype: float64

*>>> psser ** False*
0    1.0
1    1.0
2    NaN
dtype: float64



> Keep same behavior with pandas for operations of series with nan 
> -----------------------------------------------------------------
>
>                 Key: SPARK-36031
>                 URL: https://issues.apache.org/jira/browse/SPARK-36031
>             Project: Spark
>          Issue Type: Sub-task
>          Components: PySpark
>    Affects Versions: 3.2.0, 3.3.0
>            Reporter: Yikun Jiang
>            Priority: Major
>
> There are many Series operations that do not follow pandas behavior, such as:
> =========================================================
>  *>>> pser = pd.Series([1, 2, np.nan], dtype=float)*
>  *>>> psser = ps.from_pandas(pser)*
>  *>>> pser.astype(int)*
>  Traceback (most recent call last):
>  File 
> "/Users/jiangyikun/venv36/lib/python3.6/site-packages/IPython/core/interactiveshell.py",
>  line 3343, in run_code
>  exec(code_obj, self.user_global_ns, self.user_ns)
>  File "<ipython-input-30-1ca2ff8756d2>", line 1, in <module>
>  pser.astype(int)
>  File 
> "/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/generic.py",
>  line 5548, in astype
>  new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors,)
>  File 
> "/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/internals/managers.py",
>  line 604, in astype
>  return self.apply("astype", dtype=dtype, copy=copy, errors=errors)
>  File 
> "/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/internals/managers.py",
>  line 409, in apply
>  applied = getattr(b, f)(**kwargs)
>  File 
> "/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/internals/blocks.py",
>  line 595, in astype
>  values = astype_nansafe(vals1d, dtype, copy=True)
>  File 
> "/Users/jiangyikun/venv36/lib/python3.6/site-packages/pandas/core/dtypes/cast.py",
>  line 968, in astype_nansafe
>  raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
>  ValueError: Cannot convert non-finite values (NA or inf) to integer
>  *>>> psser.astype(int)*
>  0 1.0
>  1 2.0
>  2 NaN
>  dtype: float64
> =========================================================
>  *>>> pser = pd.Series([1, 2, np.nan], dtype=float)*
>  *>>> psser = ps.from_pandas(pser)*
>  *>>> pser ** False*
>  0 1.0
>  1 1.0
>  2 1.0
>  dtype: float64
> *>>> psser ** False*
>  0 1.0
>  1 1.0
>  2 NaN
>  dtype: float64
>  
> =========================================================
> *>>> pser = pd.Series([decimal.Decimal(1.0), decimal.Decimal(2.0), 
> decimal.Decimal(np.nan)])*
> >>> *psser = ps.from_pandas(pser)*
> *>>> pser + 1*
> 0 2
>  1 3
>  2 NaN
> *>>> psser + 1*
>  *Driver stacktrace:*
>  at 
> org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2259)
>  at 
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2208)
>  at 
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2207)
>  at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
>  at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
>  at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
>  at 
> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2207)
>  at 
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1084)
>  at 
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1084)
>  at scala.Option.foreach(Option.scala:407)
>  at 
> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1084)
>  at 
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2446)
>  at 
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2388)
>  at 
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2377)
>  at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
>  at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:873)
>  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2208)
>  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2303)
>  at 
> org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$5(Dataset.scala:3648)
>  at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
>  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1437)
>  at 
> org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$2(Dataset.scala:3652)
>  at 
> org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$2$adapted(Dataset.scala:3629)
>  at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3706)
>  at 
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
>  at 
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
>  at 
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
>  at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:774)
>  at 
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
>  at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3704)
>  at 
> org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$1(Dataset.scala:3629)
>  at 
> org.apache.spark.sql.Dataset.$anonfun$collectAsArrowToPython$1$adapted(Dataset.scala:3628)
>  at 
> org.apache.spark.security.SocketAuthServer$.$anonfun$serveToStream$2(SocketAuthServer.scala:139)
>  at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
>  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1437)
>  at 
> org.apache.spark.security.SocketAuthServer$.$anonfun$serveToStream$1(SocketAuthServer.scala:141)
>  at 
> org.apache.spark.security.SocketAuthServer$.$anonfun$serveToStream$1$adapted(SocketAuthServer.scala:136)
>  at 
> org.apache.spark.security.SocketFuncServer.handleConnection(SocketAuthServer.scala:113)
>  at 
> org.apache.spark.security.SocketFuncServer.handleConnection(SocketAuthServer.scala:107)
>  at 
> org.apache.spark.security.SocketAuthServer$$anon$1.$anonfun$run$4(SocketAuthServer.scala:68)
>  at scala.util.Try$.apply(Try.scala:213)
>  at 
> org.apache.spark.security.SocketAuthServer$$anon$1.run(SocketAuthServer.scala:68)
>  *Caused by: java.lang.NullPointerException*
>  at 
> org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown
>  Source)
>  at 
> org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
>  at 
> org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:759)
>  at scala.collection.Iterator$SliceIterator.hasNext(Iterator.scala:266)
>  at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
>  at 
> org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:131)
>  at 
> org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:60)
>  at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
>  at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
>  at org.apache.spark.scheduler.Task.run(Task.scala:131)
>  at 
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:498)
>  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1437)
>  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:501)
>  at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  at java.lang.Thread.run(Thread.java:748)
>  



