Hyukjin Kwon created SPARK-32300:
------------------------------------

             Summary: toPandas with no partitions doesn't work
                 Key: SPARK-32300
                 URL: https://issues.apache.org/jira/browse/SPARK-32300
             Project: Spark
          Issue Type: Bug
          Components: PySpark
    Affects Versions: 2.4.6
            Reporter: Hyukjin Kwon
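
Calling {{toPandas}} with Arrow optimization enabled on a DataFrame whose underlying RDD has no partitions fails with a {{NegativeArraySizeException}}: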


{code}
>>> spark.sparkContext.emptyRDD().toDF("col1 int").toPandas()
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/Users/hyukjin.kwon/workspace/forked/spark/python/pyspark/sql/dataframe.py", line 2132, in toPandas
    batches = self.toDF(*tmp_column_names)._collectAsArrow()
  File "/Users/hyukjin.kwon/workspace/forked/spark/python/pyspark/sql/dataframe.py", line 2223, in _collectAsArrow
    jsocket_auth_server.getResult()  # Join serving thread and raise any exceptions
  File "/Users/hyukjin.kwon/workspace/forked/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py", line 1257, in __call__
  File "/Users/hyukjin.kwon/workspace/forked/spark/python/pyspark/sql/utils.py", line 63, in deco
    return f(*a, **kw)
  File "/Users/hyukjin.kwon/workspace/forked/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py", line 328, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o158.getResult.
: org.apache.spark.SparkException: Exception thrown in awaitResult:
        at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:226)
        at org.apache.spark.api.python.PythonServer.getResult(PythonRDD.scala:874)
        at org.apache.spark.api.python.PythonServer.getResult(PythonRDD.scala:870)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
        at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
        at py4j.Gateway.invoke(Gateway.java:282)
        at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
        at py4j.commands.CallCommand.execute(CallCommand.java:79)
        at py4j.GatewayConnection.run(GatewayConnection.java:238)
        at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.NegativeArraySizeException
        at org.apache.spark.sql.Dataset$$anonfun$collectAsArrowToPython$1$$anonfun$apply$17.apply(Dataset.scala:3293)
        at org.apache.spark.sql.Dataset$$anonfun$collectAsArrowToPython$1$$anonfun$apply$17.apply(Dataset.scala:3287)
        at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3370)
        at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:80)
        at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:127)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:75)
        at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$withAction(Dataset.scala:3369)
        at org.apache.spark.sql.Dataset$$anonfun$collectAsArrowToPython$1.apply(Dataset.scala:3287)
        at org.apache.spark.sql.Dataset$$anonfun$collectAsArrowToPython$1.apply(Dataset.scala:3286)
        at org.apache.spark.api.python.PythonRDD$$anonfun$7$$anonfun$apply$3.apply$mcV$sp(PythonRDD.scala:456)
        at org.apache.spark.api.python.PythonRDD$$anonfun$7$$anonfun$apply$3.apply(PythonRDD.scala:456)
        at org.apache.spark.api.python.PythonRDD$$anonfun$7$$anonfun$apply$3.apply(PythonRDD.scala:456)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
        at org.apache.spark.api.python.PythonRDD$$anonfun$7.apply(PythonRDD.scala:457)
        at org.apache.spark.api.python.PythonRDD$$anonfun$7.apply(PythonRDD.scala:453)
        at org.apache.spark.api.python.SocketFuncServer.handleConnection(PythonRDD.scala:994)
        at org.apache.spark.api.python.SocketFuncServer.handleConnection(PythonRDD.scala:988)
        at org.apache.spark.api.python.PythonServer$$anonfun$11$$anonfun$apply$9.apply(PythonRDD.scala:853)
        at scala.util.Try$.apply(Try.scala:192)
        at org.apache.spark.api.python.PythonServer$$anonfun$11.apply(PythonRDD.scala:853)
        at org.apache.spark.api.python.PythonServer$$anonfun$11.apply(PythonRDD.scala:852)
        at org.apache.spark.api.python.PythonServer$$anon$1.run(PythonRDD.scala:908)
{code}
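
The {{NegativeArraySizeException}} is thrown from the Arrow collection path ({{Dataset.collectAsArrowToPython}}), and only when the DataFrame's underlying RDD has zero partitions. Until this is fixed, two possible workarounds (an untested sketch; {{spark.sql.execution.arrow.enabled}} is the Spark 2.4 config name) are to skip the Arrow path or to force at least one partition:

{code}
# Sketch of possible workarounds, assuming an active SparkSession bound to `spark`.

df = spark.sparkContext.emptyRDD().toDF("col1 int")

# Option 1: disable Arrow so toPandas() uses the plain collect() path.
spark.conf.set("spark.sql.execution.arrow.enabled", "false")
print(df.toPandas())  # expected: empty pandas DataFrame with column col1

# Option 2: repartition so the RDD has at least one (empty) partition
# before the Arrow collection runs.
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
print(df.repartition(1).toPandas())
{code}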



