[ https://issues.apache.org/jira/browse/SPARK-21076?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Xu Yang updated SPARK-21076:
----------------------------
    Description: 
This issue still occurs when the input data contains an array column whose arrays do not all have the same length, for example:


{code}
head(test1)

               key              value
1 4dda7d68a202e9e3              1595297780
2  4e08f349deb7392              641991337
3 4e105531747ee00b              374773009
4 4f1d5ef7fdb4620a              2570136926
5 4f63a71e6dde04cd              2117602722
6 4fa2f96b689624fc              3489692062, 1344510747, 1095592237, 424510360, 3211239587

sparkR.stop()
sc <- sparkR.init()
sqlContext <- sparkRSQL.init(sc)
spark_df <- createDataFrame(sqlContext, test1)

# Fails
dapplyCollect(spark_df, function(x) x)

Caused by: org.apache.spark.SparkException: R computation failed with
 Error in (function (..., deparse.level = 1, make.row.names = TRUE, stringsAsFactors = default.stringsAsFactors())  :
  invalid list argument: all variables should have the same length
        at org.apache.spark.api.r.RRunner.compute(RRunner.scala:108)
        at org.apache.spark.sql.execution.r.MapPartitionsRWrapper.apply(MapPartitionsRWrapper.scala:59)
        at org.apache.spark.sql.execution.r.MapPartitionsRWrapper.apply(MapPartitionsRWrapper.scala:29)
        at org.apache.spark.sql.execution.MapPartitionsExec$$anonfun$6.apply(objects.scala:186)
        at org.apache.spark.sql.execution.MapPartitionsExec$$anonfun$6.apply(objects.scala:183)
        at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
        at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
        at org.apache.spark.scheduler.Task.run(Task.scala:99)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:322)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        ... 1 more
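
# The R-side failure appears to come from base R's rbind.data.frame()
# (an assumption, inferred from the anonymous-function signature in the
# trace above): it rejects named lists whose elements differ in length.
# A minimal pure-R sketch reproducing the same error outside Spark,
# using the values from row 6 of test1:
row <- list(key = "4fa2f96b689624fc",
            value = c(3489692062, 1344510747, 1095592237, 424510360, 3211239587))
do.call(rbind.data.frame, list(row))
# Error: invalid list argument: all variables should have the same length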

# Works fine
spark_df <- selectExpr(spark_df, "key", "explode(value) value") 
dapplyCollect(spark_df, function(x) x)

                key         value
1  4dda7d68a202e9e3 1595297780
2   4e08f349deb7392  641991337
3  4e105531747ee00b  374773009
4  4f1d5ef7fdb4620a 2570136926
5  4f63a71e6dde04cd 2117602722
6  4fa2f96b689624fc 3489692062
7  4fa2f96b689624fc 1344510747
8  4fa2f96b689624fc 1095592237
9  4fa2f96b689624fc  424510360
10 4fa2f96b689624fc 3211239587

{code}
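If the original one-row-per-key shape is needed after the explode() workaround, the collected result can be regrouped locally with base R. A minimal sketch (the names local_df and value_by_key are illustrative, not part of the SparkR API):

{code}
# Collect through the explode() workaround shown above.
local_df <- dapplyCollect(spark_df, function(x) x)

# split() regroups the exploded values into one numeric vector per key.
value_by_key <- split(local_df$value, local_df$key)
value_by_key[["4fa2f96b689624fc"]]
# [1] 3489692062 1344510747 1095592237  424510360 3211239587
{code}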



> R dapply doesn't return array or raw columns when arrays have different lengths
> -------------------------------------------------------------------------------
>
>                 Key: SPARK-21076
>                 URL: https://issues.apache.org/jira/browse/SPARK-21076
>             Project: Spark
>          Issue Type: Bug
>          Components: SparkR
>    Affects Versions: 2.1.0
>            Reporter: Xu Yang
>


