[ https://issues.apache.org/jira/browse/SPARK-6570?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14383794#comment-14383794 ]
Jon Chase commented on SPARK-6570:
----------------------------------

Stack trace for saveAsParquetFile():

{code}
root
 |-- col1: string (nullable = false)
 |-- col2s: array (nullable = true)
 |    |-- element: integer (containsNull = true)

SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.

ERROR org.apache.spark.executor.Executor Exception in task 7.0 in stage 1.0 (TID 15)
java.lang.ClassCastException: [I cannot be cast to scala.collection.Seq
    at org.apache.spark.sql.parquet.RowWriteSupport.writeValue(ParquetTableSupport.scala:185) ~[spark-sql_2.10-1.3.0.jar:1.3.0]
    at org.apache.spark.sql.parquet.RowWriteSupport.write(ParquetTableSupport.scala:171) ~[spark-sql_2.10-1.3.0.jar:1.3.0]
    at org.apache.spark.sql.parquet.RowWriteSupport.write(ParquetTableSupport.scala:134) ~[spark-sql_2.10-1.3.0.jar:1.3.0]
    at parquet.hadoop.InternalParquetRecordWriter.write(InternalParquetRecordWriter.java:120) ~[parquet-hadoop-1.6.0rc3.jar:na]
    at parquet.hadoop.ParquetRecordWriter.write(ParquetRecordWriter.java:81) ~[parquet-hadoop-1.6.0rc3.jar:na]
    at parquet.hadoop.ParquetRecordWriter.write(ParquetRecordWriter.java:37) ~[parquet-hadoop-1.6.0rc3.jar:na]
    at org.apache.spark.sql.parquet.ParquetRelation2.org$apache$spark$sql$parquet$ParquetRelation2$$writeShard$1(newParquet.scala:631) ~[spark-sql_2.10-1.3.0.jar:1.3.0]
    at org.apache.spark.sql.parquet.ParquetRelation2$$anonfun$insert$2.apply(newParquet.scala:648) ~[spark-sql_2.10-1.3.0.jar:1.3.0]
    at org.apache.spark.sql.parquet.ParquetRelation2$$anonfun$insert$2.apply(newParquet.scala:648) ~[spark-sql_2.10-1.3.0.jar:1.3.0]
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61) ~[spark-core_2.10-1.3.0.jar:1.3.0]
    at org.apache.spark.scheduler.Task.run(Task.scala:64) ~[spark-core_2.10-1.3.0.jar:1.3.0]
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203) ~[spark-core_2.10-1.3.0.jar:1.3.0]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [na:1.8.0_31]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [na:1.8.0_31]
    at java.lang.Thread.run(Thread.java:745) [na:1.8.0_31]

WARN o.a.spark.scheduler.TaskSetManager Lost task 7.0 in stage 1.0 (TID 15, localhost): java.lang.ClassCastException: [I cannot be cast to scala.collection.Seq
    at org.apache.spark.sql.parquet.RowWriteSupport.writeValue(ParquetTableSupport.scala:185)
    at org.apache.spark.sql.parquet.RowWriteSupport.write(ParquetTableSupport.scala:171)
    at org.apache.spark.sql.parquet.RowWriteSupport.write(ParquetTableSupport.scala:134)
    at parquet.hadoop.InternalParquetRecordWriter.write(InternalParquetRecordWriter.java:120)
    at parquet.hadoop.ParquetRecordWriter.write(ParquetRecordWriter.java:81)
    at parquet.hadoop.ParquetRecordWriter.write(ParquetRecordWriter.java:37)
    at org.apache.spark.sql.parquet.ParquetRelation2.org$apache$spark$sql$parquet$ParquetRelation2$$writeShard$1(newParquet.scala:631)
    at org.apache.spark.sql.parquet.ParquetRelation2$$anonfun$insert$2.apply(newParquet.scala:648)
    at org.apache.spark.sql.parquet.ParquetRelation2$$anonfun$insert$2.apply(newParquet.scala:648)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
    at org.apache.spark.scheduler.Task.run(Task.scala:64)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)

ERROR o.a.spark.scheduler.TaskSetManager Task 7 in stage 1.0 failed 1 times; aborting job

org.apache.spark.SparkException: Job aborted due to stage failure: Task 7 in stage 1.0 failed 1 times, most recent failure: Lost task 7.0 in stage 1.0 (TID 15, localhost): java.lang.ClassCastException: [I cannot be cast to scala.collection.Seq
    at org.apache.spark.sql.parquet.RowWriteSupport.writeValue(ParquetTableSupport.scala:185)
    at org.apache.spark.sql.parquet.RowWriteSupport.write(ParquetTableSupport.scala:171)
    at org.apache.spark.sql.parquet.RowWriteSupport.write(ParquetTableSupport.scala:134)
    at parquet.hadoop.InternalParquetRecordWriter.write(InternalParquetRecordWriter.java:120)
    at parquet.hadoop.ParquetRecordWriter.write(ParquetRecordWriter.java:81)
    at parquet.hadoop.ParquetRecordWriter.write(ParquetRecordWriter.java:37)
    at org.apache.spark.sql.parquet.ParquetRelation2.org$apache$spark$sql$parquet$ParquetRelation2$$writeShard$1(newParquet.scala:631)
    at org.apache.spark.sql.parquet.ParquetRelation2$$anonfun$insert$2.apply(newParquet.scala:648)
    at org.apache.spark.sql.parquet.ParquetRelation2$$anonfun$insert$2.apply(newParquet.scala:648)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
    at org.apache.spark.scheduler.Task.run(Task.scala:64)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1203)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1192)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1191)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1191)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:693)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:693)
    at scala.Option.foreach(Option.scala:236)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:693)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1393)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1354)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
{code}
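For anyone decoding the error message: "[I" is the JVM's runtime class name for a primitive int[] array, so every failure here is the same bad cast. Per the frames above, Spark SQL 1.3 assumes an ArrayType column value is a scala.collection.Seq (in RowWriteSupport.writeValue and Explode.eval), and a Java int[] is not one. A minimal standalone illustration, independent of Spark (hypothetical snippet; only scala-library is assumed on the classpath):

{code}
// Standalone illustration: "[I" is the JVM's runtime name for int[],
// and int[] does not implement scala.collection.Seq.
Object value = new int[]{1, 2, 3};
System.out.println(value.getClass().getName());                 // prints "[I"
scala.collection.Seq<?> seq = (scala.collection.Seq<?>) value;  // throws ClassCastException at runtime
{code}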
> Spark SQL "explode()" fails, assumes underlying SQL array is represented by Scala Seq
> -------------------------------------------------------------------------------------
>
>                  Key: SPARK-6570
>                  URL: https://issues.apache.org/jira/browse/SPARK-6570
>              Project: Spark
>           Issue Type: Bug
>           Components: SQL
>     Affects Versions: 1.3.0
>             Reporter: Jon Chase
>
> {code}
> @Rule
> public TemporaryFolder tmp = new TemporaryFolder();
>
> @Test
> public void testPercentileWithExplode() throws Exception {
>     StructType schema = DataTypes.createStructType(Lists.newArrayList(
>             DataTypes.createStructField("col1", DataTypes.StringType, false),
>             DataTypes.createStructField("col2s", DataTypes.createArrayType(DataTypes.IntegerType, true), true)
>     ));
>
>     JavaRDD<Row> rowRDD = sc.parallelize(Lists.newArrayList(
>             RowFactory.create("test", new int[]{1, 2, 3})
>     ));
>
>     DataFrame df = sql.createDataFrame(rowRDD, schema);
>     df.registerTempTable("df");
>     df.printSchema();
>
>     List<int[]> ints = sql.sql("select col2s from df").javaRDD()
>             .map(row -> (int[]) row.get(0)).collect();
>     assertEquals(1, ints.size());
>     assertArrayEquals(new int[]{1, 2, 3}, ints.get(0));
>
>     // fails: lateral view explode does not work:
>     // java.lang.ClassCastException: [I cannot be cast to scala.collection.Seq
>     List<Integer> explodedInts = sql.sql("select col2 from df lateral view explode(col2s) splode as col2").javaRDD()
>             .map(row -> row.getInt(0)).collect();
>     assertEquals(3, explodedInts.size());
>     assertEquals(Lists.newArrayList(1, 2, 3), explodedInts);
>
>     // fails: java.lang.ClassCastException: [I cannot be cast to scala.collection.Seq
>     df.saveAsParquetFile(tmp.getRoot().getAbsolutePath() + "/parquet");
>     DataFrame loadedDf = sql.load(tmp.getRoot().getAbsolutePath() + "/parquet");
>     loadedDf.registerTempTable("loadedDf");
>     List<int[]> moreInts = sql.sql("select col2s from loadedDf").javaRDD()
>             .map(row -> (int[]) row.get(0)).collect();
>     assertEquals(1, moreInts.size());
>     assertArrayEquals(new int[]{1, 2, 3}, moreInts.get(0));
> }
> {code}
>
> {code}
> root
>  |-- col1: string (nullable = false)
>  |-- col2s: array (nullable = true)
>  |    |-- element: integer (containsNull = true)
>
> ERROR org.apache.spark.executor.Executor Exception in task 7.0 in stage 1.0 (TID 15)
> java.lang.ClassCastException: [I cannot be cast to scala.collection.Seq
>     at org.apache.spark.sql.catalyst.expressions.Explode.eval(generators.scala:125) ~[spark-catalyst_2.10-1.3.0.jar:1.3.0]
>     at org.apache.spark.sql.execution.Generate$$anonfun$2$$anonfun$apply$1.apply(Generate.scala:70) ~[spark-sql_2.10-1.3.0.jar:1.3.0]
>     at org.apache.spark.sql.execution.Generate$$anonfun$2$$anonfun$apply$1.apply(Generate.scala:69) ~[spark-sql_2.10-1.3.0.jar:1.3.0]
>     at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371) ~[scala-library-2.10.4.jar:na]
>     at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327) ~[scala-library-2.10.4.jar:na]
>     at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327) ~[scala-library-2.10.4.jar:na]
>     at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327) ~[scala-library-2.10.4.jar:na]
>     at scala.collection.Iterator$class.foreach(Iterator.scala:727) ~[scala-library-2.10.4.jar:na]
>     at scala.collection.AbstractIterator.foreach(Iterator.scala:1157) ~[scala-library-2.10.4.jar:na]
> {code}
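Until Spark handles the conversion itself, one interim workaround suggested by the failing casts is to hand Spark a scala.collection.Seq for the array column instead of a primitive array, which would satisfy the asInstanceOf[Seq] in both RowWriteSupport.writeValue and Explode.eval. A sketch, untested against 1.3.0; the JavaConversions call is Scala 2.10's standard library, not anything this ticket confirms as the intended API:

{code}
import java.util.Arrays;
import scala.collection.JavaConversions;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;

// Untested workaround sketch: box the ints and convert the list to a
// scala.collection.Seq (asScalaBuffer returns a mutable.Buffer, which is a
// Seq) before building the Row, so downstream Seq casts succeed.
Row row = RowFactory.create("test",
        JavaConversions.asScalaBuffer(Arrays.asList(1, 2, 3)));
{code}

With the Row built this way, the lateral view explode query and saveAsParquetFile() from the test above would be expected to stop throwing ClassCastException, though row.get(0) would then return a Seq rather than an int[].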