Github user cloud-fan commented on a diff in the pull request: https://github.com/apache/spark/pull/20525#discussion_r166829369 --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala --- @@ -190,9 +190,13 @@ object FileFormatWriter extends Logging { global = false, child = plan).execute() } - val ret = new Array[WriteTaskResult](rdd.partitions.length) + + // SPARK-23271 If we are attempting to write a zero-partition RDD, change the number of + // partitions to 1 to make sure we at least set up one write task to write the metadata. + val finalRdd = if (rdd.partitions.length == 0) rdd.repartition(1) else rdd --- End diff -- One simple way to fix it: create an empty 1-partition RDD and use it here.
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org