Github user hvanhovell commented on a diff in the pull request: https://github.com/apache/spark/pull/22429#discussion_r217915071 --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala --- @@ -250,5 +253,35 @@ class QueryExecution(val sparkSession: SparkSession, val logical: LogicalPlan) { def codegenToSeq(): Seq[(String, String)] = { org.apache.spark.sql.execution.debug.codegenStringSeq(executedPlan) } + + /** + * Dumps debug information about query execution into the specified file. + */ + def toFile(path: String): Unit = { + val filePath = new Path(path) + val fs = FileSystem.get(filePath.toUri, sparkSession.sparkContext.hadoopConfiguration) --- End diff -- Why use the hadoop configuration of the `SparkContext`? It is probably better to use the one that `sparkSession.sessionState.newHadoopConf()` provides.
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org