maropu commented on a change in pull request #23665: [SPARK-26745][SQL] Skip empty lines in JSON-derived DataFrames when skipParsing optimization in effect URL: https://github.com/apache/spark/pull/23665#discussion_r251281725
########## File path: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/FailureSafeParser.scala ########## @@ -55,11 +56,15 @@ class FailureSafeParser[IN]( def parse(input: IN): Iterator[InternalRow] = { try { - if (skipParsing) { - Iterator.single(InternalRow.empty) - } else { - rawParser.apply(input).toIterator.map(row => toResultRow(Some(row), () => null)) - } + if (skipParsing) { + if (unparsedRecordIsNonEmpty(input)) { + Iterator.single(InternalRow.empty) + } else { + Iterator.empty + } + } else { + rawParser.apply(input).toIterator.map(row => toResultRow(Some(row), () => null)) + } Review comment: I thought about this: https://github.com/maropu/spark/commit/f4df9074619e6cafff679c5f22fa153727be1079 But, you should follow other folks who are familiar with this part, @HyukjinKwon and @MaxGekk ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org