[ 
https://issues.apache.org/jira/browse/SPARK-26372?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16722367#comment-16722367
 ] 

ASF GitHub Bot commented on SPARK-26372:
----------------------------------------

asfgit closed pull request #23323: [SPARK-26372][SQL] Don't reuse value from 
previous row when parsing bad CSV input field
URL: https://github.com/apache/spark/pull/23323
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/UnivocityParser.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/UnivocityParser.scala
index 0f375e036029c..aafc9ebdcaa12 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/UnivocityParser.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/UnivocityParser.scala
@@ -239,6 +239,7 @@ class UnivocityParser(
         } catch {
           case NonFatal(e) =>
             badRecordException = badRecordException.orElse(Some(e))
+            row.setNullAt(i)
         }
         i += 1
       }
diff --git a/sql/core/src/test/resources/test-data/bad_after_good.csv 
b/sql/core/src/test/resources/test-data/bad_after_good.csv
new file mode 100644
index 0000000000000..4621a7d23714d
--- /dev/null
+++ b/sql/core/src/test/resources/test-data/bad_after_good.csv
@@ -0,0 +1,2 @@
+"good record",1999-08-01
+"bad record",1999-088-01
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
index 3b977d74053e6..d9e5d7af19671 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
@@ -63,6 +63,7 @@ class CSVSuite extends QueryTest with SharedSQLContext with 
SQLTestUtils with Te
   private val datesFile = "test-data/dates.csv"
   private val unescapedQuotesFile = "test-data/unescaped-quotes.csv"
   private val valueMalformedFile = "test-data/value-malformed.csv"
+  private val badAfterGoodFile = "test-data/bad_after_good.csv"
 
   /** Verifies data and schema. */
   private def verifyCars(
@@ -2012,4 +2013,22 @@ class CSVSuite extends QueryTest with SharedSQLContext 
with SQLTestUtils with Te
       assert(!files.exists(_.getName.endsWith("csv")))
     }
   }
+
+  test("Do not reuse last good value for bad input field") {
+    val schema = StructType(
+      StructField("col1", StringType) ::
+      StructField("col2", DateType) ::
+      Nil
+    )
+    val rows = spark.read
+      .schema(schema)
+      .format("csv")
+      .load(testFile(badAfterGoodFile))
+
+    val expectedRows = Seq(
+      Row("good record", java.sql.Date.valueOf("1999-08-01")),
+      Row("bad record", null))
+
+    checkAnswer(rows, expectedRows)
+  }
 }


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


> CSV parsing uses previous good value for bad input field
> --------------------------------------------------------
>
>                 Key: SPARK-26372
>                 URL: https://issues.apache.org/jira/browse/SPARK-26372
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 3.0.0
>            Reporter: Bruce Robbins
>            Assignee: Bruce Robbins
>            Priority: Major
>             Fix For: 3.0.0
>
>
> For example:
> {noformat}
> bash-3.2$ cat test.csv 
> "hello",1999-08-01
> "there","bad date"
> "again","2017-11-22"
> bash-3.2$ bin/spark-shell
> ..etc..
> scala> import org.apache.spark.sql.types._
> scala> import org.apache.spark.sql.SaveMode
> scala> var schema = StructType(StructField("col1", StringType) ::
>      |   StructField("col2", DateType) ::
>      |   Nil)
> schema: org.apache.spark.sql.types.StructType = 
> StructType(StructField(col1,StringType,true), StructField(col2,DateType,true))
> scala> val df = spark.read.schema(schema).csv("test.csv")
> df: org.apache.spark.sql.DataFrame = [col1: string, col2: date]
> scala> df.show
> +-----+----------+                                                            
>   
> | col1|      col2|
> +-----+----------+
> |hello|1999-08-01|
> |there|1999-08-01|
> |again|2017-11-22|
> +-----+----------+
> scala> 
> {noformat}
> col2 from the second row contains "1999-08-01", when it should contain null.
> This is because UnivocityParser reuses the same Row object for each input 
> record. If there is an exception converting an input field, the code simply 
> skips over that field, leaving the existing value in the Row object.
> The simple fix is to set the column to null in the Row object whenever there 
> is a badRecordException while converting the input field.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@spark.apache.org
For additional commands, e-mail: issues-help@spark.apache.org

Reply via email to