Github user liancheng commented on a diff in the pull request:

    https://github.com/apache/spark/pull/8806#discussion_r39891131
  
    --- Diff: sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala ---
    @@ -1159,4 +1159,104 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
               "SELECT count(a) FROM test_myjson_with_part where d1 = 1"), Row(9))
         })
       }
    +
    +  // scalastyle:off
    +  test("backward compatibility") {
    +    // This test ensures that our JSON support can read JSON data generated by previous
    +    // versions of Spark, both through the toJSON method and through the JSON data source.
    +    // The data is generated by the following program.
    +    // Here are a few notes:
    +    //  - Spark 1.5.0 cannot save timestamp data, so we manually added a timestamp field
    +    //      (col13) to the JSON objects.
    +    //  - Spark versions before 1.5.1 do not generate UDTs, so we manually added the UDT
    +    //      value (col17) to JSON objects generated by those versions.
    +    //  - If the type is NullType, we do not write data out.
    +
    +    // Create the schema.
    +    val struct =
    +      StructType(
    +        StructField("f1", FloatType, true) ::
    +          StructField("f2", ArrayType(BooleanType), true) :: Nil)
    +
    +    val dataTypes =
    +      Seq(
    +        StringType, BinaryType, NullType, BooleanType,
    +        ByteType, ShortType, IntegerType, LongType,
    +        FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
    +        DateType, TimestampType,
    +        ArrayType(IntegerType), MapType(StringType, LongType), struct,
    +        new MyDenseVectorUDT())
    +    val fields = dataTypes.zipWithIndex.map { case (dataType, index) =>
    +      StructField(s"col$index", dataType, nullable = true)
    +    }
    +    val schema = StructType(fields)
    +
    +    val constantValues =
    +      Seq(
    +        "a string in binary".getBytes("UTF-8"),
    +        null,
    +        true,
    +        1.toByte,
    +        2.toShort,
    +        3,
    +        Long.MaxValue,
    +        0.25.toFloat,
    +        0.75,
    +        new java.math.BigDecimal(s"1234.23456"),
    +        new java.math.BigDecimal(s"1.23456"),
    +        java.sql.Date.valueOf("2015-01-01"),
    +        java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123"),
    +        Seq(2, 3, 4),
    +        Map("a string" -> 2000L),
    +        Row(4.75.toFloat, Seq(false, true)),
    +        new MyDenseVector(Array(0.25, 2.25, 4.25)))
    +    val data = Row.fromSeq(Seq("Spark " + sqlContext.sparkContext.version) ++ constantValues) :: Nil
    +
    +    // Data generated by previous versions.
    +    val existingJSONData =
    +      """{"col0":"Spark 
1.2.2","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01
 23:50:59.123","col14":[2,3,4],"col15":{"a 
string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}"""
 ::
    +      """{"col0":"Spark 
1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01
 23:50:59.123","col14":[2,3,4],"col15":{"a 
string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}"""
 ::
    +      """{"col0":"Spark 
1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01
 23:50:59.123","col14":[2,3,4],"col15":{"a 
string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}"""
 ::
    +      """{"col0":"Spark 
1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01
 23:50:59.123","col14":[2,3,4],"col15":{"a 
string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}"""
 ::
    +      """{"col0":"Spark 
1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01
 23:50:59.123","col14":[2,3,4],"col15":{"a 
string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}"""
 ::
    +      """{"col0":"Spark 
1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01
 23:50:59.123","col14":[2,3,4],"col15":{"a 
string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}"""
 ::
    +      """{"col0":"Spark 
1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"16436","col13":"2015-01-01
 23:50:59.123","col14":[2,3,4],"col15":{"a 
string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}"""
 :: Nil
    +
    +    // Generate data for the current version.
    +    val df = sqlContext.createDataFrame(sqlContext.sparkContext.parallelize(data, 1), schema)
    +    withTempPath { path =>
    +      df.write.format("json").mode("overwrite").save(path.getCanonicalPath)
    +
    +      // df.toJSON will convert internal rows to external rows first and then
    +      // generate JSON objects, while df.write.format("json") will write internal
    +      // rows directly.
    --- End diff --
    
    In particular, `udt.serialize` produces internal types, while all the others are external types, which is pretty weird.
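    
    For context, a minimal sketch of that asymmetry (not part of the diff; it assumes `MyDenseVector`/`MyDenseVectorUDT` are in scope as in the test file, and the internal return shape of `serialize` is an assumption about this particular UDT's implementation):
    
        val udt = new MyDenseVectorUDT()
    
        // External values: plain Scala/Java types, as used for every other
        // column in `constantValues` above.
        val externalArray = Seq(2, 3, 4)                        // ArrayType(IntegerType)
        val externalDate = java.sql.Date.valueOf("2015-01-01")  // DateType
    
        // Internal value: `udt.serialize` converts the user-facing object into
        // Catalyst's internal representation (an internal array of doubles here,
        // not a MyDenseVector), so the UDT column alone carries an internal type.
        val internalUdt = udt.serialize(new MyDenseVector(Array(0.25, 2.25, 4.25)))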

