Repository: spark Updated Branches: refs/heads/master abae889f0 -> 3a40c0e57
[SPARK-12749][SQL] add json option to parse floating-point types as DecimalType I tried to add this via `USE_BIG_DECIMAL_FOR_FLOATS` option from Jackson with no success. Added test for non-complex types. Should I add a test for complex types? Author: Brandon Bradley <bradleytas...@gmail.com> Closes #10936 from blbradley/spark-12749. Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/3a40c0e5 Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/3a40c0e5 Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/3a40c0e5 Branch: refs/heads/master Commit: 3a40c0e575fd4215302ea60c9821d31a5a138b8a Parents: abae889 Author: Brandon Bradley <bradleytas...@gmail.com> Authored: Thu Jan 28 15:25:57 2016 -0800 Committer: Reynold Xin <r...@databricks.com> Committed: Thu Jan 28 15:25:57 2016 -0800 ---------------------------------------------------------------------- python/pyspark/sql/readwriter.py | 2 ++ .../org/apache/spark/sql/DataFrameReader.scala | 2 ++ .../datasources/json/InferSchema.scala | 8 ++++-- .../datasources/json/JSONOptions.scala | 2 ++ .../execution/datasources/json/JsonSuite.scala | 28 ++++++++++++++++++++ 5 files changed, 40 insertions(+), 2 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/spark/blob/3a40c0e5/python/pyspark/sql/readwriter.py ---------------------------------------------------------------------- diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py index 0b20022..b1453c6 100644 --- a/python/pyspark/sql/readwriter.py +++ b/python/pyspark/sql/readwriter.py @@ -152,6 +152,8 @@ class DataFrameReader(object): You can set the following JSON-specific options to deal with non-standard JSON files: * ``primitivesAsString`` (default ``false``): infers all primitive values as a string \ type + * ``floatAsBigDecimal`` (default ``false``): infers all floating-point values as a decimal \ + type * 
``allowComments`` (default ``false``): ignores Java/C++ style comment in JSON records * ``allowUnquotedFieldNames`` (default ``false``): allows unquoted JSON field names * ``allowSingleQuotes`` (default ``true``): allows single quotes in addition to double \ http://git-wip-us.apache.org/repos/asf/spark/blob/3a40c0e5/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala ---------------------------------------------------------------------- diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala index 634c1bd..2e0c6c7 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala @@ -252,6 +252,8 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging { * * You can set the following JSON-specific options to deal with non-standard JSON files: * <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li> + * <li>`floatAsBigDecimal` (default `false`): infers all floating-point values as a decimal + * type</li> * <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records</li> * <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names</li> * <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes http://git-wip-us.apache.org/repos/asf/spark/blob/3a40c0e5/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala ---------------------------------------------------------------------- diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala index 44d5e4f..8b773dd 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala +++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala @@ -134,8 +134,12 @@ private[json] object InferSchema { val v = parser.getDecimalValue DecimalType(v.precision(), v.scale()) case FLOAT | DOUBLE => - // TODO(davies): Should we use decimal if possible? - DoubleType + if (configOptions.floatAsBigDecimal) { + val v = parser.getDecimalValue + DecimalType(v.precision(), v.scale()) + } else { + DoubleType + } } case VALUE_TRUE | VALUE_FALSE => BooleanType http://git-wip-us.apache.org/repos/asf/spark/blob/3a40c0e5/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala ---------------------------------------------------------------------- diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala index fe5b206..31a95ed 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala @@ -34,6 +34,8 @@ private[sql] class JSONOptions( parameters.get("samplingRatio").map(_.toDouble).getOrElse(1.0) val primitivesAsString = parameters.get("primitivesAsString").map(_.toBoolean).getOrElse(false) + val floatAsBigDecimal = + parameters.get("floatAsBigDecimal").map(_.toBoolean).getOrElse(false) val allowComments = parameters.get("allowComments").map(_.toBoolean).getOrElse(false) val allowUnquotedFieldNames = http://git-wip-us.apache.org/repos/asf/spark/blob/3a40c0e5/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala ---------------------------------------------------------------------- diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala index 00eaeb0..dd83a0e 100644 --- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala @@ -771,6 +771,34 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData { ) } + test("Loading a JSON dataset floatAsBigDecimal returns schema with float types as BigDecimal") { + val jsonDF = sqlContext.read.option("floatAsBigDecimal", "true").json(primitiveFieldAndType) + + val expectedSchema = StructType( + StructField("bigInteger", DecimalType(20, 0), true) :: + StructField("boolean", BooleanType, true) :: + StructField("double", DecimalType(17, -292), true) :: + StructField("integer", LongType, true) :: + StructField("long", LongType, true) :: + StructField("null", StringType, true) :: + StructField("string", StringType, true) :: Nil) + + assert(expectedSchema === jsonDF.schema) + + jsonDF.registerTempTable("jsonTable") + + checkAnswer( + sql("select * from jsonTable"), + Row(BigDecimal("92233720368547758070"), + true, + BigDecimal("1.7976931348623157E308"), + 10, + 21474836470L, + null, + "this is a simple string.") + ) + } + test("Loading a JSON dataset from a text file with SQL") { val dir = Utils.createTempDir() dir.delete() --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org