Repository: spark
Updated Branches:
  refs/heads/master 7f8eb3bf6 -> e388b39d1


[SPARK-11692][SQL] Support for Parquet logical types, JSON and BSON (embedded types)

Parquet supports JSON and BSON as logical types for embedded data. Internally, 
BSON values are represented as binary and JSON values as UTF-8 strings.

I searched a bit and found that Apache Drill also supports both types in this 
way ([link](https://drill.apache.org/docs/parquet-format/)).
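As a quick illustration (a sketch, not part of this patch: it assumes a 1.6-era 
`sqlContext` and a Parquet file at a hypothetical path whose binary columns carry 
the JSON and BSON annotations), reading such a file now yields:

```scala
import org.apache.spark.sql.types.{BinaryType, StringType}

// Hypothetical file whose Parquet schema is:
//   message root {
//     required binary a (JSON);
//     required binary b (BSON);
//   }
val df = sqlContext.read.parquet("/tmp/embedded-types.parquet")  // path is illustrative
assert(df.schema("a").dataType === StringType)  // JSON is read back as a UTF-8 string
assert(df.schema("b").dataType === BinaryType)  // BSON is read back as raw binary
```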

Author: hyukjinkwon <gurwls...@gmail.com>
Author: Hyukjin Kwon <gurwls...@gmail.com>

Closes #9658 from HyukjinKwon/SPARK-11692.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/e388b39d
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/e388b39d
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/e388b39d

Branch: refs/heads/master
Commit: e388b39d10fc269cdd3d630ea7d4ae80fd0efa97
Parents: 7f8eb3b
Author: hyukjinkwon <gurwls...@gmail.com>
Authored: Mon Nov 16 21:59:33 2015 +0800
Committer: Cheng Lian <l...@databricks.com>
Committed: Mon Nov 16 21:59:33 2015 +0800

----------------------------------------------------------------------
 .../parquet/CatalystSchemaConverter.scala       |  3 ++-
 .../datasources/parquet/ParquetIOSuite.scala    | 25 ++++++++++++++++++++
 2 files changed, 27 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/e388b39d/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
index f28a18e..5f9f908 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
@@ -170,9 +170,10 @@ private[parquet] class CatalystSchemaConverter(
 
       case BINARY =>
         originalType match {
-          case UTF8 | ENUM => StringType
+          case UTF8 | ENUM | JSON => StringType
           case null if assumeBinaryIsString => StringType
           case null => BinaryType
+          case BSON => BinaryType
           case DECIMAL => makeDecimalType()
           case _ => illegalType()
         }
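For readers skimming the hunk, here is a self-contained sketch of the BINARY 
branch above. It is simplified (the real converter derives decimal precision and 
scale from the Parquet type's metadata rather than using a default); it uses 
parquet-mr's `OriginalType` enum and Spark's Catalyst types:

```scala
import org.apache.parquet.schema.OriginalType
import org.apache.parquet.schema.OriginalType._
import org.apache.spark.sql.types._

// Simplified mapping for BINARY columns, mirroring the match in the patch.
def binaryToCatalyst(originalType: OriginalType, assumeBinaryIsString: Boolean): DataType =
  originalType match {
    case UTF8 | ENUM | JSON => StringType            // JSON is UTF-8 text under the hood
    case null if assumeBinaryIsString => StringType  // unannotated binary, legacy behavior
    case null => BinaryType
    case BSON => BinaryType                          // BSON stays as raw bytes
    case DECIMAL => DecimalType.SYSTEM_DEFAULT       // placeholder; real code reads precision/scale
    case _ => throw new IllegalArgumentException(s"Illegal Parquet type: $originalType")
  }
```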

http://git-wip-us.apache.org/repos/asf/spark/blob/e388b39d/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
index 2aa5dca..a148fac 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
@@ -259,6 +259,31 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
     }
   }
 
+  test("SPARK-11692 Support for Parquet logical types, JSON and BSON (embedded 
types)") {
+    val parquetSchema = MessageTypeParser.parseMessageType(
+      """message root {
+        |  required binary a(JSON);
+        |  required binary b(BSON);
+        |}
+      """.stripMargin)
+
+    withTempPath { location =>
+      val extraMetadata = Map.empty[String, String].asJava
+      val fileMetadata = new FileMetaData(parquetSchema, extraMetadata, "Spark")
+      val path = new Path(location.getCanonicalPath)
+      val footer = List(
+        new Footer(path, new ParquetMetadata(fileMetadata, Collections.emptyList()))
+      ).asJava
+
+      ParquetFileWriter.writeMetadataFile(sparkContext.hadoopConfiguration, path, footer)
+
+      val jsonDataType = sqlContext.read.parquet(path.toString).schema(0).dataType
+      assert(jsonDataType === StringType)
+      val bsonDataType = sqlContext.read.parquet(path.toString).schema(1).dataType
+      assert(bsonDataType === BinaryType)
+    }
+  }
+
   test("compression codec") {
     def compressionCodecFor(path: String, codecName: String): String = {
       val codecs = for {

