AngersZhuuuu commented on a change in pull request #30957:
URL: https://github.com/apache/spark/pull/30957#discussion_r570723751



##########
File path: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala
##########
@@ -174,6 +174,7 @@ object CatalystTypeConverters {
             convertedIterable += elementConverter.toCatalyst(item)
           }
           new GenericArrayData(convertedIterable.toArray)
+        case g: GenericArrayData => new GenericArrayData(g.array.map(elementConverter.toCatalyst))

Review comment:
       > `toCatalystImpl` converts Scala data into a Catalyst one, but `GenericArrayData` is Catalyst-internal, so this change looks weird.
   
   For complex types, we no longer need this converter after using `JsonToStructs`, so I removed it.
   How about the current change?
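   To illustrate why the converter becomes redundant here (a minimal standalone sketch, not part of the patch): `JsonToStructs.eval` already returns Catalyst-internal values (`InternalRow` / `ArrayData` / `MapData`), so an identity converter is enough on the output side.
   
   ```scala
   import org.apache.spark.sql.catalyst.expressions.{JsonToStructs, Literal}
   import org.apache.spark.sql.catalyst.util.ArrayData
   import org.apache.spark.sql.types.{ArrayType, IntegerType}
   
   // Parsing JSON text for a complex type yields Catalyst-internal data directly.
   val parsed = JsonToStructs(ArrayType(IntegerType), Map.empty[String, String],
     Literal("[1, 2, 3]"), Some("UTC")).eval()
   
   // `parsed` is an ArrayData, not a Scala Seq, so no extra
   // CatalystTypeConverters round-trip is needed.
   assert(parsed.isInstanceOf[ArrayData])
   ```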

##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/BaseScriptTransformationExec.scala
##########
@@ -47,7 +47,13 @@ trait BaseScriptTransformationExec extends UnaryExecNode {
   def ioschema: ScriptTransformationIOSchema
 
   protected lazy val inputExpressionsWithoutSerde: Seq[Expression] = {
-    input.map(Cast(_, StringType).withTimeZone(conf.sessionLocalTimeZone))
+    input.map { in: Expression =>

Review comment:
       > nit: `input.map { in =>`
   
   Done

##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/BaseScriptTransformationExec.scala
##########
@@ -220,6 +226,9 @@ trait BaseScriptTransformationExec extends UnaryExecNode {
       case CalendarIntervalType => wrapperConvertException(
         data => IntervalUtils.stringToInterval(UTF8String.fromString(data)),
         converter)
+      case _: ArrayType | _: MapType | _: StructType => wrapperConvertException(data =>
+        JsonToStructs(attr.dataType, Map.empty[String, String],
+          Literal(data), Some(conf.sessionLocalTimeZone)).eval(), any => any)

Review comment:
       > nit format:
   > 
   > ```
   >       case _: ArrayType | _: MapType | _: StructType =>
   >         wrapperConvertException(data => JsonToStructs(attr.dataType, Map.empty[String, String],
   >           Literal(data), Some(conf.sessionLocalTimeZone)).eval(), any => any)
   > ```
   
   Done

##########
File path: 
sql/core/src/test/scala/org/apache/spark/sql/execution/BaseScriptTransformationSuite.scala
##########
@@ -471,6 +473,100 @@ abstract class BaseScriptTransformationSuite extends SparkPlanTest with SQLTestUtils
     }
   }
 
+  test("SPARK-31936: Script transform support ArrayType/MapType/StructType (no 
serde)") {
+    assume(TestUtils.testCommandAvailable("python"))
+    withTempView("v") {
+      val df = Seq(
+        (Array(0, 1, 2), Array(Array(0, 1), Array(2)),
+          Map("a" -> 1), Map("b" -> Array("a", "b"))),
+        (Array(3, 4, 5), Array(Array(3, 4), Array(5)),
+          Map("b" -> 2), Map("c" -> Array("c", "d"))),
+        (Array(6, 7, 8), Array(Array(6, 7), Array(8)),
+          Map("c" -> 3), Map("d" -> Array("e", "f")))
+      ).toDF("a", "b", "c", "d")
+        .select('a, 'b, 'c, 'd,
+          struct('a, 'b).as("e"),
+          struct('a, 'd).as("f"),
+          struct(struct('a, 'b), struct('a, 'd)).as("g")
+        )
+
+      checkAnswer(
+        df,
+        (child: SparkPlan) => createScriptTransformationExec(
+          input = Seq(
+            df.col("a").expr,
+            df.col("b").expr,
+            df.col("c").expr,
+            df.col("d").expr,
+            df.col("e").expr,
+            df.col("f").expr,
+            df.col("g").expr),
+          script = "cat",
+          output = Seq(
+            AttributeReference("a", ArrayType(IntegerType))(),
+            AttributeReference("b", ArrayType(ArrayType(IntegerType)))(),
+            AttributeReference("c", MapType(StringType, IntegerType))(),
+            AttributeReference("d", MapType(StringType, 
ArrayType(StringType)))(),
+            AttributeReference("e", StructType(
+              Array(StructField("a", ArrayType(IntegerType)),
+                StructField("b", ArrayType(ArrayType(IntegerType))))))(),
+            AttributeReference("f", StructType(
+              Array(StructField("a", ArrayType(IntegerType)),
+                StructField("d", MapType(StringType, 
ArrayType(StringType))))))(),
+            AttributeReference("g", StructType(
+              Array(StructField("col1", StructType(
+                Array(StructField("a", ArrayType(IntegerType)),
+                  StructField("b", ArrayType(ArrayType(IntegerType)))))),
+                StructField("col2", StructType(
+                  Array(StructField("a", ArrayType(IntegerType)),
+                    StructField("d", MapType(StringType, 
ArrayType(StringType)))))))))()),
+          child = child,
+          ioschema = defaultIOSchema
+        ),
+        df.select('a, 'b, 'c, 'd, 'e, 'f, 'g).collect())
+    }
+  }
+
+  test("SPARK-31936: Script transform support 7 level nested complex type (no 
serde)") {

Review comment:
       > This test is only for deep-nested array cases? How about the other deep-nested cases of map/struct?
   
   Updated, how about the current version?
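   For reference, a rough sketch of what deep-nested map/struct coverage could look like (type shapes only; the names here are hypothetical, not the exact code in this PR):
   
   ```scala
   import org.apache.spark.sql.types._
   
   // Hypothetical 7-level nested types covering array, map, and struct.
   val deepArray = ArrayType(ArrayType(ArrayType(ArrayType(
     ArrayType(ArrayType(ArrayType(IntegerType)))))))
   val deepMap = MapType(StringType, MapType(StringType, MapType(StringType,
     MapType(StringType, MapType(StringType, MapType(StringType,
       MapType(StringType, IntegerType)))))))
   val deepStruct = (1 to 7).foldLeft[DataType](IntegerType) { (dt, i) =>
     StructType(Array(StructField(s"f$i", dt)))
   }
   ```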

##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/BaseScriptTransformationExec.scala
##########
@@ -47,7 +47,13 @@ trait BaseScriptTransformationExec extends UnaryExecNode {
   def ioschema: ScriptTransformationIOSchema
 
   protected lazy val inputExpressionsWithoutSerde: Seq[Expression] = {
-    input.map(Cast(_, StringType).withTimeZone(conf.sessionLocalTimeZone))
+    input.map { in =>
+      in.dataType match {
+        case _: ArrayType | _: MapType | _: StructType =>
+          new StructsToJson(in).withTimeZone(conf.sessionLocalTimeZone)

Review comment:
       > Is it okay to follow the default behaviour w/o `options`?
   
   I am not so familiar with this part's options (I am checking them now).
   Just a quick thought: how about we pass `inputSerdeProps` as `StructsToJson`'s options and `outputSerdeProps` as `JsonToStructs`'s options?
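   A minimal sketch of that idea (assuming `ioschema.inputSerdeProps` is the `Seq[(String, String)]` carried by `ScriptTransformationIOSchema`; whether serde properties map 1:1 onto JSON options is still an open question):
   
   ```scala
   protected lazy val inputExpressionsWithoutSerde: Seq[Expression] = {
     input.map { in =>
       in.dataType match {
         case _: ArrayType | _: MapType | _: StructType =>
           // Hypothetical wiring: forward the user-supplied SERDE properties
           // as JSON options via StructsToJson's (options, child) constructor.
           new StructsToJson(ioschema.inputSerdeProps.toMap, in)
             .withTimeZone(conf.sessionLocalTimeZone)
         case _ =>
           Cast(in, StringType).withTimeZone(conf.sessionLocalTimeZone)
       }
     }
   }
   ```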

##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/BaseScriptTransformationExec.scala
##########
@@ -47,7 +47,13 @@ trait BaseScriptTransformationExec extends UnaryExecNode {
   def ioschema: ScriptTransformationIOSchema
 
   protected lazy val inputExpressionsWithoutSerde: Seq[Expression] = {
-    input.map(Cast(_, StringType).withTimeZone(conf.sessionLocalTimeZone))
+    input.map { in =>

Review comment:
       > nit format:
   > 
   > ```
   >     input.map { _.dataType match {
   >       case _: ArrayType | _: MapType | _: StructType =>
   >         new StructsToJson(in).withTimeZone(conf.sessionLocalTimeZone)
   >       case _ => Cast(in, StringType).withTimeZone(conf.sessionLocalTimeZone)
   >     }
   > ```
   
   In this form, we would lose the binding for `in`, hmmm.

##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/BaseScriptTransformationExec.scala
##########
@@ -220,6 +226,9 @@ trait BaseScriptTransformationExec extends UnaryExecNode {
       case CalendarIntervalType => wrapperConvertException(
         data => IntervalUtils.stringToInterval(UTF8String.fromString(data)),
         converter)
+      case _: ArrayType | _: MapType | _: StructType =>
+        wrapperConvertException(data => JsonToStructs(attr.dataType, Map.empty[String, String],

Review comment:
       > This can cause much overhead because this makes a new object (`JsonToStructs`) for each call. Could you avoid it?
   
   The same problem exists on the input side with `Cast` and `StructsToJson`.
   To avoid it, maybe we need to extract a common method from these expressions, or just write something specific to `ScriptTransform`. WDYT @cloud-fan
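   One possible shape for avoiding the per-call allocation (a sketch only; it assumes the expression can be built once per output attribute and reused across rows):
   
   ```scala
   case _: ArrayType | _: MapType | _: StructType =>
     // Hypothetical: allocate the parser expression once, outside the per-row path.
     val jsonParser = JsonToStructs(attr.dataType, Map.empty[String, String],
       Literal(null), Some(conf.sessionLocalTimeZone))
     // Reuse the same instance for every row; only the input string changes.
     wrapperConvertException(
       data => jsonParser.nullSafeEval(UTF8String.fromString(data)),
       any => any)
   ```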

##########
File path: 
sql/core/src/test/scala/org/apache/spark/sql/execution/BaseScriptTransformationSuite.scala
##########
@@ -471,6 +473,126 @@ abstract class BaseScriptTransformationSuite extends SparkPlanTest with SQLTestUtils
     }
   }
 
+  test("SPARK-31936: Script transform support ArrayType/MapType/StructType (no 
serde)") {
+    assume(TestUtils.testCommandAvailable("python"))
+    withTempView("v") {
+      val df = Seq(
+        (Array(0, 1, 2), Array(Array(0, 1), Array(2)),
+          Map("a" -> 1), Map("b" -> Array("a", "b"))),
+        (Array(3, 4, 5), Array(Array(3, 4), Array(5)),
+          Map("b" -> 2), Map("c" -> Array("c", "d"))),
+        (Array(6, 7, 8), Array(Array(6, 7), Array(8)),
+          Map("c" -> 3), Map("d" -> Array("e", "f")))
+      ).toDF("a", "b", "c", "d")
+        .select('a, 'b, 'c, 'd,
+          struct('a, 'b).as("e"),
+          struct('a, 'd).as("f"),
+          struct(struct('a, 'b), struct('a, 'd)).as("g")
+        )
+
+      checkAnswer(
+        df,
+        (child: SparkPlan) => createScriptTransformationExec(
+          input = Seq(
+            df.col("a").expr,
+            df.col("b").expr,
+            df.col("c").expr,
+            df.col("d").expr,
+            df.col("e").expr,
+            df.col("f").expr,
+            df.col("g").expr),
+          script = "cat",
+          output = Seq(
+            AttributeReference("a", ArrayType(IntegerType))(),
+            AttributeReference("b", ArrayType(ArrayType(IntegerType)))(),
+            AttributeReference("c", MapType(StringType, IntegerType))(),
+            AttributeReference("d", MapType(StringType, 
ArrayType(StringType)))(),
+            AttributeReference("e", StructType(
+              Array(StructField("a", ArrayType(IntegerType)),
+                StructField("b", ArrayType(ArrayType(IntegerType))))))(),
+            AttributeReference("f", StructType(
+              Array(StructField("a", ArrayType(IntegerType)),
+                StructField("d", MapType(StringType, 
ArrayType(StringType))))))(),
+            AttributeReference("g", StructType(
+              Array(StructField("col1", StructType(
+                Array(StructField("a", ArrayType(IntegerType)),
+                  StructField("b", ArrayType(ArrayType(IntegerType)))))),
+                StructField("col2", StructType(
+                  Array(StructField("a", ArrayType(IntegerType)),
+                    StructField("d", MapType(StringType, 
ArrayType(StringType)))))))))()),
+          child = child,
+          ioschema = defaultIOSchema
+        ),
+        df.select('a, 'b, 'c, 'd, 'e, 'f, 'g).collect())
+    }
+  }
+
+  test("SPARK-31936: Script transform support nested complex type (no serde)") 
{

Review comment:
       > We still need this test now? On second thought, only the test `"SPARK-31936: Script transform support ArrayType/MapType/StructType (no serde)"` looks fine.
   
   Hmm, a more complex test would be better. How about combining these two tests?



