Github user viirya commented on a diff in the pull request: https://github.com/apache/spark/pull/22646#discussion_r223169392 --- Diff: sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala --- @@ -1115,8 +1123,31 @@ object SQLContext { }) } } - def createConverter(cls: Class[_], dataType: DataType): Any => Any = dataType match { - case struct: StructType => createStructConverter(cls, struct.map(_.dataType)) + def createConverter(t: Type, dataType: DataType): Any => Any = (t, dataType) match { + case (cls: Class[_], struct: StructType) => + createStructConverter(cls, struct.map(_.dataType)) + case (arrayType: Class[_], array: ArrayType) => + val converter = createConverter(arrayType.getComponentType, array.elementType) + value => new GenericArrayData( + (0 until JavaArray.getLength(value)).map(i => + converter(JavaArray.get(value, i))).toArray) + case (_, array: ArrayType) => --- End diff -- Can you add a few comments explaining why there are two cases for `ArrayType`?
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org