Github user HyukjinKwon commented on a diff in the pull request:

    https://github.com/apache/spark/pull/22527#discussion_r219686429
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala ---
    @@ -1100,13 +1101,24 @@ object SQLContext {
           attrs: Seq[AttributeReference]): Iterator[InternalRow] = {
         val extractors =
           JavaTypeInference.getJavaBeanReadableProperties(beanClass).map(_.getReadMethod)
    -    val methodsToConverts = extractors.zip(attrs).map { case (e, attr) =>
    -      (e, CatalystTypeConverters.createToCatalystConverter(attr.dataType))
    +    val methodsToTypes = extractors.zip(attrs).map { case (e, attr) =>
    +      (e, attr.dataType)
    +    }
    +    def invoke(element: Any)(tuple: (Method, DataType)): Any = tuple match {
    +      case (e, structType: StructType) =>
    +        val value = e.invoke(element)
    +        val nestedExtractors = JavaTypeInference.getJavaBeanReadableProperties(value.getClass)
    +            .map(desc => desc.getName -> desc.getReadMethod)
    +            .toMap
    +        new GenericInternalRow(structType.map(nestedProperty =>
    +          invoke(value)(nestedExtractors(nestedProperty.name) -> nestedProperty.dataType)
    +        ).toArray)
    --- End diff --
    
    Why do we need a map here when we don't need one for the root bean?
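    
    For illustration, a rough, untested sketch of the alternative being suggested here. It assumes `getJavaBeanReadableProperties` returns the nested bean's properties in the same order as the fields of `structType`, which is the same positional assumption the root-bean `zip` already relies on:
    
    ```scala
    // Hypothetical alternative: reuse the positional zip used for the root bean
    // instead of building a name -> read-method map for every nested value.
    case (e, structType: StructType) =>
      val value = e.invoke(element)
      // Read methods of the nested bean, in property order.
      val nestedExtractors = JavaTypeInference
        .getJavaBeanReadableProperties(value.getClass)
        .map(_.getReadMethod)
      // Align each read method with the corresponding struct field by position.
      new GenericInternalRow(nestedExtractors.zip(structType.fields).map {
        case (method, field) => invoke(value)(method -> field.dataType)
      })
    ```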


---
