Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/10577#discussion_r49790507
  
    --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala ---
    @@ -200,41 +203,70 @@ object HiveTypeCoercion {
        */
       object WidenSetOperationTypes extends Rule[LogicalPlan] {
     
    -    private[this] def widenOutputTypes(
    -        planName: String,
    -        left: LogicalPlan,
    -        right: LogicalPlan): (LogicalPlan, LogicalPlan) = {
    -      require(left.output.length == right.output.length)
    +    private def widenOutputTypes(children: Seq[LogicalPlan]): Seq[LogicalPlan] = {
    +      require(children.forall(_.output.length == children.head.output.length))
     
    -      val castedTypes = left.output.zip(right.output).map {
    -        case (lhs, rhs) if lhs.dataType != rhs.dataType =>
    -          findWiderTypeForTwo(lhs.dataType, rhs.dataType)
    -        case other => None
    -      }
    +      // Get a sequence of data types, each of which is the widest type of this specific attribute
    +      // in all the children
    +      val castedTypes: Seq[DataType] =
    +        getCastedTypes(children, attrIndex = 0, mutable.Queue[DataType]())
     
    -      def castOutput(plan: LogicalPlan): LogicalPlan = {
    -        val casted = plan.output.zip(castedTypes).map {
    -          case (e, Some(dt)) if e.dataType != dt =>
    -            Alias(Cast(e, dt), e.name)()
    -          case (e, _) => e
    -        }
    -        Project(casted, plan)
    +      // Add extra Project for type promotion if necessary
    +      if (castedTypes.isEmpty) children else children.map(castOutput(_, castedTypes))
    +    }
    +
    +    // Add Project if the data types do not match
    +    private def castOutput(
    +        plan: LogicalPlan,
    +        castedTypes: Seq[DataType]): LogicalPlan = {
    +      val casted = plan.output.zip(castedTypes).map {
    +        case (e, dt) if e.dataType != dt =>
    +          Alias(Cast(e, dt), e.name)()
    +        case (e, _) => e
           }
    +      if (casted.exists(_.isInstanceOf[Alias])) Project(casted, plan) else plan
    +    }
     
    -      if (castedTypes.exists(_.isDefined)) {
    -        (castOutput(left), castOutput(right))
    -      } else {
    -        (left, right)
    +    // Get the widest type for each attribute in all the children
    +    @tailrec private def getCastedTypes(
    +        children: Seq[LogicalPlan],
    +        attrIndex: Int,
    +        castedTypes: mutable.Queue[DataType]): Seq[DataType] = {
    +      // Return the result after the widen data types have been found for all the children
    +      if (attrIndex >= children.head.output.length) return castedTypes.toSeq
    +
    +      // For the attrIndex-th attribute, find the widest type
    +      val initialType = Option(children.head.output(attrIndex).dataType)
    +      children.foldLeft(initialType) { (currentOutputDataTypes, child) =>
    --- End diff --
    
    is this just `findWiderCommonType(child.output.map(_.dataType))`?
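    
    For illustration only, a rough sketch of what that simplification might look like, assuming the `findWiderCommonType(types: Seq[DataType]): Option[DataType]` helper defined elsewhere in HiveTypeCoercion and the `castOutput` helper from the diff above; this is a sketch of the suggestion, not the committed implementation:
    
        private def widenOutputTypes(children: Seq[LogicalPlan]): Seq[LogicalPlan] = {
          require(children.forall(_.output.length == children.head.output.length))
    
          // For each attribute position, gather that column's type from every child
          // and widen them with a single findWiderCommonType call per column.
          val castedTypes: Seq[Option[DataType]] =
            children.map(_.output.map(_.dataType)).transpose.map(findWiderCommonType)
    
          // Cast only when every column has a common wider type; otherwise leave
          // the children untouched, as in the diff above.
          if (castedTypes.forall(_.isDefined)) {
            children.map(castOutput(_, castedTypes.map(_.get)))
          } else {
            children
          }
        }
    
    Transposing the children's output types would replace the explicit @tailrec fold over attribute indices with one findWiderCommonType call per column.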

