Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21156#discussion_r227253732

    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/joins/JoinUtils.scala ---
    @@ -0,0 +1,63 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements. See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License. You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.sql.execution.joins
    +
    +import org.apache.spark.sql.catalyst.expressions.Expression
    +import org.apache.spark.sql.catalyst.plans.physical.{ClusteredDistribution, Distribution, HashClusteredDistribution, HashPartitioning}
    +import org.apache.spark.sql.execution.{FileSourceScanExec, SparkPlan}
    +import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
    +
    +object JoinUtils {
    +  private def avoidShuffleIfPossible(
    +      joinKeys: Seq[Expression],
    +      expressions: Seq[Expression],
    +      leftKeys: Seq[Expression],
    +      rightKeys: Seq[Expression]): Seq[Distribution] = {
    +    val indices = expressions.map(x => joinKeys.indexWhere(_.semanticEquals(x)))
    +    HashClusteredDistribution(indices.map(leftKeys(_))) ::
    +      HashClusteredDistribution(indices.map(rightKeys(_))) :: Nil
    +  }
    +
    +  def requiredChildDistributionForShuffledJoin(
    +      partitioningDetection: Boolean,
    +      leftKeys: Seq[Expression],
    +      rightKeys: Seq[Expression],
    +      left: SparkPlan,
    +      right: SparkPlan): Seq[Distribution] = {
    +    if (!partitioningDetection) {
    +      return HashClusteredDistribution(leftKeys) :: HashClusteredDistribution(rightKeys) :: Nil
    +    }
    +
    +    val leftPartitioning = left.outputPartitioning
    +    val rightPartitioning = right.outputPartitioning
    --- End diff --

    This is my biggest concern. Currently Spark adds shuffles with a rule, so at this point we can't always see the children's partitioning precisely. We implemented a similar feature in `EnsureRequirements.reorderJoinPredicates`, which is hacky, and we should improve the framework before adding more features like this.
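    For readers following along on the list, here is a minimal, Spark-free sketch of the index-mapping idea in `avoidShuffleIfPossible` above: given an existing hash partitioning over a subset of one side's join keys, require the matching keys (in the same order) on both sides so the already-partitioned side does not need an extra shuffle. The `Expr` type and `requiredKeys` helper below are simplified stand-ins introduced purely for illustration; they are not part of this PR or of Spark's API.

```scala
// Simplified stand-in for Catalyst expressions; illustration only, not Spark's API.
final case class Expr(name: String)

object AvoidShuffleSketch {
  // Given the join keys of the already-partitioned side and the expressions of its
  // existing hash partitioning, return the keys both sides must be clustered on so
  // that the existing partitioning can be reused without an extra shuffle.
  def requiredKeys(
      joinKeys: Seq[Expr],
      partitioningExprs: Seq[Expr],
      leftKeys: Seq[Expr],
      rightKeys: Seq[Expr]): (Seq[Expr], Seq[Expr]) = {
    // Position of each partitioning expression among the join keys
    // (the real code uses semanticEquals rather than plain equality).
    val indices = partitioningExprs.map(e => joinKeys.indexWhere(_ == e))
    // Pick the corresponding keys, in partitioning order, for both sides.
    (indices.map(leftKeys), indices.map(rightKeys))
  }

  def main(args: Array[String]): Unit = {
    val leftKeys = Seq(Expr("a"), Expr("b"))
    val rightKeys = Seq(Expr("x"), Expr("y"))
    // Suppose the left child is already hash-partitioned by just "b".
    val (l, r) = requiredKeys(leftKeys, Seq(Expr("b")), leftKeys, rightKeys)
    println(s"left requires $l, right requires $r")
    // left requires List(Expr(b)), right requires List(Expr(y))
  }
}
```

    The real helper operates on Catalyst `Expression`s, and cloud-fan's concern above is that the `outputPartitioning` this logic inspects may still change later, because `EnsureRequirements` inserts the actual shuffles after this decision is made.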