Github user chenghao-intel commented on a diff in the pull request:

    https://github.com/apache/spark/pull/1134#discussion_r14111131
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/joins.scala ---
    @@ -341,3 +342,85 @@ case class BroadcastNestedLoopJoin(
           streamedPlusMatches.flatMap(_._1), sc.makeRDD(rightOuterMatches))
       }
     }
    +
    +
    +
    +
    +
    +/**
    + * :: DeveloperApi ::
    + * In some cases data skew happens. SkewJoin samples the streamed table RDD to find
    + * the largest key, then splits the streamed RDD into a mainStreamedTable RDD without
    + * the rows for that key and a maxKeyStreamedTable RDD containing only those rows.
    + * Each of the two RDDs is joined with the build table, and the two result RDDs are
    + * unioned at the end.
    + */
    +@DeveloperApi
    +case class SkewJoin(
    +                           leftKeys: Seq[Expression],
    +                           rightKeys: Seq[Expression],
    +                           buildSide: BuildSide,
    +                           left: SparkPlan,
    +                           right: SparkPlan,
    +                           @transient sc: SparkContext) extends BinaryNode {
    +    override def outputPartitioning: Partitioning = left.outputPartitioning
    +
    +    override def requiredChildDistribution =
    +        ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: Nil
    +
    +    val (buildPlan, streamedPlan) = buildSide match {
    +        case BuildLeft => (left, right)
    +        case BuildRight => (right, left)
    +    }
    +    val (buildKeys, streamedKeys) = buildSide match {
    +        case BuildLeft => (leftKeys, rightKeys)
    +        case BuildRight => (rightKeys, leftKeys)
    +    }
    +
    +    def output = left.output ++ right.output
    +
    +    @transient lazy val buildSideKeyGenerator = new Projection(buildKeys, buildPlan.output)
    +    @transient lazy val streamSideKeyGenerator = new Projection(streamedKeys, streamedPlan.output)
    +
    +
    +    def execute() = {
    +        val streamedTable = streamedPlan.execute()
    +        // This will later be made a configuration option
    +        val sample = streamedTable.sample(false, 0.3, 9).map(row => streamSideKeyGenerator(row)).collect()
    +        val sortedSample = sample.sortWith((row1, row2) => row1.hashCode() > row2.hashCode())
    +        var max = 0
    +        var num = sample.size - 1
    +        var temp = 0
    +        var maxrowKey = sortedSample(0)
    +        //find the largest key
    +        if (sortedSample.size > 1) {
    +            for (i <- 1 to num) {
    +                if (sortedSample(i - 1) == sortedSample(i)) temp += 1
    +                else {
    +                    if (temp > max) {
    +                        max = temp
    +                        maxrowKey = sortedSample(i - 1)
    +                    }
    +                    temp = 0
    +                }
    +            }
    +        }
    +        val maxKeyStreamedTable = streamedTable.filter(row => {
    --- End diff --
    
    This can be done like:
    ```
    val (maxKeyStreamedTable, mainStreamedTable) = streamedTable.partition(row => {
      streamSideKeyGenerator(row).toString().equals(maxrowKey.toString())
    })
    ```
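    
    For the largest-key scan above, counting the sampled keys and taking the most frequent one would be shorter than sorting by hashCode and walking the runs (and it looks like the current loop never compares the final run against `max`). A minimal standalone sketch of both ideas, using plain Scala collections as a stand-in for the collected sample, with `sample` and `maxRowKey` mirroring the names in the diff:
    ```
    // Stand-in for the collected sample of join keys (Rows in the real code).
    val sample: Array[String] = Array("a", "b", "b", "b", "c")
    
    // Most frequent sampled key, instead of sorting by hashCode and scanning runs.
    val maxRowKey: String = sample
      .groupBy(identity)                  // Map[key, Array[key]]
      .maxBy { case (_, ks) => ks.size }  // group with the most occurrences
      ._1
    
    // The same split as the partition suggestion; on the streamed RDD itself,
    // which has no partition method, this would presumably be two filter passes.
    val (maxKeyRows, mainRows) = sample.partition(_ == maxRowKey)
    ```
    Counting on the sample is cheap here since it has already been collected to the driver.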

