GitHub user rxin commented on a diff in the pull request:

    https://github.com/apache/spark/pull/250#discussion_r11012318
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/joins.scala ---
    @@ -40,33 +50,86 @@ case class SparkEquiInnerJoin(
       override def requiredChildDistribution =
         ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: Nil
     
    +  val (buildPlan, streamedPlan) = buildSide match {
    +    case BuildLeft => (left, right)
    +    case BuildRight => (right, left)
    +  }
    +
    +  val (buildKeys, streamedKeys) = buildSide match {
    +    case BuildLeft => (leftKeys, rightKeys)
    +    case BuildRight => (rightKeys, leftKeys)
    +  }
    +
       def output = left.output ++ right.output
     
    -  def execute() = attachTree(this, "execute") {
    -    val leftWithKeys = left.execute().mapPartitions { iter =>
    -      val generateLeftKeys = new Projection(leftKeys, left.output)
    -      iter.map(row => (generateLeftKeys(row), row.copy()))
    -    }
    +  @transient lazy val buildSideKeyGenerator = new Projection(buildKeys, buildPlan.output)
    +  @transient lazy val streamSideKeyGenerator =
    +    () => new MutableProjection(streamedKeys, streamedPlan.output)
     
    -    val rightWithKeys = right.execute().mapPartitions { iter =>
    -      val generateRightKeys = new Projection(rightKeys, right.output)
    -      iter.map(row => (generateRightKeys(row), row.copy()))
    -    }
    +  def execute() = {
     
    -    // Do the join.
    -    val joined = filterNulls(leftWithKeys).joinLocally(filterNulls(rightWithKeys))
    -    // Drop join keys and merge input tuples.
    -    joined.map { case (_, (leftTuple, rightTuple)) => buildRow(leftTuple ++ rightTuple) }
    -  }
    +    buildPlan.execute().zipPartitions(streamedPlan.execute()) { (buildIter, streamIter) =>
    +      val hashTable = new java.util.HashMap[Row, ArrayBuffer[Row]]()
    +      var currentRow: Row = null
    +
    +      // Create a mapping of buildKeys -> rows
    +      while(buildIter.hasNext) {
    +        currentRow = buildIter.next()
    +        val rowKey = buildSideKeyGenerator(currentRow)
    +        if(!rowKey.anyNull) {
    +          val existingMatchList = hashTable.get(rowKey)
    +          val matchList = if (existingMatchList == null) {
    +            val newMatchList = new ArrayBuffer[Row]()
    +            hashTable.put(rowKey, newMatchList)
    +            newMatchList
    +          } else {
    +            existingMatchList
    +          }
    +          matchList += currentRow.copy()
    +        }
    +      }
     
    -  /**
    -   * Filters any rows where any of the join keys is null, ensuring three-valued
    -   * logic for the equi-join conditions.
    -   */
    -  protected def filterNulls(rdd: RDD[(Row, Row)]) =
    -    rdd.filter {
    -      case (key: Seq[_], _) => !key.exists(_ == null)
    +      new Iterator[Row] {
    +        private[this] var currentRow: Row = _
    +        private[this] var currentMatches: ArrayBuffer[Row] = _
    +        private[this] var currentPosition: Int = -1
    +
    +        // Mutable per row objects.
    +        private[this] val joinRow = new JoinedRow
    +
    +        @transient private val joinKeys = streamSideKeyGenerator()
    +
    +        def hasNext: Boolean =
    --- End diff ---
    
    Add `override` to `hasNext` and `next`.
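    
    For illustration, here is a minimal, self-contained sketch (plain Scala,
    no Spark dependencies; all names are hypothetical) of the requested
    change. `hasNext` and `next` implement abstract members of
    `scala.Iterator`, so both can carry an explicit `override` modifier:
    
        object OverrideSketch {
          def main(args: Array[String]): Unit = {
            val rows = Seq("a", "b", "c")
            val it = new Iterator[String] {
              private[this] var pos = 0
              // Both methods implement scala.Iterator members, so marking
              // them `override` is legal and makes the intent explicit.
              override def hasNext: Boolean = pos < rows.length
              override def next(): String = {
                val row = rows(pos)
                pos += 1
                row
              }
            }
            it.foreach(println)  // prints a, b, c on separate lines
          }
        }
    
    Writing `override` explicitly also documents the relationship to
    `scala.Iterator` at the declaration site, so a reader does not have to
    check the supertype to see which members are being implemented.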

