GitHub user JoshRosen commented on a diff in the pull request:

    https://github.com/apache/spark/pull/8835#discussion_r40131745
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/pythonUDFs.scala ---
    @@ -342,51 +348,57 @@ case class BatchPythonEvaluation(udf: PythonUDF, output: Seq[Attribute], child:
       override def canProcessSafeRows: Boolean = true
     
       protected override def doExecute(): RDD[InternalRow] = {
    -    val childResults = child.execute().map(_.copy())
    +    val inputRDD = child.execute().map(_.copy())
    +    val bufferSize = inputRDD.conf.getInt("spark.buffer.size", 65536)
    +    val reuseWorker = inputRDD.conf.getBoolean("spark.python.worker.reuse", defaultValue = true)
     
    -    val parent = childResults.mapPartitions { iter =>
    +    inputRDD.mapPartitions { iter =>
           EvaluatePython.registerPicklers()  // register pickler for Row
    +
    +      // The queue used to buffer input rows so we can drain it to
    +      // combine input with output from Python.
    +      val queue = new java.util.concurrent.ConcurrentLinkedQueue[InternalRow]()
    +
           val pickle = new Pickler
           val currentRow = newMutableProjection(udf.children, child.output)()
           val fields = udf.children.map(_.dataType)
          val schema = new StructType(fields.map(t => new StructField("", t, true)).toArray)
    -      iter.grouped(100).map { inputRows =>
    +
    +      // Input iterator to Python: input rows are grouped so we send them in batches to Python.
    +      // For each row, add it to the queue.
    +      val inputIterator = iter.grouped(100).map { inputRows =>
             val toBePickled = inputRows.map { row =>
    +          queue.add(row)
               EvaluatePython.toJava(currentRow(row), schema)
             }.toArray
             pickle.dumps(toBePickled)
           }
    -    }
     
    -    val pyRDD = new PythonRDD(
    -      parent,
    -      udf.command,
    -      udf.envVars,
    -      udf.pythonIncludes,
    -      false,
    -      udf.pythonExec,
    -      udf.pythonVer,
    -      udf.broadcastVars,
    -      udf.accumulator
    -    ).mapPartitions { iter =>
    -      val pickle = new Unpickler
    -      iter.flatMap { pickedResult =>
    -        val unpickledBatch = pickle.loads(pickedResult)
    -        unpickledBatch.asInstanceOf[java.util.ArrayList[Any]].asScala
    -      }
    -    }.mapPartitions { iter =>
    +      val context = TaskContext.get()
    +
    +      // Output iterator for results from Python.
    +      val outputIterator = new PythonRunner(
    +        udf.command,
    +        udf.envVars,
    +        udf.pythonIncludes,
    +        udf.pythonExec,
    +        udf.pythonVer,
    +        udf.broadcastVars,
    +        udf.accumulator,
    +        bufferSize,
    +        reuseWorker
    +      ).compute(inputIterator, context.partitionId(), context)
    +
    +      val unpickle = new Unpickler
           val row = new GenericMutableRow(1)
    -      iter.map { result =>
    -        row(0) = EvaluatePython.fromJava(result, udf.dataType)
    -        row: InternalRow
    -      }
    -    }
    +      val joined = new JoinedRow
    --- End diff --
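
    For readers skimming the diff, here is a minimal stand-alone sketch (not the PR's code; the object name and the doubling step are hypothetical) of the buffer-and-drain pattern it introduces: each input is remembered in a queue as it is lazily sent downstream, and each result coming back is paired with the input that produced it.

        import java.util.concurrent.ConcurrentLinkedQueue

        // Hypothetical, simplified sketch of the diff's buffering idea: the
        // input iterator queues each element as a side effect, and because
        // iterators are lazy, queue.add runs only as results are consumed,
        // so poll() always returns the input matching the current output.
        object QueueDrainSketch {
          def main(args: Array[String]): Unit = {
            val queue = new ConcurrentLinkedQueue[Int]()

            // "Send" each input downstream, remembering it in the queue.
            val inputs = Iterator(1, 2, 3).map { x => queue.add(x); x }

            // Stand-in for the round trip through the Python worker.
            val outputs = inputs.map(_ * 2)

            // Drain the queue to combine each output with its input, as the
            // diff does when joining input rows with Python results.
            outputs.foreach { out =>
              val in = queue.poll()
              println(s"input=$in output=$out")
            }
          }
        }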
    
    Same comment here, RE: `joined`: should this be inside of a `mapPartitions` call?
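
    On the question itself: a minimal sketch of the pattern being asked about, using a hypothetical `joinPartitions` helper (not the PR's code). Allocating the mutable `JoinedRow` inside `mapPartitions` gives every task its own instance, rather than sharing one captured in the driver-side closure; within a task it can then be reused safely across rows.

        import org.apache.spark.rdd.RDD
        import org.apache.spark.sql.catalyst.InternalRow
        import org.apache.spark.sql.catalyst.expressions.JoinedRow

        // Hypothetical helper, not the PR's code: JoinedRow is allocated inside
        // mapPartitions, so each task gets a fresh instance that is reused
        // across the rows of its partition only.
        def joinPartitions(rdd: RDD[(InternalRow, InternalRow)]): RDD[InternalRow] =
          rdd.mapPartitions { iter =>
            val joined = new JoinedRow  // one per task, reused row-by-row
            iter.map { case (left, right) => joined(left, right) }
          }

    Since `JoinedRow` only holds references to its two underlying rows, per-partition allocation is cheap and keeps the mutation confined to a single task.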

