Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/11117#discussion_r52866283
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/objects.scala ---
    @@ -67,6 +74,72 @@ case class MapPartitions(
       }
     }
     
    +case class PythonMapPartitions(
    +    func: PythonFunction,
    +    output: Seq[Attribute],
    +    child: SparkPlan) extends UnaryNode {
    +
    +  override def expressions: Seq[Expression] = Nil
    +
    +  private def isPickled(schema: StructType): Boolean = {
    +    schema.length == 1 && schema.head.dataType == BinaryType &&
    +      schema.head.metadata.contains("pickled")
    +  }
    +
    +  override protected def doExecute(): RDD[InternalRow] = {
    +    val inputRDD = child.execute().map(_.copy())
    +    val bufferSize = inputRDD.conf.getInt("spark.buffer.size", 65536)
    +    val reuseWorker = 
inputRDD.conf.getBoolean("spark.python.worker.reuse", defaultValue = true)
    +    val childIsPickled = isPickled(child.schema)
    +    val outputIsPickled = isPickled(schema)
    +
    +    inputRDD.mapPartitions { iter =>
    +      val inputIterator = if (childIsPickled) {
    +        iter.map(_.getBinary(0))
    +      } else {
    +        EvaluatePython.registerPicklers()  // register pickler for Row
    +
    +        val pickle = new Pickler
    +
    +        // Input iterator to Python: input rows are grouped into batches of 100
    +        // and pickled, so we send them to Python in batches rather than one by one.
    +        iter.grouped(100).map { inputRows =>
    +          val toBePickled = inputRows.map { row =>
    +            EvaluatePython.toJava(row, child.schema)
    +          }.toArray
    +          pickle.dumps(toBePickled)
    +        }
    +      }
    +
    +      val context = TaskContext.get()
    +
    +      // Output iterator for results from Python.
    +      val outputIterator =
    +        new PythonRunner(
    +          func.command,
    +          func.envVars,
    +          func.pythonIncludes,
    +          func.pythonExec,
    +          func.pythonVer,
    +          func.broadcastVars,
    +          func.accumulator,
    +          bufferSize,
    +          reuseWorker
    +        ).compute(inputIterator, context.partitionId(), context)
    +
    +      if (outputIsPickled) {
    +        outputIterator.map(bytes => InternalRow(bytes))
    --- End diff --
    
    To avoid copying the bytes, here I create safe rows. However, according to
    https://github.com/apache/spark/pull/10511, operators should always produce
    unsafe rows. Actually the Python UDF operator (`BatchPythonEvaluation`) also
    produces safe rows, which may have the same problem. Should we bring back the
    `requireUnsafeRow` stuff? In cases like this one, converting to unsafe rows is
    expensive and may not have much benefit.
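
    For reference, a rough sketch of the two options, assuming we keep the single
    pickled `BinaryType` column (the helper and its name are illustrative, not part
    of this PR):

    ```scala
    import org.apache.spark.sql.catalyst.InternalRow
    import org.apache.spark.sql.catalyst.expressions.UnsafeProjection
    import org.apache.spark.sql.types.{BinaryType, DataType}

    // Hypothetical helper: wrap the pickled byte arrays coming back from Python.
    def wrapPickledOutput(
        output: Iterator[Array[Byte]],
        produceUnsafe: Boolean): Iterator[InternalRow] = {
      if (!produceUnsafe) {
        // What this PR does: a GenericInternalRow that just points at the byte
        // array, so nothing is copied.
        output.map(bytes => InternalRow(bytes))
      } else {
        // What "always produce unsafe rows" would mean here: the projection
        // copies every byte array into the UnsafeRow buffer.
        val toUnsafe = UnsafeProjection.create(Array[DataType](BinaryType))
        output.map(bytes => toUnsafe(InternalRow(bytes)))
      }
    }
    ```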
    
    cc @davies 

