Github user holdenk commented on a diff in the pull request:

    https://github.com/apache/spark/pull/14467#discussion_r75263947

    --- Diff: core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala ---
    @@ -889,21 +892,42 @@ private class PythonAccumulatorParam(@transient private val serverHost: String,
           socket
         }
     
    -  override def zero(value: JList[Array[Byte]]): JList[Array[Byte]] = new JArrayList
    +  override def reset(): Unit = {
    +    this._acc = Collections.synchronizedList(new JArrayList[Array[Byte]])
    +  }
    +
    +  override def isZero: Boolean = {
    +    this._acc.isEmpty
    +  }
    +
    +  override def copyAndReset(): PythonAccumulatorV2 = new PythonAccumulatorV2(serverHost, serverPort)
    +
    +  override def copy(): PythonAccumulatorV2 = {
    +    val newAcc = new PythonAccumulatorV2(serverHost, serverPort)
    +    newAcc._acc.addAll(this._acc)
    +    newAcc
    +  }
    +
    +  // This happens on the worker node, where we just want to remember all the updates
    +  override def add(val2: JList[Array[Byte]]): Unit = {
    +    _acc.addAll(val2)
    +  }
    +
    -  override def addInPlace(val1: JList[Array[Byte]], val2: JList[Array[Byte]])
    -      : JList[Array[Byte]] = synchronized {
    +  override def merge(other: AccumulatorV2[JList[Array[Byte]], JList[Array[Byte]]]): Unit = {
    +    val otherPythonAccumulator = other.asInstanceOf[PythonAccumulatorV2]
    +    // This conditional isn't strictly speaking needed - merging currently only happens on
    +    // the driver program - but that isn't guaranteed, so keep this check in case that changes.
         if (serverHost == null) {
    -      // This happens on the worker node, where we just want to remember all the updates
    -      val1.addAll(val2)
    -      val1
    +      // We are on the worker
    +      add(otherPythonAccumulator._acc)
         } else {
           // This happens on the master, where we pass the updates to Python through a socket
           val socket = openSocket()
           val in = socket.getInputStream
           val out = new DataOutputStream(new BufferedOutputStream(socket.getOutputStream, bufferSize))
    -      out.writeInt(val2.size)
    -      for (array <- val2.asScala) {
    +      out.writeInt(otherPythonAccumulator._acc.size)
    --- End diff ---

    So this code path is only taken during merging on the driver side, and there is no reason to merge the same accumulated value into two different accumulators at the same time. You can also inspect the merge logic in DAGScheduler.scala and TaskMetrics (the latter isn't applicable here, since the Python accumulator isn't a task metric) and verify that updates are merged in one at a time.
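For readers less familiar with the AccumulatorV2 API this PR migrates to, the sketch below shows the same lifecycle (reset/isZero/copy, add on executors, merge on the driver) without the socket plumbing. It is a minimal illustration built only on Spark's org.apache.spark.util.AccumulatorV2 base class; the name ListAccumulatorV2 is hypothetical and not part of the PR.

    import java.util.{ArrayList => JArrayList, Collections, List => JList}

    import org.apache.spark.util.AccumulatorV2

    // Hypothetical list-valued accumulator mirroring the shape of the
    // Python accumulator in the diff above, minus the driver socket.
    class ListAccumulatorV2
        extends AccumulatorV2[JList[Array[Byte]], JList[Array[Byte]]] {

      private var _acc: JList[Array[Byte]] =
        Collections.synchronizedList(new JArrayList[Array[Byte]])

      override def isZero: Boolean = _acc.isEmpty

      override def reset(): Unit = {
        _acc = Collections.synchronizedList(new JArrayList[Array[Byte]])
      }

      override def copy(): ListAccumulatorV2 = {
        val newAcc = new ListAccumulatorV2
        newAcc._acc.addAll(_acc)
        newAcc
      }

      // Called on executors: remember the updates produced by this task.
      override def add(v: JList[Array[Byte]]): Unit = {
        _acc.addAll(v)
      }

      // Called on the driver, where the scheduler folds each task's
      // update into the registered accumulator one at a time, so no
      // extra synchronization is needed here under that contract.
      override def merge(
          other: AccumulatorV2[JList[Array[Byte]], JList[Array[Byte]]]): Unit = {
        _acc.addAll(other.asInstanceOf[ListAccumulatorV2]._acc)
      }

      override def value: JList[Array[Byte]] = _acc
    }

Because merge is only invoked serially on the driver, the conditional on serverHost in the PR is defensive rather than required, which is the point made in the comment above.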