Github user JoshRosen commented on a diff in the pull request:

    https://github.com/apache/spark/pull/10835#discussion_r50805637
  
    --- Diff: core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala ---
    @@ -230,86 +297,119 @@ class TaskMetrics extends Serializable {
        */
       def shuffleWriteMetrics: Option[ShuffleWriteMetrics] = _shuffleWriteMetrics
     
    -  @deprecated("setting ShuffleWriteMetrics is for internal use only", "2.0.0")
    -  def shuffleWriteMetrics_=(swm: Option[ShuffleWriteMetrics]): Unit = {
    -    _shuffleWriteMetrics = swm
    -  }
    -
       /**
        * Get or create a new [[ShuffleWriteMetrics]] associated with this task.
        */
       private[spark] def registerShuffleWriteMetrics(): ShuffleWriteMetrics = synchronized {
         _shuffleWriteMetrics.getOrElse {
    -      val metrics = new ShuffleWriteMetrics
    +      val metrics = new ShuffleWriteMetrics(initialAccumsMap)
           _shuffleWriteMetrics = Some(metrics)
           metrics
         }
       }
     
    -  private var _updatedBlockStatuses: Seq[(BlockId, BlockStatus)] =
    -    Seq.empty[(BlockId, BlockStatus)]
    -
    -  /**
    -   * Storage statuses of any blocks that have been updated as a result of this task.
    -   */
    -  def updatedBlockStatuses: Seq[(BlockId, BlockStatus)] = _updatedBlockStatuses
     
    -  @deprecated("setting updated blocks is for internal use only", "2.0.0")
    -  def updatedBlocks_=(ub: Option[Seq[(BlockId, BlockStatus)]]): Unit = {
    -    _updatedBlockStatuses = ub.getOrElse(Seq.empty[(BlockId, BlockStatus)])
    -  }
    +  /* ========================== *
    +   |        OTHER THINGS        |
    +   * ========================== */
     
    -  private[spark] def incUpdatedBlockStatuses(v: Seq[(BlockId, BlockStatus)]): Unit = {
    -    _updatedBlockStatuses ++= v
    +  private[spark] def registerAccumulator(a: Accumulable[_, _]): Unit = {
    +    accums += a
       }
     
    -  private[spark] def setUpdatedBlockStatuses(v: Seq[(BlockId, BlockStatus)]): Unit = {
    -    _updatedBlockStatuses = v
    +  /**
    +   * Return the latest updates of accumulators in this task.
    +   */
    +  def accumulatorUpdates(): Seq[AccumulableInfo] = accums.map { a =>
    +    new AccumulableInfo(
    +      a.id, a.name.orNull, Some(a.localValue), None, a.isInternal, a.countFailedValues)
       }
     
    -  @deprecated("use updatedBlockStatuses instead", "2.0.0")
    -  def updatedBlocks: Option[Seq[(BlockId, BlockStatus)]] = {
    -    if (_updatedBlockStatuses.nonEmpty) Some(_updatedBlockStatuses) else None
    +  // If we are reconstructing this TaskMetrics on the driver, some metrics may already be set.
    +  // If so, initialize all relevant metrics classes so listeners can access them downstream.
    +  {
    +    var (hasShuffleRead, hasShuffleWrite, hasInput, hasOutput) = (false, false, false, false)
    +    initialAccums
    +      .filter { a => a.localValue != a.zero }
    +      .foreach { a =>
    +      a.name.get match {
    +        case sr if sr.startsWith(SHUFFLE_READ_METRICS_PREFIX) => hasShuffleRead = true
    +        case sw if sw.startsWith(SHUFFLE_WRITE_METRICS_PREFIX) => hasShuffleWrite = true
    +        case in if in.startsWith(INPUT_METRICS_PREFIX) => hasInput = true
    +        case out if out.startsWith(OUTPUT_METRICS_PREFIX) => hasOutput = true
    +        case _ =>
    +      }
    +    }
    +    if (hasShuffleRead) { _shuffleReadMetrics = Some(new ShuffleReadMetrics(initialAccumsMap)) }
    +    if (hasShuffleWrite) { _shuffleWriteMetrics = Some(new ShuffleWriteMetrics(initialAccumsMap)) }
    +    if (hasInput) { _inputMetrics = Some(new InputMetrics(initialAccumsMap)) }
    +    if (hasOutput) { _outputMetrics = Some(new OutputMetrics(initialAccumsMap)) }
       }
     
    -  private[spark] def updateInputMetrics(): Unit = synchronized {
    -    inputMetrics.foreach(_.updateBytesRead())
    -  }
    +}
     
    -  @throws(classOf[IOException])
    -  private def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
    -    in.defaultReadObject()
    -    // Get the hostname from cached data, since hostname is the order of number of nodes in
    -    // cluster, so using cached hostname will decrease the object number and alleviate the GC
    -    // overhead.
    -    _hostname = TaskMetrics.getCachedHostName(_hostname)
    -  }
    +private[spark] object TaskMetrics extends Logging {
     
    -  private var _accumulatorUpdates: Map[Long, Any] = Map.empty
    -  @transient private var _accumulatorsUpdater: () => Map[Long, Any] = null
    +  def empty: TaskMetrics = new TaskMetrics
     
    -  private[spark] def updateAccumulators(): Unit = synchronized {
    -    _accumulatorUpdates = _accumulatorsUpdater()
    +  /**
    +   * Get an accumulator from the given map by name, assuming it exists.
    +   */
    +  def getAccum[T](accumMap: Map[String, Accumulator[_]], name: String): Accumulator[T] = {
    +    require(accumMap.contains(name), s"metric '$name' is missing")
    +    val accum = accumMap(name)
    +    try {
    +      // Note: we can't do pattern matching here because types are erased by compile time
    +      accum.asInstanceOf[Accumulator[T]]
    +    } catch {
    +      case e: ClassCastException =>
    +        throw new SparkException(s"accumulator $name was of unexpected type", e)
    +    }
       }
     
       /**
    -   * Return the latest updates of accumulators in this task.
    +   * Construct a [[TaskMetrics]] object from a list of accumulator updates, called on driver only.
    +   *
    +   * Executors only send accumulator updates back to the driver, not [[TaskMetrics]]. However, we
    +   * need the latter to post task end events to listeners, so we need to reconstruct the metrics
    +   * on the driver.
    +   *
    +   * This assumes the provided updates contain the initial set of accumulators representing
    +   * internal task level metrics.
        */
    -  def accumulatorUpdates(): Map[Long, Any] = _accumulatorUpdates
    -
    -  private[spark] def setAccumulatorsUpdater(accumulatorsUpdater: () => Map[Long, Any]): Unit = {
    -    _accumulatorsUpdater = accumulatorsUpdater
    +  def fromAccumulatorUpdates(accumUpdates: Seq[AccumulableInfo]): TaskMetrics = {
    +    // Initial accumulators are passed into the TaskMetrics constructor first because these
    +    // are required to be uniquely named. The rest of the accumulators from this task are
    +    // registered later because they need not satisfy this requirement.
    +    val (initialAccumInfos, otherAccumInfos) = accumUpdates
    +      .filter { info => info.update.isDefined }
    +      .partition { info =>
    +      info.name != null && info.name.startsWith(InternalAccumulator.METRICS_PREFIX)
    --- End diff --
    
    Also, doesn't `AccumulableInfo` have an `isInternal` or `internal` field 
which could be used to check this? If that field isn't part of the 
`AccumulableInfo` itself, I suppose you could define a method on 
`AccumulableInfo` which has this `name.startsWith` check.
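    
    Roughly something like this (a rough sketch only; the field name `internal` and
    the helper name `isInternalMetric` are guesses and may not match the actual API):
    
    ```scala
    // Option 1: if AccumulableInfo already carries the internal flag (the constructor
    // call above passes a.isInternal), partition on it directly.
    val (initialAccumInfos, otherAccumInfos) = accumUpdates
      .filter(_.update.isDefined)
      .partition(info => info.internal)
    
    // Option 2: if no such field is exposed, keep the name-based check but move it
    // into AccumulableInfo so callers don't duplicate the METRICS_PREFIX convention.
    // Inside AccumulableInfo:
    def isInternalMetric: Boolean =
      name != null && name.startsWith(InternalAccumulator.METRICS_PREFIX)
    
    // fromAccumulatorUpdates would then partition with:
    //   accumUpdates.filter(_.update.isDefined).partition(_.isInternalMetric)
    ```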

