Github user tgravescs commented on a diff in the pull request:

    https://github.com/apache/spark/pull/12113#discussion_r58536751
  
    --- Diff: core/src/main/scala/org/apache/spark/MapOutputTracker.scala ---
    @@ -428,40 +503,93 @@ private[spark] class MapOutputTrackerMaster(conf: 
SparkConf)
         }
       }
     
    +  private def removeBroadcast(bcast: Broadcast[_]): Unit = {
    +    if (null != bcast) {
    +      broadcastManager.unbroadcast(bcast.id,
    +        removeFromDriver = true, blocking = false)
    +    }
    +  }
    +
    +  private def clearCachedBroadcast(): Unit = {
    +    for (cached <- cachedSerializedBroadcast) removeBroadcast(cached._2)
    +    cachedSerializedBroadcast.clear()
    +  }
    +
       def getSerializedMapOutputStatuses(shuffleId: Int): Array[Byte] = {
         var statuses: Array[MapStatus] = null
         var epochGotten: Long = -1
         epochLock.synchronized {
           if (epoch > cacheEpoch) {
             cachedSerializedStatuses.clear()
    +        clearCachedBroadcast()
             cacheEpoch = epoch
           }
           cachedSerializedStatuses.get(shuffleId) match {
             case Some(bytes) =>
               return bytes
             case None =>
    +          logDebug("cached status not found for : " + shuffleId)
               statuses = mapStatuses.getOrElse(shuffleId, Array[MapStatus]())
               epochGotten = epoch
           }
         }
    -    // If we got here, we failed to find the serialized locations in the 
cache, so we pulled
    -    // out a snapshot of the locations as "statuses"; let's serialize and 
return that
    -    val bytes = MapOutputTracker.serializeMapStatuses(statuses)
    -    logInfo("Size of output statuses for shuffle %d is %d 
bytes".format(shuffleId, bytes.length))
    -    // Add them into the table only if the epoch hasn't changed while we 
were working
    -    epochLock.synchronized {
    -      if (epoch == epochGotten) {
    -        cachedSerializedStatuses(shuffleId) = bytes
    +
    +    var shuffleIdLock = shuffleIdLocks.get(shuffleId)
    +    if (null == shuffleIdLock) {
    +      val newLock = new Object()
    +      // in general, this condition should be false - but good to be 
paranoid
    +      val prevLock = shuffleIdLocks.putIfAbsent(shuffleId, newLock)
    +      shuffleIdLock = if (null != prevLock) prevLock else newLock
    +    }
    +    val newbytes = shuffleIdLock.synchronized {
    +
     +      // double-check to make sure someone else didn't serialize and cache the same
     +      // map status while we were waiting on the synchronized block
    +      epochLock.synchronized {
    +        if (epoch > cacheEpoch) {
    +          cachedSerializedStatuses.clear()
    +          clearCachedBroadcast()
    +          cacheEpoch = epoch
    +        }
    +        cachedSerializedStatuses.get(shuffleId) match {
    +          case Some(bytes) =>
    +            return bytes
    +          case None =>
    +            logDebug("shuffle lock cached status not found for : " + 
shuffleId)
    +            statuses = mapStatuses.getOrElse(shuffleId, Array[MapStatus]())
    +            epochGotten = epoch
    +        }
    +      }
    +
    +      // If we got here, we failed to find the serialized locations in the 
cache, so we pulled
    +      // out a snapshot of the locations as "statuses"; let's serialize 
and return that
    +      val (bytes, bcast) = MapOutputTracker.serializeMapStatuses(statuses, 
broadcastManager,
    +        isLocal, minSizeForBroadcast)
    +      logInfo("Size of output statuses for shuffle %d is %d 
bytes".format(shuffleId, bytes.length))
    +      // Add them into the table only if the epoch hasn't changed while we 
were working
    +      epochLock.synchronized {
    +        if (epoch == epochGotten) {
    +          cachedSerializedStatuses(shuffleId) = bytes
    +          if (null != bcast) cachedSerializedBroadcast(shuffleId) = bcast
    +        } else {
    +          logInfo("Epoch changed, not caching!")
    +          removeBroadcast(bcast)
    +        }
           }
    +      bytes
         }
    -    bytes
    +    newbytes
       }
     
       override def stop() {
    +    mapOutputRequests.offer(PoisonPill)
    +    threadpool.shutdown()
         sendTracker(StopMapOutputTracker)
         mapStatuses.clear()
         trackerEndpoint = null
         cachedSerializedStatuses.clear()
    +    clearCachedBroadcast()
    --- End diff --
    
    https://issues.apache.org/jira/browse/SPARK-14405


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to