Github user ericl commented on a diff in the pull request:

    https://github.com/apache/spark/pull/13152#discussion_r74161937
  
    --- Diff: core/src/main/scala/org/apache/spark/storage/BlockManager.scala ---
    @@ -1088,109 +1108,88 @@ private[spark] class BlockManager(
       }
     
       /**
    -   * Replicate block to another node. Not that this is a blocking call that returns after
    +   * Replicate block to another node. Note that this is a blocking call that returns after
        * the block has been replicated.
    +   *
    +   * @param blockId
    +   * @param data
    +   * @param level
    +   * @param classTag
        */
       private def replicate(
    -      blockId: BlockId,
    -      data: ChunkedByteBuffer,
    -      level: StorageLevel,
    -      classTag: ClassTag[_]): Unit = {
    +    blockId: BlockId,
    +    data: ChunkedByteBuffer,
    +    level: StorageLevel,
    +    classTag: ClassTag[_]): Unit = {
    +
         val maxReplicationFailures = conf.getInt("spark.storage.maxReplicationFailures", 1)
    -    val numPeersToReplicateTo = level.replication - 1
    -    val peersForReplication = new ArrayBuffer[BlockManagerId]
    -    val peersReplicatedTo = new ArrayBuffer[BlockManagerId]
    -    val peersFailedToReplicateTo = new ArrayBuffer[BlockManagerId]
         val tLevel = StorageLevel(
           useDisk = level.useDisk,
           useMemory = level.useMemory,
           useOffHeap = level.useOffHeap,
           deserialized = level.deserialized,
           replication = 1)
    +
    +    val numPeersToReplicateTo = level.replication - 1
    +
         val startTime = System.currentTimeMillis
    -    val random = new Random(blockId.hashCode)
    -
    -    var replicationFailed = false
    -    var failures = 0
    -    var done = false
    -
    -    // Get cached list of peers
    -    peersForReplication ++= getPeers(forceFetch = false)
    -
    -    // Get a random peer. Note that this selection of a peer is deterministic on the block id.
    -    // So assuming the list of peers does not change and no replication failures,
    -    // if there are multiple attempts in the same node to replicate the same block,
    -    // the same set of peers will be selected.
    -    def getRandomPeer(): Option[BlockManagerId] = {
    -      // If replication had failed, then force update the cached list of peers and remove the peers
    -      // that have been already used
    -      if (replicationFailed) {
    -        peersForReplication.clear()
    -        peersForReplication ++= getPeers(forceFetch = true)
    -        peersForReplication --= peersReplicatedTo
    -        peersForReplication --= peersFailedToReplicateTo
    -      }
    -      if (!peersForReplication.isEmpty) {
    -        Some(peersForReplication(random.nextInt(peersForReplication.size)))
    -      } else {
    -        None
    -      }
    -    }
     
    -    // One by one choose a random peer and try uploading the block to it
    -    // If replication fails (e.g., target peer is down), force the list of cached peers
    -    // to be re-fetched from driver and then pick another random peer for replication. Also
    -    // temporarily black list the peer for which replication failed.
    -    //
    -    // This selection of a peer and replication is continued in a loop until one of the
    -    // following 3 conditions is fulfilled:
    -    // (i) specified number of peers have been replicated to
    -    // (ii) too many failures in replicating to peers
    -    // (iii) no peer left to replicate to
    -    //
    -    while (!done) {
    -      getRandomPeer() match {
    -        case Some(peer) =>
    -          try {
    -            val onePeerStartTime = System.currentTimeMillis
    -            logTrace(s"Trying to replicate $blockId of ${data.size} bytes to $peer")
    -            blockTransferService.uploadBlockSync(
    -              peer.host,
    -              peer.port,
    -              peer.executorId,
    -              blockId,
    -              new NettyManagedBuffer(data.toNetty),
    -              tLevel,
    -              classTag)
    -            logTrace(s"Replicated $blockId of ${data.size} bytes to $peer in %s ms"
    -              .format(System.currentTimeMillis - onePeerStartTime))
    -            peersReplicatedTo += peer
    -            peersForReplication -= peer
    -            replicationFailed = false
    -            if (peersReplicatedTo.size == numPeersToReplicateTo) {
    -              done = true  // specified number of peers have been replicated to
    -            }
    -          } catch {
    -            case e: Exception =>
    -              logWarning(s"Failed to replicate $blockId to $peer, failure #$failures", e)
    -              failures += 1
    -              replicationFailed = true
    -              peersFailedToReplicateTo += peer
    -              if (failures > maxReplicationFailures) { // too many failures in replicating to peers
    -                done = true
    -              }
    +    var peersForReplication =
    +      blockReplicationPrioritizer.prioritize(blockManagerId, getPeers(false), Set.empty, blockId)
    +    var peersReplicatedTo = Set.empty[BlockManagerId]
    +    var peersFailedToReplicateTo = Set.empty[BlockManagerId]
    +    var numFailures = 0
    +
    +    while(!(numFailures > maxReplicationFailures
    +          || peersForReplication.isEmpty
    +          || peersReplicatedTo.size == numPeersToReplicateTo)) {
    +      val peer = peersForReplication.head
    +      try {
    +        val onePeerStartTime = System.currentTimeMillis
    +        logTrace(s"Trying to replicate $blockId of ${data.size} bytes to $peer")
    +        blockTransferService.uploadBlockSync(
    +          peer.host,
    +          peer.port,
    +          peer.executorId,
    +          blockId,
    +          new NettyManagedBuffer(data.toNetty),
    +          tLevel,
    +          classTag)
    +        logTrace(s"Replicated $blockId of ${data.size} bytes to $peer" +
    +          s" in ${System.currentTimeMillis - onePeerStartTime} ms")
    +        // the block was replicated, lets update state and move ahead
    +
    +        peersForReplication = peersForReplication.tail
    +        peersReplicatedTo += peer
    +      } catch {
    +        case e: Exception =>
    +          logWarning(s"Failed to replicate $blockId to $peer, failure #$numFailures", e)
    +          peersFailedToReplicateTo += peer
    +          // we have a failed replication, so we get the list of peers again
    +          // we don't want peers we have already replicated to and the ones that
    +          // have failed previously
    +          val filteredPeers = getPeers(true).filter { p =>
    +            !(peersFailedToReplicateTo.contains(p) || peersReplicatedTo.contains(p))
    --- End diff --
    
    ?
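    For reference, a minimal standalone sketch of the retry-and-reprioritize flow the new code follows. `Peer`, `prioritize` and `upload` below are hypothetical stand-ins for `BlockManagerId`, `blockReplicationPrioritizer.prioritize` and `blockTransferService.uploadBlockSync`, and the sketch assumes the failure counter is incremented in the catch block (that update sits outside the quoted hunk):

        // Hypothetical, self-contained sketch only; not the Spark types or APIs.
        import scala.util.{Failure, Success, Try}

        object ReplicationSketch {
          case class Peer(id: String)

          // Stand-in for blockReplicationPrioritizer.prioritize: order the candidate
          // peers, dropping any we should not try again.
          def prioritize(candidates: Seq[Peer], exclude: Set[Peer]): Seq[Peer] =
            candidates.filterNot(exclude)

          // Stand-in for blockTransferService.uploadBlockSync: peers whose id starts
          // with "down" simulate an unreachable target.
          def upload(peer: Peer): Try[Unit] =
            if (peer.id.startsWith("down")) Failure(new RuntimeException(s"${peer.id} unreachable"))
            else Success(())

          def replicate(allPeers: Seq[Peer], numPeersToReplicateTo: Int, maxFailures: Int): Set[Peer] = {
            var peersForReplication = prioritize(allPeers, Set.empty)
            var peersReplicatedTo = Set.empty[Peer]
            var peersFailedToReplicateTo = Set.empty[Peer]
            var numFailures = 0

            // Same loop condition as the diff: stop on too many failures, no peers
            // left, or the requested replication factor reached.
            while (!(numFailures > maxFailures
                || peersForReplication.isEmpty
                || peersReplicatedTo.size == numPeersToReplicateTo)) {
              val peer = peersForReplication.head
              upload(peer) match {
                case Success(_) =>
                  peersReplicatedTo += peer
                  peersForReplication = peersForReplication.tail
                case Failure(_) =>
                  numFailures += 1  // assumed; the counter update is outside the quoted hunk
                  peersFailedToReplicateTo += peer
                  // On failure, refresh the candidate list and re-prioritize, skipping
                  // peers that already succeeded or already failed.
                  peersForReplication =
                    prioritize(allPeers, peersReplicatedTo ++ peersFailedToReplicateTo)
              }
            }
            peersReplicatedTo
          }

          def main(args: Array[String]): Unit = {
            val peers = Seq(Peer("a"), Peer("down-b"), Peer("c"), Peer("d"))
            // Expect two successful targets despite "down-b" failing once.
            println(replicate(peers, numPeersToReplicateTo = 2, maxFailures = 1))
          }
        }

    The main structural difference from the old loop is that prioritization replaces the seeded random peer choice, and the candidate list is rebuilt and re-filtered on every failure instead of mutating shared buffers in place.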

