Github user Ngone51 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/23223#discussion_r239316608
  
    --- Diff: 
resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
 ---
    @@ -417,4 +426,59 @@ class YarnAllocatorSuite extends SparkFunSuite with 
Matchers with BeforeAndAfter
         clock.advance(50 * 1000L)
         handler.getNumExecutorsFailed should be (0)
       }
    +
    +  test("SPARK-26296: YarnAllocator should have same blacklist behaviour 
with YARN") {
    +    val rmClientSpy = spy(rmClient)
    +    val maxExecutors = 11
    +
    +    val handler = createAllocator(
    +      maxExecutors,
    +      rmClientSpy,
    +      Map(
    +        "spark.yarn.blacklist.executor.launch.blacklisting.enabled" -> 
"true",
    +        "spark.blacklist.application.maxFailedExecutorsPerNode" -> "0"))
    +    handler.updateResourceRequests()
    +
    +    val hosts = (0 until maxExecutors).map(i => s"host$i")
    +    val ids = (0 to maxExecutors).map(i => 
ContainerId.newContainerId(appAttemptId, i))
    +    val containers = createContainers(hosts, ids)
    +    handler.handleAllocatedContainers(containers.slice(0, 9))
    +    val cs0 = ContainerStatus.newInstance(containers(0).getId, 
ContainerState.COMPLETE,
    +      "success", ContainerExitStatus.SUCCESS)
    +    val cs1 = ContainerStatus.newInstance(containers(1).getId, 
ContainerState.COMPLETE,
    +      "preempted", ContainerExitStatus.PREEMPTED)
    +    val cs2 = ContainerStatus.newInstance(containers(2).getId, 
ContainerState.COMPLETE,
    +      "killed_exceeded_vmem", ContainerExitStatus.KILLED_EXCEEDED_VMEM)
    +    val cs3 = ContainerStatus.newInstance(containers(3).getId, 
ContainerState.COMPLETE,
    +      "killed_exceeded_pmem", ContainerExitStatus.KILLED_EXCEEDED_PMEM)
    +    val cs4 = ContainerStatus.newInstance(containers(4).getId, 
ContainerState.COMPLETE,
    +      "killed_by_resourcemanager", 
ContainerExitStatus.KILLED_BY_RESOURCEMANAGER)
    +    val cs5 = ContainerStatus.newInstance(containers(5).getId, 
ContainerState.COMPLETE,
    +      "killed_by_appmaster", ContainerExitStatus.KILLED_BY_APPMASTER)
    +    val cs6 = ContainerStatus.newInstance(containers(6).getId, 
ContainerState.COMPLETE,
    +      "killed_after_app_completion", 
ContainerExitStatus.KILLED_AFTER_APP_COMPLETION)
    +    val cs7 = ContainerStatus.newInstance(containers(7).getId, 
ContainerState.COMPLETE,
    +      "aborted", ContainerExitStatus.ABORTED)
    +    val cs8 = ContainerStatus.newInstance(containers(8).getId, 
ContainerState.COMPLETE,
    +      "disk_failed", ContainerExitStatus.DISKS_FAILED)
    --- End diff --
    
    Nice suggestion!


---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org

Reply via email to