Github user tgravescs commented on a diff in the pull request:

    https://github.com/apache/spark/pull/3607#discussion_r22051045
  
    --- Diff: yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala ---
    @@ -39,23 +39,37 @@ private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf)
       var appName: String = "Spark"
       var priority = 0
     
    -  // Additional memory to allocate to containers
    -  // For now, use driver's memory overhead as our AM container's memory overhead
    -  val amMemoryOverhead = sparkConf.getInt("spark.yarn.driver.memoryOverhead",
    -    math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toInt, MEMORY_OVERHEAD_MIN))
    -
    -  val executorMemoryOverhead = sparkConf.getInt("spark.yarn.executor.memoryOverhead",
    -    math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toInt, MEMORY_OVERHEAD_MIN))
    -
       private val isDynamicAllocationEnabled =
         sparkConf.getBoolean("spark.dynamicAllocation.enabled", false)
     
       parseArgs(args.toList)
    +
    +  val isClusterMode = userClass != null
    +
       loadEnvironmentArgs()
       validateArgs()
     
    +  // Additional memory to allocate to containers. In different modes, we use different configs.
    +  val amMemoryOverheadConf = if (isClusterMode) {
    +    "spark.yarn.driver.memoryOverhead"
    +  } else {
    +    "spark.yarn.am.memoryOverhead"
    +  }
    +  val amMemoryOverhead = sparkConf.getInt(amMemoryOverheadConf,
    +    math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toInt, MEMORY_OVERHEAD_MIN))
    +
    +  val executorMemoryOverhead = sparkConf.getInt("spark.yarn.executor.memoryOverhead",
    +    math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toInt, MEMORY_OVERHEAD_MIN))
    +
       /** Load any default arguments provided through environment variables and Spark properties. */
       private def loadEnvironmentArgs(): Unit = {
    +    // In cluster mode, the driver and the AM live in the same JVM, so this does not apply
    +    if (!isClusterMode) {
    +      amMemory = Utils.memoryStringToMb(sparkConf.get("spark.yarn.am.memory", "512m"))
    +    } else {
    +      println("spark.yarn.am.memory is set but does not apply in cluster mode, " +
    --- End diff --
    
    we might as well make this consistent and add a warning when spark.yarn.am.memoryOverhead is set in cluster mode too.
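    
    For example, one consistent approach might look roughly like the sketch below. The config names come from the diff above; the loop over keys and the exact message wording are illustrative assumptions, not the patch's actual code.
    
        // Illustrative only: warn on any spark.yarn.am.* setting that is ignored in cluster mode.
        if (isClusterMode) {
          for (key <- Seq("spark.yarn.am.memory", "spark.yarn.am.memoryOverhead")) {
            if (sparkConf.contains(key)) {
              println(s"$key is set but does not apply in cluster mode. " +
                "Use spark.driver.memory and spark.yarn.driver.memoryOverhead instead.")
            }
          }
        } else {
          // Client mode: the AM memory setting does apply, so read it here.
          amMemory = Utils.memoryStringToMb(sparkConf.get("spark.yarn.am.memory", "512m"))
        }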

