Repository: spark
Updated Branches:
  refs/heads/master 47346cd02 -> e8813be65


[SPARK-4095][YARN][Minor]extract val isLaunchingDriver in ClientBase

Instead of checking if `args.userClass` is null repeatedly, we extract it to a 
global val as in `ApplicationMaster`.

Author: WangTaoTheTonic <barneystin...@aliyun.com>

Closes #2954 from WangTaoTheTonic/MemUnit and squashes the following commits:

13bda20 [WangTaoTheTonic] extract val isLaunchingDriver in ClientBase


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/e8813be6
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/e8813be6
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/e8813be6

Branch: refs/heads/master
Commit: e8813be6539aba1cd1f8854c204b7938464403ed
Parents: 47346cd
Author: WangTaoTheTonic <barneystin...@aliyun.com>
Authored: Tue Oct 28 08:53:10 2014 -0500
Committer: Thomas Graves <tgra...@apache.org>
Committed: Tue Oct 28 08:53:10 2014 -0500

----------------------------------------------------------------------
 .../main/scala/org/apache/spark/deploy/yarn/ClientBase.scala    | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/e8813be6/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
----------------------------------------------------------------------
diff --git 
a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala 
b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
index fb0e34b..0417cdd 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
+++ b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
@@ -55,6 +55,7 @@ private[spark] trait ClientBase extends Logging {
   protected val amMemoryOverhead = args.amMemoryOverhead // MB
   protected val executorMemoryOverhead = args.executorMemoryOverhead // MB
   private val distCacheMgr = new ClientDistributedCacheManager()
+  private val isLaunchingDriver = args.userClass != null
 
   /**
    * Fail fast if we have requested more resources per container than is 
available in the cluster.
@@ -267,7 +268,6 @@ private[spark] trait ClientBase extends Logging {
     // Note that to warn the user about the deprecation in cluster mode, some 
code from
     // SparkConf#validateSettings() is duplicated here (to avoid triggering 
the condition
     // described above).
-    val isLaunchingDriver = args.userClass != null
     if (isLaunchingDriver) {
       sys.env.get("SPARK_JAVA_OPTS").foreach { value =>
         val warning =
@@ -344,7 +344,6 @@ private[spark] trait ClientBase extends Logging {
     }
 
     // Include driver-specific java options if we are launching a driver
-    val isLaunchingDriver = args.userClass != null
     if (isLaunchingDriver) {
       sparkConf.getOption("spark.driver.extraJavaOptions")
         .orElse(sys.env.get("SPARK_JAVA_OPTS"))
@@ -357,7 +356,7 @@ private[spark] trait ClientBase extends Logging {
     javaOpts += ("-Dspark.yarn.app.container.log.dir=" + 
ApplicationConstants.LOG_DIR_EXPANSION_VAR)
 
     val userClass =
-      if (args.userClass != null) {
+      if (isLaunchingDriver) {
         Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass))
       } else {
         Nil


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to