Repository: spark
Updated Branches:
  refs/heads/master 76e20a0a0 -> 8d3e2414d


SPARK-6085 Increase default value for memory overhead

Author: tedyu <yuzhih...@gmail.com>

Closes #4836 from tedyu/master and squashes the following commits:

d65b495 [tedyu] SPARK-6085 Increase default value for memory overhead
1fdd4df [tedyu] SPARK-6085 Increase default value for memory overhead


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/8d3e2414
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/8d3e2414
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/8d3e2414

Branch: refs/heads/master
Commit: 8d3e2414d430e1a0eb209eacba2cf739f3eab0c5
Parents: 76e20a0
Author: tedyu <yuzhih...@gmail.com>
Authored: Wed Mar 4 11:00:52 2015 +0000
Committer: Sean Owen <so...@cloudera.com>
Committed: Wed Mar 4 11:00:52 2015 +0000

----------------------------------------------------------------------
 .../org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala   | 2 +-
 docs/running-on-mesos.md                                         | 2 +-
 docs/running-on-yarn.md                                          | 2 +-
 .../scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala | 4 ++--
 4 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/8d3e2414/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
index 5101ec8..705116c 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
@@ -21,7 +21,7 @@ import org.apache.spark.SparkContext
 
 private[spark] object MemoryUtils {
   // These defaults copied from YARN
-  val OVERHEAD_FRACTION = 1.07
+  val OVERHEAD_FRACTION = 1.10
   val OVERHEAD_MINIMUM = 384
 
   def calculateTotalMemory(sc: SparkContext) = {
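
For context, a minimal standalone sketch of how OVERHEAD_FRACTION and OVERHEAD_MINIMUM combine on Mesos (an illustration of the formula, not the actual body of calculateTotalMemory; the object name is made up). The fraction is a multiplier on the executor memory (1.10 = base plus 10% overhead), floored by an absolute minimum overhead of 384 MiB:

    object MemoryOverheadSketch {
      val OVERHEAD_FRACTION = 1.10  // multiplier: executor memory plus 10% overhead
      val OVERHEAD_MINIMUM = 384    // absolute minimum overhead, in MiB

      // Total Mesos task memory: the larger of the 10% fraction and the
      // executor memory plus the 384 MiB floor.
      def totalMemory(executorMemoryMb: Int): Int =
        math.max((OVERHEAD_FRACTION * executorMemoryMb).toInt,
          executorMemoryMb + OVERHEAD_MINIMUM)
    }

    // 1 GiB executor: max(1126, 1024 + 384) = 1408 MiB (the floor dominates).
    // 8 GiB executor: max(9011, 8192 + 384) = 9011 MiB (the fraction dominates).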

http://git-wip-us.apache.org/repos/asf/spark/blob/8d3e2414/docs/running-on-mesos.md
----------------------------------------------------------------------
diff --git a/docs/running-on-mesos.md b/docs/running-on-mesos.md
index db1173a..e509e4b 100644
--- a/docs/running-on-mesos.md
+++ b/docs/running-on-mesos.md
@@ -225,7 +225,7 @@ See the [configuration page](configuration.html) for information on Spark config
 </tr>
 <tr>
   <td><code>spark.mesos.executor.memoryOverhead</code></td>
-  <td>executor memory * 0.07, with minimum of 384</td>
+  <td>executor memory * 0.10, with minimum of 384</td>
   <td>
    This value is an additive for <code>spark.executor.memory</code>, specified in MiB,
    which is used to calculate the total Mesos task memory. A value of <code>384</code>
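
A hedged usage sketch of overriding this default from application code (the app name and values are made up; the config keys are the ones documented above):

    import org.apache.spark.{SparkConf, SparkContext}

    val conf = new SparkConf()
      .setAppName("overhead-demo")                        // hypothetical app name
      .set("spark.executor.memory", "4g")
      .set("spark.mesos.executor.memoryOverhead", "512")  // MiB; replaces the 10% default
    val sc = new SparkContext(conf)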

http://git-wip-us.apache.org/repos/asf/spark/blob/8d3e2414/docs/running-on-yarn.md
----------------------------------------------------------------------
diff --git a/docs/running-on-yarn.md b/docs/running-on-yarn.md
index 2b93eef..68b1aeb 100644
--- a/docs/running-on-yarn.md
+++ b/docs/running-on-yarn.md
@@ -113,7 +113,7 @@ Most of the configs are the same for Spark on YARN as for other deployment modes
 </tr>
 <tr>
  <td><code>spark.yarn.executor.memoryOverhead</code></td>
-  <td>executorMemory * 0.07, with minimum of 384 </td>
+  <td>executorMemory * 0.10, with minimum of 384 </td>
   <td>
    The amount of off heap memory (in megabytes) to be allocated per executor. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the executor size (typically 6-10%).
  </td>
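
As with the Mesos setting above, a hedged sketch of overriding the YARN default (app name and values are made up). For a 4 GiB executor the computed default would be max(4096 * 0.10, 384) ≈ 409 MiB:

    import org.apache.spark.SparkConf

    val conf = new SparkConf()
      .setAppName("yarn-overhead-demo")                   // hypothetical app name
      .set("spark.executor.memory", "4g")
      .set("spark.yarn.executor.memoryOverhead", "768")   // MiB; replaces the computed 409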

http://git-wip-us.apache.org/repos/asf/spark/blob/8d3e2414/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
----------------------------------------------------------------------
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
index 146b2c0..5881dc5 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
@@ -86,10 +86,10 @@ class YarnSparkHadoopUtil extends SparkHadoopUtil {
 
 object YarnSparkHadoopUtil {
   // Additional memory overhead 
-  // 7% was arrived at experimentally. In the interest of minimizing memory waste while covering
+  // 10% was arrived at experimentally. In the interest of minimizing memory waste while covering
   // the common cases. Memory overhead tends to grow with container size. 
 
-  val MEMORY_OVERHEAD_FACTOR = 0.07
+  val MEMORY_OVERHEAD_FACTOR = 0.10
   val MEMORY_OVERHEAD_MIN = 384
 
   val ANY_HOST = "*"
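
A small sketch of how these constants are applied (an assumption about the surrounding YARN client code, not part of this diff; the function name is hypothetical). Note that YARN keeps the additive fraction (0.10) while the Mesos MemoryUtils above encodes the same default as a multiplier (1.10):

    val MEMORY_OVERHEAD_FACTOR = 0.10
    val MEMORY_OVERHEAD_MIN = 384

    // Overhead is additive on YARN: a fraction of the executor memory,
    // floored at the 384 MiB minimum.
    def memoryOverhead(executorMemoryMb: Int): Int =
      math.max((MEMORY_OVERHEAD_FACTOR * executorMemoryMb).toInt, MEMORY_OVERHEAD_MIN)

    // 1 GiB executor: max(102, 384) = 384 MiB (the minimum wins).
    // 8 GiB executor: max(819, 384) = 819 MiB (the factor wins).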

