This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 1cd80fa  [SPARK-31109][MESOS][DOC] Add version information to the configuration of Mesos
1cd80fa is described below

commit 1cd80fa9fa18f6866c73e94d0558e83827d6ce91
Author: beliefer <belie...@163.com>
AuthorDate: Thu Mar 12 11:02:29 2020 +0900

    [SPARK-31109][MESOS][DOC] Add version information to the configuration of Mesos
    
    ### What changes were proposed in this pull request?
    Add version information to the configuration of `Mesos`.
    
    I sorted out the relevant information, shown below.
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.mesos.$taskType.secret.names | 2.3.0 | SPARK-22131 | 5415963d2caaf95604211419ffc4e29fff38e1d7#diff-91e6e5f871160782dc50d4060d6faea3 |
    spark.mesos.$taskType.secret.values | 2.3.0 | SPARK-22131 | 5415963d2caaf95604211419ffc4e29fff38e1d7#diff-91e6e5f871160782dc50d4060d6faea3 |
    spark.mesos.$taskType.secret.envkeys | 2.3.0 | SPARK-22131 | 5415963d2caaf95604211419ffc4e29fff38e1d7#diff-91e6e5f871160782dc50d4060d6faea3 |
    spark.mesos.$taskType.secret.filenames | 2.3.0 | SPARK-22131 | 5415963d2caaf95604211419ffc4e29fff38e1d7#diff-91e6e5f871160782dc50d4060d6faea3 |
    spark.mesos.principal | 1.5.0 | SPARK-6284 | d86bbb4e286f16f77ba125452b07827684eafeed#diff-02a6d899f7a529eb7cfbb12182a110b0 |
    spark.mesos.principal.file | 2.4.0 | SPARK-16501 | 7f10cf83f311526737fc96d5bb8281d12e41932f#diff-daf48dabbe58afaeed8787751750b01d |
    spark.mesos.secret | 1.5.0 | SPARK-6284 | d86bbb4e286f16f77ba125452b07827684eafeed#diff-02a6d899f7a529eb7cfbb12182a110b0 |
    spark.mesos.secret.file | 2.4.0 | SPARK-16501 | 7f10cf83f311526737fc96d5bb8281d12e41932f#diff-daf48dabbe58afaeed8787751750b01d |
    spark.shuffle.cleaner.interval | 2.0.0 | SPARK-12583 | 310981d49a332bd329303f610b150bbe02cf5f87#diff-2fafefee94f2a2023ea9765536870258 |
    spark.mesos.dispatcher.webui.url | 2.0.0 | SPARK-13492 | a4a0addccffb7cd0ece7947d55ce2538afa54c97#diff-f541460c7a74cee87cbb460b3b01665e |
    spark.mesos.dispatcher.historyServer.url | 2.1.0 | SPARK-16809 | 62e62124419f3fa07b324f5e42feb2c5b4fde715#diff-3779e2035d9a09fa5f6af903925b9512 |
    spark.mesos.driver.labels | 2.3.0 | SPARK-21000 | 8da3f7041aafa71d7596b531625edb899970fec2#diff-91e6e5f871160782dc50d4060d6faea3 |
    spark.mesos.driver.webui.url | 2.0.0 | SPARK-13492 | a4a0addccffb7cd0ece7947d55ce2538afa54c97#diff-e3a5e67b8de2069ce99801372e214b8e |
    spark.mesos.driver.failoverTimeout | 2.3.0 | SPARK-21456 | c42ef953343073a50ef04c5ce848b574ff7f2238#diff-91e6e5f871160782dc50d4060d6faea3 |
    spark.mesos.network.name | 2.1.0 | SPARK-18232 | d89bfc92302424406847ac7a9cfca714e6b742fc#diff-ab5bf34f1951a8f7ea83c9456a6c3ab7 |
    spark.mesos.network.labels | 2.3.0 | SPARK-21694 | ce0d3bb377766bdf4df7852272557ae846408877#diff-91e6e5f871160782dc50d4060d6faea3 |
    spark.mesos.driver.constraints | 2.2.1 | SPARK-19606 | f6ee3d90d5c299e67ae6e2d553c16c0d9759d4b5#diff-91e6e5f871160782dc50d4060d6faea3 |
    spark.mesos.driver.frameworkId | 2.1.0 | SPARK-16809 | 62e62124419f3fa07b324f5e42feb2c5b4fde715#diff-02a6d899f7a529eb7cfbb12182a110b0 |
    spark.executor.uri | 0.8.0 | None | 46eecd110a4017ea0c86cbb1010d0ccd6a5eb2ef#diff-a885e7df97790e9b59c21c63353e7476 |
    spark.mesos.proxy.baseURL | 2.3.0 | SPARK-13041 | 663f30d14a0c9219e07697af1ab56e11a714d9a6#diff-0b9b4e122eb666155aa189a4321a6ca8 |
    spark.mesos.coarse | 0.6.0 | None | 63051dd2bcc4bf09d413ff7cf89a37967edc33ba#diff-eaf125f56ce786d64dcef99cf446a751 |
    spark.mesos.coarse.shutdownTimeout | 2.0.0 | SPARK-12330 | c756bda477f458ba4aad7fdb2026263507e0ad9b#diff-d425d35aa23c47a62fbb538554f2f2cf |
    spark.mesos.maxDrivers | 1.4.0 | SPARK-5338 | 53befacced828bbac53c6e3a4976ec3f036bae9e#diff-b964c449b99c51f0a5fd77270b2951a4 |
    spark.mesos.retainedDrivers | 1.4.0 | SPARK-5338 | 53befacced828bbac53c6e3a4976ec3f036bae9e#diff-b964c449b99c51f0a5fd77270b2951a4 |
    spark.mesos.cluster.retry.wait.max | 1.4.0 | SPARK-5338 | 53befacced828bbac53c6e3a4976ec3f036bae9e#diff-b964c449b99c51f0a5fd77270b2951a4 |
    spark.mesos.fetcherCache.enable | 2.1.0 | SPARK-15994 | e34b4e12673fb76c92f661d7c03527410857a0f8#diff-772ea7311566edb25f11a4c4f882179a |
    spark.mesos.appJar.local.resolution.mode | 2.4.0 | SPARK-24326 | 22df953f6bb191858053eafbabaa5b3ebca29f56#diff-6e4d0a0445975f03f975fdc1e3d80e49 |
    spark.mesos.rejectOfferDuration | 2.2.0 | SPARK-19702 | 2e30c0b9bcaa6f7757bd85d1f1ec392d5f916f83#diff-daf48dabbe58afaeed8787751750b01d |
    spark.mesos.rejectOfferDurationForUnmetConstraints | 1.6.0 | SPARK-10471 | 74f50275e429e649212928a9f36552941b862edc#diff-02a6d899f7a529eb7cfbb12182a110b0 |
    spark.mesos.rejectOfferDurationForReachedMaxCores | 2.0.0 | SPARK-13001 | 1e7d9bfb5a41f5c2479ab3b4d4081f00bf00bd31#diff-02a6d899f7a529eb7cfbb12182a110b0 |
    spark.mesos.uris | 1.5.0 | SPARK-8798 | a2f805729b401c68b60bd690ad02533b8db57b58#diff-e3a5e67b8de2069ce99801372e214b8e |
    spark.mesos.executor.home | 1.1.1 | SPARK-3264 | 069ecfef02c4af69fc0d3755bd78be321b68b01d#diff-e3a5e67b8de2069ce99801372e214b8e |
    spark.mesos.mesosExecutor.cores | 1.4.0 | SPARK-6350 | 6fbeb82e13db7117d8f216e6148632490a4bc5be#diff-e3a5e67b8de2069ce99801372e214b8e |
    spark.mesos.extra.cores | 0.6.0 | None | 2d761e3353651049f6707c74bb5ffdd6e86f6f35#diff-37af8c6e3634f97410ade813a5172621 |
    spark.mesos.executor.memoryOverhead | 1.1.1 | SPARK-3535 | 6f150978477830bbc14ba983786dd2bce12d1fe2#diff-6b498f5407d10e848acac4a1b182457c |
    spark.mesos.executor.docker.image | 1.4.0 | SPARK-2691 | 8f50a07d2188ccc5315d979755188b1e5d5b5471#diff-e3a5e67b8de2069ce99801372e214b8e |
    spark.mesos.executor.docker.forcePullImage | 2.1.0 | SPARK-15271 | 978cd5f125eb5a410bad2e60bf8385b11cf1b978#diff-0dd025320c7ecda2ea310ed7172d7f5a |
    spark.mesos.executor.docker.portmaps | 1.4.0 | SPARK-7373 | 226033cfffa2f37ebaf8bc2c653f094e91ef0c9b#diff-b964c449b99c51f0a5fd77270b2951a4 |
    spark.mesos.executor.docker.parameters | 2.2.0 | SPARK-19740 | a888fed3099e84c2cf45e9419f684a3658ada19d#diff-4139e6605a8c7f242f65cde538770c99 |
    spark.mesos.executor.docker.volumes | 1.4.0 | SPARK-7373 | 226033cfffa2f37ebaf8bc2c653f094e91ef0c9b#diff-b964c449b99c51f0a5fd77270b2951a4 |
    spark.mesos.gpus.max | 2.1.0 | SPARK-14082 | 29f186bfdf929b1e8ffd8e33ee37b76d5dc5af53#diff-d427ee890b913c5a7056be21eb4f39d7 |
    spark.mesos.task.labels | 2.2.0 | SPARK-20085 | c8fc1f3badf61bcfc4bd8eeeb61f73078ca068d1#diff-387c5d0c916278495fc28420571adf9e |
    spark.mesos.constraints | 1.5.0 | SPARK-6707 | 1165b17d24cdf1dbebb2faca14308dfe5c2a652c#diff-e3a5e67b8de2069ce99801372e214b8e |
    spark.mesos.containerizer | 2.1.0 | SPARK-16637 | 266b92faffb66af24d8ed2725beb80770a2d91f8#diff-0dd025320c7ecda2ea310ed7172d7f5a |
    spark.mesos.role | 1.5.0 | SPARK-6284 | d86bbb4e286f16f77ba125452b07827684eafeed#diff-02a6d899f7a529eb7cfbb12182a110b0 |
    The following appears in the document |   |   |   |
    spark.mesos.driverEnv.[EnvironmentVariableName] | 2.1.0 | SPARK-16194 | 235cb256d06653bcde4c3ed6b081503a94996321#diff-b964c449b99c51f0a5fd77270b2951a4 |
    spark.mesos.dispatcher.driverDefault.[PropertyName] | 2.1.0 | SPARK-16927 and SPARK-16923 | eca58755fbbc11937b335ad953a3caff89b818e6#diff-b964c449b99c51f0a5fd77270b2951a4 |
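
    For reference, every row above maps to a one-line change in `config.scala`: a single `.version(...)` call on the `ConfigBuilder` chain. A minimal sketch of the pattern, lifted from the `spark.mesos.principal` entry in the diff below (as it appears inside the `config` package object; `ConfigBuilder` is Spark's internal `org.apache.spark.internal.config.ConfigBuilder`):

    ```scala
    import org.apache.spark.internal.config.ConfigBuilder

    // The "Since version" column above becomes a .version(...) call on the builder.
    private[spark] val CREDENTIAL_PRINCIPAL =
      ConfigBuilder("spark.mesos.principal")
        .doc("Name of the Kerberos principal to authenticate Spark to Mesos.")
        .version("1.5.0") // introduced by SPARK-6284
        .stringConf
        .createOptional
    ```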
    
    ### Why are the changes needed?
    Supplements the configuration with version information.
    
    ### Does this PR introduce any user-facing change?
    'No'.
    
    ### How was this patch tested?
    Existing unit tests.
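
    As a quick sanity check, the recorded version can also be read back from the built entry, since the value passed to `ConfigBuilder.version(...)` is stored on the resulting `ConfigEntry` (this assumes `ConfigEntry` exposes the recorded version as a field; the entries are `private[spark]`, so a hypothetical check like this has to live under `org.apache.spark`):

    ```scala
    package org.apache.spark.deploy.mesos

    // Hypothetical sanity check: the versions recorded in config.scala
    // should match the "Since version" column in the table above.
    object VersionSanityCheck {
      def main(args: Array[String]): Unit = {
        assert(config.COARSE_MODE.version == "0.6.0")
        assert(config.CREDENTIAL_PRINCIPAL.version == "1.5.0")
        println("Since-versions match the table.")
      }
    }
    ```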
    
    Closes #27863 from beliefer/add-version-to-mesos-config.
    
    Authored-by: beliefer <belie...@163.com>
    Signed-off-by: HyukjinKwon <gurwls...@apache.org>
---
 docs/running-on-mesos.md                           | 47 ++++++++++++++++---
 .../org/apache/spark/deploy/mesos/config.scala     | 52 +++++++++++++++++++---
 2 files changed, 87 insertions(+), 12 deletions(-)

diff --git a/docs/running-on-mesos.md b/docs/running-on-mesos.md
index cf51620a..6f6ae1c 100644
--- a/docs/running-on-mesos.md
+++ b/docs/running-on-mesos.md
@@ -371,7 +371,7 @@ See the [configuration page](configuration.html) for information on Spark config
 #### Spark Properties
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.mesos.coarse</code></td>
   <td>true</td>
@@ -380,6 +380,7 @@ See the [configuration page](configuration.html) for information on Spark config
     If set to <code>false</code>, runs over Mesos cluster in "fine-grained" sharing mode, where one Mesos task is created per Spark task.
     Detailed information in <a href="running-on-mesos.html#mesos-run-modes">'Mesos Run Modes'</a>.
   </td>
+  <td>0.6.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.extra.cores</code></td>
@@ -391,6 +392,7 @@ See the [configuration page](configuration.html) for information on Spark config
     send it more tasks.  Use this to increase parallelism.  This
     setting is only used for Mesos coarse-grained mode.
   </td>
+  <td>0.6.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.mesosExecutor.cores</code></td>
@@ -401,6 +403,7 @@ See the [configuration page](configuration.html) for information on Spark config
     is being run, each Mesos executor will occupy the number of cores configured here.
     The value can be a floating point number.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.docker.image</code></td>
@@ -411,6 +414,7 @@ See the [configuration page](configuration.html) for information on Spark config
     The installed path of Spark in the image can be specified with <code>spark.mesos.executor.home</code>;
     the installed path of the Mesos library can be specified with <code>spark.executorEnv.MESOS_NATIVE_JAVA_LIBRARY</code>.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.docker.forcePullImage</code></td>
@@ -419,6 +423,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Force Mesos agents to pull the image specified in <code>spark.mesos.executor.docker.image</code>.
     By default Mesos agents will not pull images they already have cached.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.docker.parameters</code></td>
@@ -429,6 +434,7 @@ See the [configuration page](configuration.html) for information on Spark config
 
     <pre>key1=val1,key2=val2,key3=val3</pre>
   </td>
+  <td>2.2.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.docker.volumes</code></td>
@@ -440,6 +446,7 @@ See the [configuration page](configuration.html) for information on Spark config
 
     <pre>[host_path:]container_path[:ro|:rw]</pre>
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.task.labels</code></td>
@@ -450,6 +457,7 @@ See the [configuration page](configuration.html) for information on Spark config
     list more than one.  If your label includes a colon or comma, you
     can escape it with a backslash.  Ex. key:value,key2:a\:b.
   </td>
+  <td>2.2.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.home</code></td>
@@ -460,6 +468,7 @@ See the [configuration page](configuration.html) for information on Spark config
     them. Note that this is only relevant if a Spark binary package is not specified through
     <code>spark.executor.uri</code>.
   </td>
+  <td>1.1.1</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.memoryOverhead</code></td>
@@ -469,6 +478,7 @@ See the [configuration page](configuration.html) for information on Spark config
     the overhead will be larger of either 384 or 10% of <code>spark.executor.memory</code>. If set,
     the final overhead will be this value.
   </td>
+  <td>1.1.1</td>
 </tr>
 <tr>
   <td><code>spark.mesos.uris</code></td>
@@ -478,6 +488,7 @@ See the [configuration page](configuration.html) for information on Spark config
     when driver or executor is launched by Mesos.  This applies to
     both coarse-grained and fine-grained mode.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.principal</code></td>
@@ -485,6 +496,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Set the principal with which Spark framework will use to authenticate with Mesos.  You can also specify this via the environment variable `SPARK_MESOS_PRINCIPAL`.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.principal.file</code></td>
@@ -492,6 +504,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Set the file containing the principal with which Spark framework will use to authenticate with Mesos.  Allows specifying the principal indirectly in more security conscious deployments.  The file must be readable by the user launching the job and be UTF-8 encoded plaintext.  You can also specify this via the environment variable `SPARK_MESOS_PRINCIPAL_FILE`.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.secret</code></td>
@@ -500,6 +513,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the secret with which Spark framework will use to authenticate with Mesos. Used, for example, when
     authenticating with the registry.  You can also specify this via the environment variable `SPARK_MESOS_SECRET`.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.secret.file</code></td>
@@ -508,6 +522,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the file containing the secret with which Spark framework will use to authenticate with Mesos. Used, for example, when
     authenticating with the registry.  Allows for specifying the secret indirectly in more security conscious deployments.  The file must be readable by the user launching the job and be UTF-8 encoded plaintext.  You can also specify this via the environment variable `SPARK_MESOS_SECRET_FILE`.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.role</code></td>
@@ -516,6 +531,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the role of this Spark framework for Mesos. Roles are used in Mesos for reservations
     and resource weight sharing.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.constraints</code></td>
@@ -532,6 +548,7 @@ See the [configuration page](configuration.html) for information on Spark config
       <li>In case there is no value present as a part of the constraint any offer with the corresponding attribute will be accepted (without value check).</li>
     </ul>
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.driver.constraints</code></td>
@@ -540,6 +557,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Same as <code>spark.mesos.constraints</code> except applied to drivers when launched through the dispatcher. By default,
     all offers with sufficient resources will be accepted.
   </td>
+  <td>2.2.1</td>
 </tr>
 <tr>
   <td><code>spark.mesos.containerizer</code></td>
@@ -550,6 +568,7 @@ See the [configuration page](configuration.html) for information on Spark config
     containerizers for docker: the "docker" containerizer, and the preferred
     "mesos" containerizer.  Read more here: http://mesos.apache.org/documentation/latest/container-image/
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.driver.webui.url</code></td>
@@ -558,6 +577,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the Spark Mesos driver webui_url for interacting with the framework.
     If unset it will point to Spark's internal web UI.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.driver.labels</code></td>
@@ -566,8 +586,8 @@ See the [configuration page](configuration.html) for information on Spark config
     Mesos labels to add to the driver.  See <code>spark.mesos.task.labels</code>
     for formatting information.
   </td>
+  <td>2.3.0</td>
 </tr>
-
 <tr>
   <td>
     <code>spark.mesos.driver.secret.values</code>,
@@ -616,8 +636,8 @@ See the [configuration page](configuration.html) for information on Spark config
       <pre>spark.mesos.driver.secret.names=password1,password2</pre>
     </p>
   </td>
+  <td>2.3.0</td>
 </tr>
-
 <tr>
   <td>
     <code>spark.mesos.driver.secret.envkeys</code>,
@@ -670,8 +690,8 @@ See the [configuration page](configuration.html) for information on Spark config
       <pre>spark.mesos.driver.secret.filenames=pwdfile1,pwdfile2</pre>
     </p>
   </td>
+  <td>2.3.0</td>
 </tr>
-
 <tr>
   <td><code>spark.mesos.driverEnv.[EnvironmentVariableName]</code></td>
   <td><code>(none)</code></td>
@@ -681,6 +701,7 @@ See the [configuration page](configuration.html) for information on Spark config
     driver process. The user can specify multiple of these to set
     multiple environment variables.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.dispatcher.webui.url</code></td>
@@ -689,7 +710,8 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the Spark Mesos dispatcher webui_url for interacting with the framework.
     If unset it will point to Spark's internal web UI.
   </td>
-  </tr>
+  <td>2.0.0</td>
+</tr>
 <tr>
   <td><code>spark.mesos.dispatcher.driverDefault.[PropertyName]</code></td>
   <td><code>(none)</code></td>
@@ -699,7 +721,8 @@ See the [configuration page](configuration.html) for information on Spark config
     spark.mesos.dispatcher.driverProperty.spark.executor.memory=32g
     results in the executors for all drivers submitted in cluster mode
     to run in 32g containers.
-</td>
+  </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.dispatcher.historyServer.url</code></td>
@@ -709,6 +732,7 @@ See the [configuration page](configuration.html) for information on Spark config
     server</a>.  The dispatcher will then link each driver to its entry
     in the history server.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.gpus.max</code></td>
@@ -717,7 +741,8 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the maximum number GPU resources to acquire for this job. Note that executors will still launch when no GPU resources are found
     since this configuration is just an upper limit and not a guaranteed amount.
   </td>
-  </tr>
+  <td>2.1.0</td>
+</tr>
 <tr>
   <td><code>spark.mesos.network.name</code></td>
   <td><code>(none)</code></td>
@@ -728,6 +753,7 @@ See the [configuration page](configuration.html) for information on Spark config
     <a href="http://mesos.apache.org/documentation/latest/cni/">the Mesos CNI docs</a>
     for more details.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.network.labels</code></td>
@@ -742,6 +768,7 @@ See the [configuration page](configuration.html) for information on Spark config
     <a href="http://mesos.apache.org/documentation/latest/cni/#mesos-meta-data-to-cni-plugins">the Mesos CNI docs</a>
     for more details.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.fetcherCache.enable</code></td>
@@ -752,6 +779,7 @@ See the [configuration page](configuration.html) for information on Spark config
     href="http://mesos.apache.org/documentation/latest/fetcher/">Mesos
     Fetcher Cache</a>
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.driver.failoverTimeout</code></td>
@@ -763,6 +791,7 @@ See the [configuration page](configuration.html) for information on Spark config
     executors. The default value is zero, meaning no timeout: if the
     driver disconnects, the master immediately tears down the framework.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.rejectOfferDuration</code></td>
@@ -772,6 +801,7 @@ See the [configuration page](configuration.html) for information on Spark config
     `spark.mesos.rejectOfferDurationForUnmetConstraints`,
     `spark.mesos.rejectOfferDurationForReachedMaxCores`
   </td>
+  <td>2.2.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.rejectOfferDurationForUnmetConstraints</code></td>
@@ -779,6 +809,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Time to consider unused resources refused with unmet constraints
   </td>
+  <td>1.6.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.rejectOfferDurationForReachedMaxCores</code></td>
@@ -787,6 +818,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Time to consider unused resources refused when maximum number of cores
     <code>spark.cores.max</code> is reached
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.appJar.local.resolution.mode</code></td>
@@ -799,6 +831,7 @@ See the [configuration page](configuration.html) for information on Spark config
     If the value is `container` then spark submit in the container will use the jar in the container's path:
     `/path/to/jar`.
   </td>
+  <td>2.4.0</td>
 </tr>
 </table>
 
diff --git a/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/config.scala b/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/config.scala
index 79a1137..e1c0d18 100644
--- a/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/config.scala
+++ b/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/config.scala
@@ -28,6 +28,7 @@ package object config {
       ConfigBuilder(s"spark.mesos.$taskType.secret.names")
         .doc("A comma-separated list of secret reference names. Consult the Mesos Secret " +
           "protobuf for more information.")
+        .version("2.3.0")
         .stringConf
         .toSequence
         .createOptional
@@ -35,6 +36,7 @@ package object config {
     private[spark] val SECRET_VALUES =
       ConfigBuilder(s"spark.mesos.$taskType.secret.values")
         .doc("A comma-separated list of secret values.")
+        .version("2.3.0")
         .stringConf
         .toSequence
         .createOptional
@@ -43,6 +45,7 @@ package object config {
       ConfigBuilder(s"spark.mesos.$taskType.secret.envkeys")
         .doc("A comma-separated list of the environment variables to contain the secrets." +
           "The environment variable will be set on the driver.")
+        .version("2.3.0")
         .stringConf
         .toSequence
         .createOptional
@@ -51,6 +54,7 @@ package object config {
       ConfigBuilder(s"spark.mesos.$taskType.secret.filenames")
         .doc("A comma-separated list of file paths secret will be written to.  Consult the Mesos " +
           "Secret protobuf for more information.")
+        .version("2.3.0")
         .stringConf
         .toSequence
         .createOptional
@@ -59,6 +63,7 @@ package object config {
   private[spark] val CREDENTIAL_PRINCIPAL =
     ConfigBuilder("spark.mesos.principal")
       .doc("Name of the Kerberos principal to authenticate Spark to Mesos.")
+      .version("1.5.0")
       .stringConf
       .createOptional
 
@@ -66,18 +71,21 @@ package object config {
     ConfigBuilder("spark.mesos.principal.file")
       .doc("The path of file which contains the name of the Kerberos principal " +
         "to authenticate Spark to Mesos.")
+      .version("2.4.0")
       .stringConf
       .createOptional
 
   private[spark] val CREDENTIAL_SECRET =
     ConfigBuilder("spark.mesos.secret")
       .doc("The secret value to authenticate Spark to Mesos.")
+      .version("1.5.0")
       .stringConf
       .createOptional
 
   private[spark] val CREDENTIAL_SECRET_FILE =
     ConfigBuilder("spark.mesos.secret.file")
       .doc("The path of file which contains the secret value to authenticate Spark to Mesos.")
+      .version("2.4.0")
       .stringConf
       .createOptional
 
@@ -85,6 +93,7 @@ package object config {
 
   private[spark] val SHUFFLE_CLEANER_INTERVAL_S =
     ConfigBuilder("spark.shuffle.cleaner.interval")
+      .version("2.0.0")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefaultString("30s")
 
@@ -92,6 +101,7 @@ package object config {
     ConfigBuilder("spark.mesos.dispatcher.webui.url")
       .doc("Set the Spark Mesos dispatcher webui_url for interacting with the " +
         "framework. If unset it will point to Spark's internal web UI.")
+      .version("2.0.0")
       .stringConf
       .createOptional
 
@@ -99,6 +109,7 @@ package object config {
     ConfigBuilder("spark.mesos.dispatcher.historyServer.url")
       .doc("Set the URL of the history server. The dispatcher will then " +
         "link each driver to its entry in the history server.")
+      .version("2.1.0")
       .stringConf
       .createOptional
 
@@ -107,6 +118,7 @@ package object config {
       .doc("Mesos labels to add to the driver.  Labels are free-form key-value pairs. Key-value " +
         "pairs should be separated by a colon, and commas used to list more than one." +
         "Ex. key:value,key2:value2")
+      .version("2.3.0")
       .stringConf
       .createOptional
 
@@ -114,6 +126,7 @@ package object config {
     ConfigBuilder("spark.mesos.driver.webui.url")
       .doc("Set the Spark Mesos driver webui_url for interacting with the framework. " +
         "If unset it will point to Spark's internal web UI.")
+      .version("2.0.0")
       .stringConf
       .createOptional
 
@@ -125,6 +138,7 @@ package object config {
     ConfigBuilder("spark.mesos.driver.failoverTimeout")
       .doc("Amount of time in seconds that the master will wait to hear from the driver, " +
           "during a temporary disconnection, before tearing down all the executors.")
+      .version("2.3.0")
       .doubleConf
       .createWithDefault(0.0)
 
@@ -132,6 +146,7 @@ package object config {
     ConfigBuilder("spark.mesos.network.name")
      .doc("Attach containers to the given named network. If this job is launched " +
        "in cluster mode, also launch the driver in the given named network.")
+      .version("2.1.0")
       .stringConf
       .createOptional
 
@@ -140,6 +155,7 @@ package object config {
       .doc("Network labels to pass to CNI plugins.  This is a comma-separated list " +
         "of key-value pairs, where each key-value pair has the format key:value. " +
         "Example: key1:val1,key2:val2")
+      .version("2.3.0")
       .stringConf
       .createOptional
 
@@ -147,19 +163,21 @@ package object config {
     ConfigBuilder("spark.mesos.driver.constraints")
       .doc("Attribute based constraints on mesos resource offers. Applied by the dispatcher " +
         "when launching drivers. Default is to accept all offers with sufficient resources.")
+      .version("2.2.1")
       .stringConf
       .createWithDefault("")
 
   private[spark] val DRIVER_FRAMEWORK_ID =
     ConfigBuilder("spark.mesos.driver.frameworkId")
+      .version("2.1.0")
       .stringConf
       .createOptional
 
   private[spark] val EXECUTOR_URI =
-    ConfigBuilder("spark.executor.uri").stringConf.createOptional
+    ConfigBuilder("spark.executor.uri").version("0.8.0").stringConf.createOptional
 
   private[spark] val PROXY_BASE_URL =
-    ConfigBuilder("spark.mesos.proxy.baseURL").stringConf.createOptional
+    ConfigBuilder("spark.mesos.proxy.baseURL").version("2.3.0").stringConf.createOptional
 
   private[spark] val COARSE_MODE =
     ConfigBuilder("spark.mesos.coarse")
@@ -167,22 +185,26 @@ package object config {
         "Spark acquires one long-lived Mesos task on each machine. If set to false, runs over " +
         "Mesos cluster in \"fine-grained\" sharing mode, where one Mesos task is created per " +
         "Spark task.")
-      .booleanConf.createWithDefault(true)
+      .version("0.6.0")
+      .booleanConf
+      .createWithDefault(true)
 
   private[spark] val COARSE_SHUTDOWN_TIMEOUT =
     ConfigBuilder("spark.mesos.coarse.shutdownTimeout")
+      .version("2.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .checkValue(_ >= 0, s"spark.mesos.coarse.shutdownTimeout must be >= 0")
       .createWithDefaultString("10s")
 
   private[spark] val MAX_DRIVERS =
-    ConfigBuilder("spark.mesos.maxDrivers").intConf.createWithDefault(200)
+    ConfigBuilder("spark.mesos.maxDrivers").version("1.4.0").intConf.createWithDefault(200)
 
   private[spark] val RETAINED_DRIVERS =
-    ConfigBuilder("spark.mesos.retainedDrivers").intConf.createWithDefault(200)
+    ConfigBuilder("spark.mesos.retainedDrivers").version("1.4.0").intConf.createWithDefault(200)
 
   private[spark] val CLUSTER_RETRY_WAIT_MAX_SECONDS =
     ConfigBuilder("spark.mesos.cluster.retry.wait.max")
+      .version("1.4.0")
       .intConf
       .createWithDefault(60) // 1 minute
 
@@ -190,6 +212,7 @@ package object config {
     ConfigBuilder("spark.mesos.fetcherCache.enable")
       .doc("If set to true, all URIs (example: `spark.executor.uri`, `spark.mesos.uris`) will be " +
         "cached by the Mesos Fetcher Cache.")
+      .version("2.1.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -201,6 +224,7 @@ package object config {
         "resource from the host's file system. If the value is unknown it prints a warning msg " +
         "in the dispatcher logs and defaults to `host`. If the value is `container` then spark " +
         "submit in the container will use the jar in the container's path: `/path/to/jar`.")
+      .version("2.4.0")
       .stringConf
       .checkValues(Set("host", "container"))
       .createWithDefault("host")
@@ -210,12 +234,14 @@ package object config {
       .doc("Time to consider unused resources refused, serves as a fallback of " +
         "`spark.mesos.rejectOfferDurationForUnmetConstraints`, " +
         "`spark.mesos.rejectOfferDurationForReachedMaxCores`.")
+      .version("2.2.0")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefaultString("120s")
 
   private[spark] val REJECT_OFFER_DURATION_FOR_UNMET_CONSTRAINTS =
     ConfigBuilder("spark.mesos.rejectOfferDurationForUnmetConstraints")
       .doc("Time to consider unused resources refused with unmet constraints.")
+      .version("1.6.0")
       .timeConf(TimeUnit.SECONDS)
       .createOptional
 
@@ -223,6 +249,7 @@ package object config {
     ConfigBuilder("spark.mesos.rejectOfferDurationForReachedMaxCores")
       .doc("Time to consider unused resources refused when maximum number of cores " +
         "`spark.cores.max` is reached.")
+      .version("2.0.0")
       .timeConf(TimeUnit.SECONDS)
       .createOptional
 
@@ -231,6 +258,7 @@ package object config {
       .doc("A comma-separated list of URIs to be downloaded to the sandbox when driver or " +
         "executor is launched by Mesos. This applies to both coarse-grained and fine-grained " +
         "mode.")
+      .version("1.5.0")
       .stringConf
       .toSequence
       .createWithDefault(Nil)
@@ -241,6 +269,7 @@ package object config {
         "By default, the executors will simply use the driver's Spark home directory, which may " +
         "not be visible to them. Note that this is only relevant if a Spark binary package is " +
         "not specified through `spark.executor.uri`.")
+      .version("1.1.1")
       .stringConf
       .createOptional
 
@@ -250,6 +279,7 @@ package object config {
         "include the cores used to run the Spark tasks. In other words, even if no Spark task " +
         "is being run, each Mesos executor will occupy the number of cores configured here. " +
         "The value can be a floating point number.")
+      .version("1.4.0")
       .doubleConf
       .createWithDefault(1.0)
 
@@ -259,6 +289,7 @@ package object config {
         "more cores allocated. It instead means that an executor will \"pretend\" it has more " +
         "cores, so that the driver will send it more tasks. Use this to increase parallelism. " +
         "This setting is only used for Mesos coarse-grained mode.")
+      .version("0.6.0")
       .intConf
       .createWithDefault(0)
 
@@ -267,6 +298,7 @@ package object config {
       .doc("The amount of additional memory, specified in MiB, to be allocated per executor. " +
         "By default, the overhead will be larger of either 384 or 10% of " +
         "`spark.executor.memory`. If set, the final overhead will be this value.")
+      .version("1.1.1")
       .intConf
       .createOptional
 
@@ -277,6 +309,7 @@ package object config {
         "The installed path of Spark in the image can be specified with " +
         "`spark.mesos.executor.home`; the installed path of the Mesos library can be specified " +
         "with `spark.executorEnv.MESOS_NATIVE_JAVA_LIBRARY`.")
+      .version("1.4.0")
       .stringConf
       .createOptional
 
@@ -285,11 +318,13 @@ package object config {
       .doc("Force Mesos agents to pull the image specified in " +
         "`spark.mesos.executor.docker.image`. By default Mesos agents will not pull images they " +
         "already have cached.")
+      .version("2.1.0")
       .booleanConf
       .createOptional
 
   private[spark] val EXECUTOR_DOCKER_PORT_MAPS =
     ConfigBuilder("spark.mesos.executor.docker.portmaps")
+      .version("1.4.0")
       .stringConf
       .toSequence
       .createOptional
@@ -299,6 +334,7 @@ package object config {
       .doc("Set the list of custom parameters which will be passed into the `docker run` " +
         "command when launching the Spark executor on Mesos using the docker containerizer. " +
         "The format of this property is a list of key/value pairs which pair looks key1=value1.")
+      .version("2.2.0")
       .stringConf
       .toSequence
       .createOptional
@@ -309,6 +345,7 @@ package object config {
         "using `spark.mesos.executor.docker.image`. The format of this property is a list of " +
         "mappings following the form passed to `docker run -v`. That is they take the form:  " +
         "`[host_path:]container_path[:ro|:rw]`")
+      .version("1.4.0")
       .stringConf
       .toSequence
       .createOptional
@@ -318,6 +355,7 @@ package object config {
       .doc("Set the maximum number GPU resources to acquire for this job. Note that executors " +
         "will still launch when no GPU resources are found since this configuration is just an " +
         "upper limit and not a guaranteed amount.")
+      .version("2.1.0")
       .intConf
       .createWithDefault(0)
 
@@ -327,6 +365,7 @@ package object config {
         "Key-value pairs should be separated by a colon, and commas used to list more than one. " +
         "If your label includes a colon or comma, you can escape it with a backslash. " +
         "Ex. key:value,key2:a\\:b.")
+      .version("2.2.0")
       .stringConf
       .createWithDefault("")
 
@@ -335,6 +374,7 @@ package object config {
       .doc("Attribute-based constraints on mesos resource offers. By default, all resource " +
         "offers will be accepted. This setting applies only to executors. Refer to Mesos " +
         "Attributes & Resources doc for more information on attributes.")
+      .version("1.5.0")
       .stringConf
       .createWithDefault("")
 
@@ -344,6 +384,7 @@ package object config {
         "Mesos supports two types of containerizers for docker: the \"docker\" containerizer, " +
         "and the preferred \"mesos\" containerizer. " +
         "Read more here: http://mesos.apache.org/documentation/latest/container-image/")
+      .version("2.1.0")
       .stringConf
       .checkValues(Set("docker", "mesos"))
       .createWithDefault("docker")
@@ -352,6 +393,7 @@ package object config {
     ConfigBuilder("spark.mesos.role")
       .doc("Set the role of this Spark framework for Mesos. Roles are used in Mesos for " +
         "reservations and resource weight sharing.")
+      .version("1.5.0")
       .stringConf
       .createOptional
 

