[ 
https://issues.apache.org/jira/browse/SPARK-25220?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16727009#comment-16727009
 ] 

ASF GitHub Bot commented on SPARK-25220:
----------------------------------------

vanzin closed pull request #22212: [SPARK-25220] Separate kubernetes node 
selector config between driver and executors.
URL: https://github.com/apache/spark/pull/22212
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git a/docs/running-on-kubernetes.md b/docs/running-on-kubernetes.md
index 8f84ca044e163..fd9606ebbd1cf 100644
--- a/docs/running-on-kubernetes.md
+++ b/docs/running-on-kubernetes.md
@@ -663,11 +663,21 @@ specific to Spark on Kubernetes.
   </td>
 </tr>
 <tr>
-  <td><code>spark.kubernetes.node.selector.[labelKey]</code></td>
+  <td><code>spark.kubernetes.driver.selector.[labelKey]</code></td>
   <td>(none)</td>
   <td>
-    Adds to the node selector of the driver pod and executor pods, with key 
<code>labelKey</code> and the value as the
-    configuration's value. For example, setting 
<code>spark.kubernetes.node.selector.identifier</code> to 
<code>myIdentifier</code>
+    Adds to the node selector of the driver pod, with key 
<code>labelKey</code> and the value as the
+    configuration's value. For example, setting 
<code>spark.kubernetes.driver.selector.identifier</code> to 
<code>myIdentifier</code>
+    will result in the driver pod having a node selector with 
key <code>identifier</code> and value
+     <code>myIdentifier</code>. Multiple node selector keys can be added by 
setting multiple configurations with this prefix.
+  </td>
+</tr>
+<tr>
+  <td><code>spark.kubernetes.executor.selector.[labelKey]</code></td>
+  <td>(none)</td>
+  <td>
+    Adds to the node selector of the executor pods, with key 
<code>labelKey</code> and the value as the
+    configuration's value. For example, setting 
<code>spark.kubernetes.executor.selector.identifier</code> to 
<code>myIdentifier</code>
     will result in the executor pods having a node selector with 
key <code>identifier</code> and value
      <code>myIdentifier</code>. Multiple node selector keys can be added by 
setting multiple configurations with this prefix.
   </td>
diff --git 
a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
 
b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
index 1b582fe53624a..5677cd6392c1f 100644
--- 
a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
+++ 
b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
@@ -228,14 +228,14 @@ private[spark] object Config extends Logging {
   val KUBERNETES_AUTH_SUBMISSION_CONF_PREFIX =
     "spark.kubernetes.authenticate.submission"
 
-  val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
-
+  val KUBERNETES_DRIVER_NODE_SELECTOR_PREFIX = 
"spark.kubernetes.driver.selector."
   val KUBERNETES_DRIVER_LABEL_PREFIX = "spark.kubernetes.driver.label."
   val KUBERNETES_DRIVER_ANNOTATION_PREFIX = 
"spark.kubernetes.driver.annotation."
   val KUBERNETES_DRIVER_SECRETS_PREFIX = "spark.kubernetes.driver.secrets."
   val KUBERNETES_DRIVER_SECRET_KEY_REF_PREFIX = 
"spark.kubernetes.driver.secretKeyRef."
   val KUBERNETES_DRIVER_VOLUMES_PREFIX = "spark.kubernetes.driver.volumes."
 
+  val KUBERNETES_EXECUTOR_NODE_SELECTOR_PREFIX = 
"spark.kubernetes.executor.selector."
   val KUBERNETES_EXECUTOR_LABEL_PREFIX = "spark.kubernetes.executor.label."
   val KUBERNETES_EXECUTOR_ANNOTATION_PREFIX = 
"spark.kubernetes.executor.annotation."
   val KUBERNETES_EXECUTOR_SECRETS_PREFIX = "spark.kubernetes.executor.secrets."
diff --git 
a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesConf.scala
 
b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesConf.scala
index 3aa35d419073f..3fe21498406a7 100644
--- 
a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesConf.scala
+++ 
b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesConf.scala
@@ -94,8 +94,11 @@ private[spark] case class KubernetesConf[T <: 
KubernetesRoleSpecificConf](
       }
   }
 
-  def nodeSelector(): Map[String, String] =
-    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, 
KUBERNETES_NODE_SELECTOR_PREFIX)
+  def driverSelector(): Map[String, String] = 
+    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, 
KUBERNETES_DRIVER_NODE_SELECTOR_PREFIX)
+
+  def executorSelector(): Map[String, String] =
+    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, 
KUBERNETES_EXECUTOR_NODE_SELECTOR_PREFIX)
 
   def get[T](config: ConfigEntry[T]): T = sparkConf.get(config)
 
diff --git 
a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicDriverFeatureStep.scala
 
b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicDriverFeatureStep.scala
index 575bc54ffe2bb..6a122f2c6e65b 100644
--- 
a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicDriverFeatureStep.scala
+++ 
b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicDriverFeatureStep.scala
@@ -121,7 +121,7 @@ private[spark] class BasicDriverFeatureStep(
         .endMetadata()
       .withNewSpec()
         .withRestartPolicy("Never")
-        .withNodeSelector(conf.nodeSelector().asJava)
+        .withNodeSelector(conf.driverSelector().asJava)
         .addToImagePullSecrets(conf.imagePullSecrets(): _*)
         .endSpec()
       .build()
diff --git 
a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicExecutorFeatureStep.scala
 
b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicExecutorFeatureStep.scala
index c37f713c56de1..804e9e3a2f2ea 100644
--- 
a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicExecutorFeatureStep.scala
+++ 
b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicExecutorFeatureStep.scala
@@ -170,7 +170,7 @@ private[spark] class BasicExecutorFeatureStep(
       .editOrNewSpec()
         .withHostname(hostname)
         .withRestartPolicy("Never")
-        .withNodeSelector(kubernetesConf.nodeSelector().asJava)
+        .withNodeSelector(kubernetesConf.executorSelector().asJava)
         .addToImagePullSecrets(kubernetesConf.imagePullSecrets(): _*)
         .endSpec()
       .build()


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


> [K8S] Split out node selector config between driver and executors.
> ------------------------------------------------------------------
>
>                 Key: SPARK-25220
>                 URL: https://issues.apache.org/jira/browse/SPARK-25220
>             Project: Spark
>          Issue Type: Improvement
>          Components: Kubernetes
>    Affects Versions: 2.4.0
>            Reporter: Jonathan A Weaver
>            Priority: Trivial
>              Labels: Kubernetes
>
> Separated node selector config option between executors and driver.
> This removes the spark.kubernetes.node.selector config option and separates 
> it into
> spark.kubernetes.driver.selector and spark.kubernetes.executor.selector
> to allow separate node selectors on drivers and executors.
> My personal use case for this change is that on AWS we have cheap 
> spot instances that can terminate at any moment, which is okay for executors 
> but not for the driver.
> With a single node selector option I am unable to use the spot instances to 
> save costs on the executor nodes.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@spark.apache.org
For additional commands, e-mail: issues-h...@spark.apache.org

Reply via email to