GitHub user ueshin commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19954#discussion_r157248504
  
    --- Diff: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterManager.scala ---
    @@ -45,6 +45,60 @@ private[spark] class KubernetesClusterManager extends ExternalClusterManager wit
           masterURL: String,
           scheduler: TaskScheduler): SchedulerBackend = {
         val sparkConf = sc.getConf
    +    val maybeInitContainerConfigMap = sparkConf.get(INIT_CONTAINER_CONFIG_MAP_NAME)
    +    val maybeInitContainerConfigMapKey = sparkConf.get(INIT_CONTAINER_CONFIG_MAP_KEY_CONF)
    +
    +    if (maybeInitContainerConfigMap.isEmpty) {
    +      logWarning("The executor's init-container config map was not 
specified. Executors will " +
    +        "therefore not attempt to fetch remote or submitted dependencies.")
    +    }
    +
    +    if (maybeInitContainerConfigMapKey.isEmpty) {
    +      logWarning("The executor's init-container config map key was not 
specified. Executors will " +
    +        "therefore not attempt to fetch remote or submitted dependencies.")
    +    }
    +
    +    // Only set up the bootstrap if they've provided both the config map key and the config map
    +    // name. The config map might not be provided if init-containers aren't being used to
    +    // bootstrap dependencies.
    +    val maybeInitContainerBootstrap = for {
    +      configMap <- maybeInitContainerConfigMap
    +      configMapKey <- maybeInitContainerConfigMapKey
    +    } yield {
    +      val initContainerImage = sparkConf
    +        .get(INIT_CONTAINER_DOCKER_IMAGE)
    +        .getOrElse(throw new SparkException(
    +          "Must specify the init-container Docker image when there are 
remote dependencies"))
    +      new InitContainerBootstrapImpl(
    +        initContainerImage,
    +        sparkConf.get(DOCKER_IMAGE_PULL_POLICY),
    +        sparkConf.get(JARS_DOWNLOAD_LOCATION),
    +        sparkConf.get(FILES_DOWNLOAD_LOCATION),
    +        sparkConf.get(INIT_CONTAINER_MOUNT_TIMEOUT),
    +        configMap,
    +        configMapKey,
    +        SPARK_POD_EXECUTOR_ROLE,
    +        sparkConf)
    +    }
    +
    +    val executorSecretNamesToMountPaths = ConfigurationUtils.parsePrefixedKeyValuePairs(
    +      sparkConf, KUBERNETES_EXECUTOR_SECRETS_PREFIX)
    +    val mayBeMountSecretBootstrap = if (executorSecretNamesToMountPaths.nonEmpty) {
    +      Some(new MountSecretsBootstrapImpl(executorSecretNamesToMountPaths))
    +    } else {
    +      None
    +    }
    +    // Mount user-specified executor secrets also into the executor's init-container. The
    +    // init-container may need credentials in the secrets to be able to download remote
    +    // dependencies. The executor's main container and its init-container share the secrets
    +    // because the init-container is sort of an implementation details and this sharing
    +    // avoids introducing a dedicated configuration property just for the init-container.
    +    val mayBeInitContainerMountSecretsBootstrap = if (maybeInitContainerBootstrap.nonEmpty &&
    --- End diff --
    
    Oh, I see. Thanks!
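
    For reference, a minimal standalone sketch of the dual-Option gate the diff relies on: the for-comprehension yields a bootstrap only when both the config map name and the config map key are present, and evaluates to None otherwise. The names `OptionGateSketch`, `Bootstrap`, and `buildBootstrap` are hypothetical placeholders for illustration, not the actual Spark classes.

    object OptionGateSketch {
      // Placeholder stand-in for InitContainerBootstrapImpl; not the actual Spark class.
      final case class Bootstrap(configMap: String, configMapKey: String)

      // The for-comprehension desugars to flatMap/map over the two Options, so a
      // Bootstrap is produced only when BOTH the name and the key are defined.
      def buildBootstrap(
          maybeConfigMap: Option[String],
          maybeConfigMapKey: Option[String]): Option[Bootstrap] =
        for {
          configMap <- maybeConfigMap
          configMapKey <- maybeConfigMapKey
        } yield Bootstrap(configMap, configMapKey)

      def main(args: Array[String]): Unit = {
        println(buildBootstrap(Some("init-cm"), Some("init-cm-key"))) // Some(Bootstrap(init-cm,init-cm-key))
        println(buildBootstrap(Some("init-cm"), None))                // None
      }
    }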


---
