Github user rvesse commented on a diff in the pull request:

    https://github.com/apache/spark/pull/20167#discussion_r164123285
  
    --- Diff: 
resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala
 ---
    @@ -71,40 +74,64 @@ trait MesosSchedulerUtils extends Logging {
           failoverTimeout: Option[Double] = None,
           frameworkId: Option[String] = None): SchedulerDriver = {
         val fwInfoBuilder = 
FrameworkInfo.newBuilder().setUser(sparkUser).setName(appName)
    -    val credBuilder = Credential.newBuilder()
    +    
fwInfoBuilder.setHostname(Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(
    +      conf.get(DRIVER_HOST_ADDRESS)))
         webuiUrl.foreach { url => fwInfoBuilder.setWebuiUrl(url) }
         checkpoint.foreach { checkpoint => 
fwInfoBuilder.setCheckpoint(checkpoint) }
         failoverTimeout.foreach { timeout => 
fwInfoBuilder.setFailoverTimeout(timeout) }
         frameworkId.foreach { id =>
           fwInfoBuilder.setId(FrameworkID.newBuilder().setValue(id).build())
         }
    -    
fwInfoBuilder.setHostname(Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(
    -      conf.get(DRIVER_HOST_ADDRESS)))
    -    conf.getOption("spark.mesos.principal").foreach { principal =>
    -      fwInfoBuilder.setPrincipal(principal)
    -      credBuilder.setPrincipal(principal)
    -    }
    -    conf.getOption("spark.mesos.secret").foreach { secret =>
    -      credBuilder.setSecret(secret)
    -    }
    -    if (credBuilder.hasSecret && !fwInfoBuilder.hasPrincipal) {
    -      throw new SparkException(
    -        "spark.mesos.principal must be configured when spark.mesos.secret 
is set")
    -    }
    +
         conf.getOption("spark.mesos.role").foreach { role =>
           fwInfoBuilder.setRole(role)
         }
         val maxGpus = conf.getInt("spark.mesos.gpus.max", 0)
         if (maxGpus > 0) {
           
fwInfoBuilder.addCapabilities(Capability.newBuilder().setType(Capability.Type.GPU_RESOURCES))
         }
    +    val credBuilder = buildCredentials(conf, fwInfoBuilder)
         if (credBuilder.hasPrincipal) {
           new MesosSchedulerDriver(
             scheduler, fwInfoBuilder.build(), masterUrl, credBuilder.build())
         } else {
           new MesosSchedulerDriver(scheduler, fwInfoBuilder.build(), masterUrl)
         }
       }
    +  
    +  def buildCredentials(
    +      conf: SparkConf, 
    +      fwInfoBuilder: Protos.FrameworkInfo.Builder): 
Protos.Credential.Builder = {
    +    val credBuilder = Credential.newBuilder()
    +    conf.getOption("spark.mesos.principal")
    +      .orElse(Option(conf.getenv("SPARK_MESOS_PRINCIPAL")))
    --- End diff --
    
    I am not sure I understand enough of Spark internals to answer your 
question.
    
    These variables are only necessary for Mesos, and only on the driver, which 
registers the framework with Mesos. Is it actually possible to submit jobs via 
REST into a Mesos cluster? Even if it is, the Spark framework must already exist 
at that point, thereby rendering credentials unnecessary in that scenario.
    
    Or am I missing something here?
    



---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to