[spark] branch branch-3.0 updated: [SPARK-30969][CORE] Remove resource coordination support from Standalone

2020-03-02 Thread jiangxb1987
This is an automated email from the ASF dual-hosted git repository.

jiangxb1987 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
 new 148262f  [SPARK-30969][CORE] Remove resource coordination support from Standalone
148262f is described below

commit 148262f3fbdf7c5da7cd147cf43bf5ebab5f5244
Author: yi.wu 
AuthorDate: Mon Mar 2 11:23:07 2020 -0800

[SPARK-30969][CORE] Remove resource coordination support from Standalone

### What changes were proposed in this pull request?

Remove automatic resource coordination support from Standalone.
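
In code terms, the driver-side simplification in SparkContext looks like the
following (a before/after sketch of the hunk in this diff, not a standalone
runnable snippet; the identifiers come from the patch itself):

```scala
// Before: a driver submitted in client mode under Standalone synced its
// resource assignments into a shared coordination directory to avoid
// collisions with other drivers/workers on the same host.
val allResources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt)
_resources =
  if (isClientStandalone) {
    acquireResources(_conf, SPARK_DRIVER_PREFIX, allResources, Utils.getProcessId)
  } else {
    allResources
  }

// After: the driver simply uses whatever resources it discovers.
_resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt)
```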

### Why are the changes needed?

Resource coordination was mainly designed for the scenario where multiple 
workers are launched on the same host. However, that scenario no longer exists 
in today's Spark: while the very first versions allowed only one executor per 
Worker, Spark can now start multiple executors in a single Worker, so there is 
no real benefit in launching multiple workers on the same host. Thus, it's not 
worth carrying such a complicated implementation and its potential high 
maintain [...]
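
For illustration, a minimal sketch of a setup where per-host coordination is 
unnecessary (the master URL, core counts, and memory sizes are assumptions for 
the example, not values from this patch): a single Worker started with, say, 
SPARK_WORKER_CORES=8 can host two 4-core executors of the same application.

```scala
import org.apache.spark.{SparkConf, SparkContext}

// One Standalone Worker can launch several executors of an application,
// so there is no need to run multiple Workers on the same host.
val conf = new SparkConf()
  .setMaster("spark://master-host:7077")  // hypothetical master URL
  .setAppName("multi-executor-per-worker")
  .set("spark.executor.cores", "4")       // each executor takes 4 of the Worker's 8 cores
  .set("spark.executor.memory", "4g")

val sc = new SparkContext(conf)
```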

### Does this PR introduce any user-facing change?

No, resource coordination is a Spark 3.0 feature that has not yet been released.

### How was this patch tested?

Pass Jenkins.

Closes #27722 from Ngone51/abandon_coordination.

Authored-by: yi.wu 
Signed-off-by: Xingbo Jiang 
(cherry picked from commit b517f991fe0c95a186872d38be6a2091d9326195)
Signed-off-by: Xingbo Jiang 
---
 .gitignore |   1 -
 .../main/scala/org/apache/spark/SparkContext.scala |  25 +-
 .../spark/deploy/StandaloneResourceUtils.scala | 263 +
 .../org/apache/spark/deploy/worker/Worker.scala|  27 +--
 .../org/apache/spark/internal/config/package.scala |  17 --
 .../main/scala/org/apache/spark/util/Utils.scala   |  22 --
 .../scala/org/apache/spark/SparkContextSuite.scala |   6 +-
 .../apache/spark/deploy/worker/WorkerSuite.scala   |  74 +-
 .../resource/ResourceDiscoveryPluginSuite.scala|   4 -
 docs/configuration.md  |  27 +--
 docs/spark-standalone.md   |  10 +-
 11 files changed, 17 insertions(+), 459 deletions(-)

diff --git a/.gitignore b/.gitignore
index 798e8ac..198fdee 100644
--- a/.gitignore
+++ b/.gitignore
@@ -72,7 +72,6 @@ scalastyle-on-compile.generated.xml
 scalastyle-output.xml
 scalastyle.txt
 spark-*-bin-*.tgz
-spark-resources/
 spark-tests.log
 src_managed/
 streaming-tests.log
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 91188d5..bcbb7e4 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -41,7 +41,6 @@ import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFor
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
-import org.apache.spark.deploy.StandaloneResourceUtils._
 import org.apache.spark.executor.{ExecutorMetrics, ExecutorMetricsSource}
 import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}
 import org.apache.spark.internal.Logging
@@ -250,15 +249,6 @@ class SparkContext(config: SparkConf) extends Logging {
 
   def isLocal: Boolean = Utils.isLocalMaster(_conf)
 
-  private def isClientStandalone: Boolean = {
-    val isSparkCluster = master match {
-      case SparkMasterRegex.SPARK_REGEX(_) => true
-      case SparkMasterRegex.LOCAL_CLUSTER_REGEX(_, _, _) => true
-      case _ => false
-    }
-    deployMode == "client" && isSparkCluster
-  }
-
   /**
    * @return true if context is stopped or in the midst of stopping.
    */
@@ -396,17 +386,7 @@ class SparkContext(config: SparkConf) extends Logging {
     _driverLogger = DriverLogger(_conf)
 
     val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE)
-    val allResources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt)
-    _resources = {
-      // driver submitted in client mode under Standalone may have conflicting resources with
-      // other drivers/workers on this host. We should sync driver's resources info into
-      // SPARK_RESOURCES/SPARK_RESOURCES_COORDINATE_DIR/ to avoid collision.
-      if (isClientStandalone) {
-        acquireResources(_conf, SPARK_DRIVER_PREFIX, allResources, Utils.getProcessId)
-      } else {
-        allResources
-      }
-    }
+    _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt)
     logResourceInfo(SPARK_DRIVER_PREFIX, _resources)
 
     // log out spark.app.name in the Spark driver logs
@@ -2019,9 +1999,6 @@ class 
