spark git commit: [SPARK-16017][CORE] Send hostname from CoarseGrainedExecutorBackend to driver

2016-06-17 Thread zsxwing
Repository: spark
Updated Branches:
  refs/heads/branch-2.0 3457497e0 -> 0701b8d95


[SPARK-16017][CORE] Send hostname from CoarseGrainedExecutorBackend to driver

## What changes were proposed in this pull request?

[SPARK-15395](https://issues.apache.org/jira/browse/SPARK-15395) changed how the 
driver obtains the executor host: the driver now receives the executor's IP 
address instead of its host name. This PR sends the hostname from executors to 
the driver so that the driver can pass it to TaskScheduler.

## How was this patch tested?

Existing unit tests.

Author: Shixiong Zhu 

Closes #13741 from zsxwing/SPARK-16017.

(cherry picked from commit 62d8fe2089659e8212753a622708517e0f4a77bc)
Signed-off-by: Shixiong Zhu 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/0701b8d9
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/0701b8d9
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/0701b8d9

Branch: refs/heads/branch-2.0
Commit: 0701b8d95caa79f220425b2a7376e88d69864663
Parents: 3457497
Author: Shixiong Zhu 
Authored: Fri Jun 17 15:48:17 2016 -0700
Committer: Shixiong Zhu 
Committed: Fri Jun 17 15:48:25 2016 -0700

--
 .../apache/spark/executor/CoarseGrainedExecutorBackend.scala  | 7 ---
 .../spark/scheduler/cluster/CoarseGrainedClusterMessage.scala | 4 ++--
 .../scheduler/cluster/CoarseGrainedSchedulerBackend.scala | 6 +++---
 .../test/scala/org/apache/spark/HeartbeatReceiverSuite.scala  | 4 ++--
 .../spark/deploy/StandaloneDynamicAllocationSuite.scala   | 2 +-
 5 files changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/0701b8d9/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
--
diff --git 
a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
 
b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index e087295..ccc6c36 100644
--- 
a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ 
b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -39,6 +39,7 @@ private[spark] class CoarseGrainedExecutorBackend(
 override val rpcEnv: RpcEnv,
 driverUrl: String,
 executorId: String,
+hostname: String,
 cores: Int,
 userClassPath: Seq[URL],
 env: SparkEnv)
@@ -57,7 +58,7 @@ private[spark] class CoarseGrainedExecutorBackend(
 rpcEnv.asyncSetupEndpointRefByURI(driverUrl).flatMap { ref =>
   // This is a very fast action so we can use "ThreadUtils.sameThread"
   driver = Some(ref)
-  ref.ask[Boolean](RegisterExecutor(executorId, self, cores, 
extractLogUrls))
+  ref.ask[Boolean](RegisterExecutor(executorId, self, hostname, cores, 
extractLogUrls))
 }(ThreadUtils.sameThread).onComplete {
   // This is a very fast action so we can use "ThreadUtils.sameThread"
   case Success(msg) =>
@@ -75,7 +76,7 @@ private[spark] class CoarseGrainedExecutorBackend(
   }
 
   override def receive: PartialFunction[Any, Unit] = {
-case RegisteredExecutor(hostname) =>
+case RegisteredExecutor =>
   logInfo("Successfully registered with driver")
   executor = new Executor(executorId, hostname, env, userClassPath, 
isLocal = false)
 
@@ -201,7 +202,7 @@ private[spark] object CoarseGrainedExecutorBackend extends 
Logging {
 driverConf, executorId, hostname, port, cores, isLocal = false)
 
   env.rpcEnv.setupEndpoint("Executor", new CoarseGrainedExecutorBackend(
-env.rpcEnv, driverUrl, executorId, cores, userClassPath, env))
+env.rpcEnv, driverUrl, executorId, hostname, cores, userClassPath, 
env))
   workerUrl.foreach { url =>
 env.rpcEnv.setupEndpoint("WorkerWatcher", new 
WorkerWatcher(env.rpcEnv, url))
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/0701b8d9/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
--
diff --git 
a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
 
b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
index 46a8291..edc8aac 100644
--- 
a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
+++ 
b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
@@ -40,8 +40,7 @@ private[spark] object CoarseGrainedClusterMessages {
 
   sealed trait RegisterExecutorResponse
 
-  case class RegisteredExecutor(hostname: String) extends 
CoarseGrainedClusterMessage
-with RegisterExecutorResponse
+  case object RegisteredExecutor extends CoarseGrainedClusterMessage with 
RegisterExecutorResponse

spark git commit: [SPARK-16017][CORE] Send hostname from CoarseGrainedExecutorBackend to driver

2016-06-17 Thread zsxwing
Repository: spark
Updated Branches:
  refs/heads/master 298c4ae81 -> 62d8fe208


[SPARK-16017][CORE] Send hostname from CoarseGrainedExecutorBackend to driver

## What changes were proposed in this pull request?

[SPARK-15395](https://issues.apache.org/jira/browse/SPARK-15395) changed how the 
driver obtains the executor host: the driver now receives the executor's IP 
address instead of its host name. This PR sends the hostname from executors to 
the driver so that the driver can pass it to TaskScheduler.

## How was this patch tested?

Existing unit tests.

Author: Shixiong Zhu 

Closes #13741 from zsxwing/SPARK-16017.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/62d8fe20
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/62d8fe20
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/62d8fe20

Branch: refs/heads/master
Commit: 62d8fe2089659e8212753a622708517e0f4a77bc
Parents: 298c4ae
Author: Shixiong Zhu 
Authored: Fri Jun 17 15:48:17 2016 -0700
Committer: Shixiong Zhu 
Committed: Fri Jun 17 15:48:17 2016 -0700

--
 .../apache/spark/executor/CoarseGrainedExecutorBackend.scala  | 7 ---
 .../spark/scheduler/cluster/CoarseGrainedClusterMessage.scala | 4 ++--
 .../scheduler/cluster/CoarseGrainedSchedulerBackend.scala | 6 +++---
 .../test/scala/org/apache/spark/HeartbeatReceiverSuite.scala  | 4 ++--
 .../spark/deploy/StandaloneDynamicAllocationSuite.scala   | 2 +-
 5 files changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/62d8fe20/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
--
diff --git 
a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
 
b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index e087295..ccc6c36 100644
--- 
a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ 
b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -39,6 +39,7 @@ private[spark] class CoarseGrainedExecutorBackend(
 override val rpcEnv: RpcEnv,
 driverUrl: String,
 executorId: String,
+hostname: String,
 cores: Int,
 userClassPath: Seq[URL],
 env: SparkEnv)
@@ -57,7 +58,7 @@ private[spark] class CoarseGrainedExecutorBackend(
 rpcEnv.asyncSetupEndpointRefByURI(driverUrl).flatMap { ref =>
   // This is a very fast action so we can use "ThreadUtils.sameThread"
   driver = Some(ref)
-  ref.ask[Boolean](RegisterExecutor(executorId, self, cores, 
extractLogUrls))
+  ref.ask[Boolean](RegisterExecutor(executorId, self, hostname, cores, 
extractLogUrls))
 }(ThreadUtils.sameThread).onComplete {
   // This is a very fast action so we can use "ThreadUtils.sameThread"
   case Success(msg) =>
@@ -75,7 +76,7 @@ private[spark] class CoarseGrainedExecutorBackend(
   }
 
   override def receive: PartialFunction[Any, Unit] = {
-case RegisteredExecutor(hostname) =>
+case RegisteredExecutor =>
   logInfo("Successfully registered with driver")
   executor = new Executor(executorId, hostname, env, userClassPath, 
isLocal = false)
 
@@ -201,7 +202,7 @@ private[spark] object CoarseGrainedExecutorBackend extends 
Logging {
 driverConf, executorId, hostname, port, cores, isLocal = false)
 
   env.rpcEnv.setupEndpoint("Executor", new CoarseGrainedExecutorBackend(
-env.rpcEnv, driverUrl, executorId, cores, userClassPath, env))
+env.rpcEnv, driverUrl, executorId, hostname, cores, userClassPath, 
env))
   workerUrl.foreach { url =>
 env.rpcEnv.setupEndpoint("WorkerWatcher", new 
WorkerWatcher(env.rpcEnv, url))
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/62d8fe20/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
--
diff --git 
a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
 
b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
index 46a8291..edc8aac 100644
--- 
a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
+++ 
b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
@@ -40,8 +40,7 @@ private[spark] object CoarseGrainedClusterMessages {
 
   sealed trait RegisterExecutorResponse
 
-  case class RegisteredExecutor(hostname: String) extends 
CoarseGrainedClusterMessage
-with RegisterExecutorResponse
+  case object RegisteredExecutor extends CoarseGrainedClusterMessage with 
RegisterExecutorResponse
 
   case class RegisterExecutorFailed(message: String)