spark git commit: [SPARK-5846] Correctly set job description and pool for SQL jobs

2015-02-18 Thread lian
Repository: spark
Updated Branches:
  refs/heads/branch-1.2 36e15b48e -> f6ee80b18


[SPARK-5846] Correctly set job description and pool for SQL jobs

This is #4630 but modified for the 1.2 branch, because I'm guessing it makes 
sense to fix this issue in that branch (again, unless I missed something 
obvious here...)

Author: Kay Ousterhout 

Closes #4631 from kayousterhout/SPARK-5846_1.2.1 and squashes the following 
commits:

ffe8ff2 [Kay Ousterhout] [SPARK-5846] Correctly set job description and pool 
for SQL jobs


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/f6ee80b1
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/f6ee80b1
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/f6ee80b1

Branch: refs/heads/branch-1.2
Commit: f6ee80b1885cb3822c52a4aa92ea0115c991e43f
Parents: 36e15b4
Author: Kay Ousterhout 
Authored: Thu Feb 19 10:03:56 2015 +0800
Committer: Cheng Lian 
Committed: Thu Feb 19 10:03:56 2015 +0800

--
 .../org/apache/spark/sql/hive/thriftserver/Shim12.scala   | 10 +-
 .../org/apache/spark/sql/hive/thriftserver/Shim13.scala   | 10 +-
 2 files changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/f6ee80b1/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
--
diff --git 
a/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
 
b/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
index 9258ad0..26ebc3b 100644
--- 
a/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
+++ 
b/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
@@ -186,6 +186,11 @@ private[hive] class SparkExecuteStatementOperation(
   def run(): Unit = {
 logInfo(s"Running query '$statement'")
 setState(OperationState.RUNNING)
+val groupId = round(random * 100).toString
+hiveContext.sparkContext.setJobGroup(groupId, statement)
+sessionToActivePool.get(parentSession).foreach { pool =>
+  hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
+}
 try {
   result = hiveContext.sql(statement)
   logDebug(result.queryExecution.toString())
@@ -196,11 +201,6 @@ private[hive] class SparkExecuteStatementOperation(
 case _ =>
   }
 
-  val groupId = round(random * 100).toString
-  hiveContext.sparkContext.setJobGroup(groupId, statement)
-  sessionToActivePool.get(parentSession).foreach { pool =>
-hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
-  }
   iter = {
 val useIncrementalCollect =
   hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", 
"false").toBoolean

http://git-wip-us.apache.org/repos/asf/spark/blob/f6ee80b1/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
--
diff --git 
a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
 
b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
index 17f1ad3..5519db1 100644
--- 
a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
+++ 
b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
@@ -157,6 +157,11 @@ private[hive] class SparkExecuteStatementOperation(
   def run(): Unit = {
 logInfo(s"Running query '$statement'")
 setState(OperationState.RUNNING)
+val groupId = round(random * 100).toString
+hiveContext.sparkContext.setJobGroup(groupId, statement)
+sessionToActivePool.get(parentSession).foreach { pool =>
+  hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
+}
 try {
   result = hiveContext.sql(statement)
   logDebug(result.queryExecution.toString())
@@ -167,11 +172,6 @@ private[hive] class SparkExecuteStatementOperation(
 case _ =>
   }
 
-  val groupId = round(random * 100).toString
-  hiveContext.sparkContext.setJobGroup(groupId, statement)
-  sessionToActivePool.get(parentSession).foreach { pool =>
-hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
-  }
   iter = {
 val useIncrementalCollect =
   hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", 
"false").toBoolean


-
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

spark git commit: [SPARK-5846] Correctly set job description and pool for SQL jobs

2015-02-18 Thread lian
Repository: spark
Updated Branches:
  refs/heads/branch-1.3 a64f374a3 -> 092b45f69


[SPARK-5846] Correctly set job description and pool for SQL jobs

marmbrus am I missing something obvious here? I verified that this fixes the 
problem for me (on 1.2.1) on EC2, but I'm confused about how others wouldn't 
have noticed this?

Author: Kay Ousterhout 

Closes #4630 from kayousterhout/SPARK-5846_1.3 and squashes the following 
commits:

2022ad4 [Kay Ousterhout] [SPARK-5846] Correctly set job description and pool 
for SQL jobs

(cherry picked from commit e945aa6139e022d13ac793f46819cfee07b782fc)
Signed-off-by: Cheng Lian 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/092b45f6
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/092b45f6
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/092b45f6

Branch: refs/heads/branch-1.3
Commit: 092b45f6939c88b41fbeb957d4d703bf8f008e48
Parents: a64f374
Author: Kay Ousterhout 
Authored: Thu Feb 19 09:49:34 2015 +0800
Committer: Cheng Lian 
Committed: Thu Feb 19 09:50:15 2015 +0800

--
 .../org/apache/spark/sql/hive/thriftserver/Shim12.scala  | 8 
 .../org/apache/spark/sql/hive/thriftserver/Shim13.scala  | 8 
 2 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/092b45f6/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
--
diff --git 
a/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
 
b/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
index ea9d61d..13116b4 100644
--- 
a/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
+++ 
b/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
@@ -185,6 +185,10 @@ private[hive] class SparkExecuteStatementOperation(
   def run(): Unit = {
 logInfo(s"Running query '$statement'")
 setState(OperationState.RUNNING)
+hiveContext.sparkContext.setJobDescription(statement)
+sessionToActivePool.get(parentSession.getSessionHandle).foreach { pool =>
+  hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
+}
 try {
   result = hiveContext.sql(statement)
   logDebug(result.queryExecution.toString())
@@ -194,10 +198,6 @@ private[hive] class SparkExecuteStatementOperation(
   logInfo(s"Setting spark.scheduler.pool=$value for future statements 
in this session.")
 case _ =>
   }
-  hiveContext.sparkContext.setJobDescription(statement)
-  sessionToActivePool.get(parentSession.getSessionHandle).foreach { pool =>
-hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
-  }
   iter = {
 val useIncrementalCollect =
   hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", 
"false").toBoolean

http://git-wip-us.apache.org/repos/asf/spark/blob/092b45f6/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
--
diff --git 
a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
 
b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
index 71e3954..9b8faef 100644
--- 
a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
+++ 
b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
@@ -156,6 +156,10 @@ private[hive] class SparkExecuteStatementOperation(
   def run(): Unit = {
 logInfo(s"Running query '$statement'")
 setState(OperationState.RUNNING)
+hiveContext.sparkContext.setJobDescription(statement)
+sessionToActivePool.get(parentSession.getSessionHandle).foreach { pool =>
+  hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
+}
 try {
   result = hiveContext.sql(statement)
   logDebug(result.queryExecution.toString())
@@ -165,10 +169,6 @@ private[hive] class SparkExecuteStatementOperation(
   logInfo(s"Setting spark.scheduler.pool=$value for future statements 
in this session.")
 case _ =>
   }
-  hiveContext.sparkContext.setJobDescription(statement)
-  sessionToActivePool.get(parentSession.getSessionHandle).foreach { pool =>
-hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
-  }
   iter = {
 val useIncrementalCollect =
   hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", 
"false").toBoolean


---

spark git commit: [SPARK-5846] Correctly set job description and pool for SQL jobs

2015-02-18 Thread lian
Repository: spark
Updated Branches:
  refs/heads/master d12d2ad76 -> e945aa613


[SPARK-5846] Correctly set job description and pool for SQL jobs

marmbrus am I missing something obvious here? I verified that this fixes the 
problem for me (on 1.2.1) on EC2, but I'm confused about how others wouldn't 
have noticed this?

Author: Kay Ousterhout 

Closes #4630 from kayousterhout/SPARK-5846_1.3 and squashes the following 
commits:

2022ad4 [Kay Ousterhout] [SPARK-5846] Correctly set job description and pool 
for SQL jobs


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/e945aa61
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/e945aa61
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/e945aa61

Branch: refs/heads/master
Commit: e945aa6139e022d13ac793f46819cfee07b782fc
Parents: d12d2ad
Author: Kay Ousterhout 
Authored: Thu Feb 19 09:49:34 2015 +0800
Committer: Cheng Lian 
Committed: Thu Feb 19 09:49:34 2015 +0800

--
 .../org/apache/spark/sql/hive/thriftserver/Shim12.scala  | 8 
 .../org/apache/spark/sql/hive/thriftserver/Shim13.scala  | 8 
 2 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/e945aa61/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
--
diff --git 
a/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
 
b/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
index ea9d61d..13116b4 100644
--- 
a/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
+++ 
b/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
@@ -185,6 +185,10 @@ private[hive] class SparkExecuteStatementOperation(
   def run(): Unit = {
 logInfo(s"Running query '$statement'")
 setState(OperationState.RUNNING)
+hiveContext.sparkContext.setJobDescription(statement)
+sessionToActivePool.get(parentSession.getSessionHandle).foreach { pool =>
+  hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
+}
 try {
   result = hiveContext.sql(statement)
   logDebug(result.queryExecution.toString())
@@ -194,10 +198,6 @@ private[hive] class SparkExecuteStatementOperation(
   logInfo(s"Setting spark.scheduler.pool=$value for future statements 
in this session.")
 case _ =>
   }
-  hiveContext.sparkContext.setJobDescription(statement)
-  sessionToActivePool.get(parentSession.getSessionHandle).foreach { pool =>
-hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
-  }
   iter = {
 val useIncrementalCollect =
   hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", 
"false").toBoolean

http://git-wip-us.apache.org/repos/asf/spark/blob/e945aa61/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
--
diff --git 
a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
 
b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
index 71e3954..9b8faef 100644
--- 
a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
+++ 
b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
@@ -156,6 +156,10 @@ private[hive] class SparkExecuteStatementOperation(
   def run(): Unit = {
 logInfo(s"Running query '$statement'")
 setState(OperationState.RUNNING)
+hiveContext.sparkContext.setJobDescription(statement)
+sessionToActivePool.get(parentSession.getSessionHandle).foreach { pool =>
+  hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
+}
 try {
   result = hiveContext.sql(statement)
   logDebug(result.queryExecution.toString())
@@ -165,10 +169,6 @@ private[hive] class SparkExecuteStatementOperation(
   logInfo(s"Setting spark.scheduler.pool=$value for future statements 
in this session.")
 case _ =>
   }
-  hiveContext.sparkContext.setJobDescription(statement)
-  sessionToActivePool.get(parentSession.getSessionHandle).foreach { pool =>
-hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
-  }
   iter = {
 val useIncrementalCollect =
   hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", 
"false").toBoolean


-
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org