This is an automated email from the ASF dual-hosted git repository.

sunchao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 4708adc9185 [SPARK-43064][SQL] Spark SQL CLI SQL tab should only show a statement once
4708adc9185 is described below

commit 4708adc91856d7665e54e99545ecd5249ce817f7
Author: Angerszhuuuu <angers....@gmail.com>
AuthorDate: Fri Apr 14 08:43:42 2023 -0700

    [SPARK-43064][SQL] Spark SQL CLI SQL tab should only show a statement once
    
    ### What changes were proposed in this pull request?
    Before
    <img width="1789" alt="截屏2023-04-07 下午4 22 54" 
src="https://user-images.githubusercontent.com/46485123/230573688-42acb9f2-6fa0-48d0-bfde-c7ceeb306aef.png";>
    
    After
    <img width="1792" alt="截屏2023-04-07 下午4 24 02" 
src="https://user-images.githubusercontent.com/46485123/230573720-2c2a7731-d776-439c-ba6f-0dad9dc87a42.png";>
    
    ### Why are the changes needed?
    The SQL tab should not show the same statement twice; the duplicated entry is confusing.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Manually tested (MT).
    
    Closes #40701 from AngersZhuuuu/SPARK-43064.
    
    Authored-by: Angerszhuuuu <angers....@gmail.com>
    Signed-off-by: Chao Sun <sunc...@apple.com>
---
 .../apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala  | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala
index 18a0e57a2d3..8ae65a58608 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse
 import org.apache.spark.SparkThrowable
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.catalyst.plans.logical.CommandResult
 import org.apache.spark.sql.execution.{QueryExecution, SQLExecution}
 import org.apache.spark.sql.execution.HiveResult.hiveResultString
 import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
@@ -65,8 +66,15 @@ private[hive] class SparkSQLDriver(val context: SQLContext = SparkSQLEnv.sqlCont
       }
       context.sparkContext.setJobDescription(substitutorCommand)
       val execution = context.sessionState.executePlan(context.sql(command).logicalPlan)
-      hiveResponse = SQLExecution.withNewExecutionId(execution, Some("cli")) {
-        hiveResultString(execution.executedPlan)
+      // The SQL command has been executed above via `executePlan`, therefore we don't need to
+      // wrap it again with a new execution ID when getting Hive result.
+      execution.logical match {
+        case _: CommandResult =>
+          hiveResponse = hiveResultString(execution.executedPlan)
+        case _ =>
+          hiveResponse = SQLExecution.withNewExecutionId(execution, Some("cli")) {
+            hiveResultString(execution.executedPlan)
+          }
       }
       tableSchema = getResultSetSchema(execution)
       new CommandProcessorResponse(0)
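
For context, here is a minimal, self-contained sketch (not part of the commit; `CliResultSketch` and `collect` are hypothetical names) of the dispatch idea behind the patch: `context.sql` already executes commands eagerly and wraps their plan in `CommandResult`, so only non-command queries need a fresh execution ID before collecting the Hive-formatted result.

    // Sketch only: kept in the same package as SparkSQLDriver so the internal
    // helpers it relies on (QueryExecution, SQLExecution, hiveResultString) are
    // accessible, exactly as in the patched file above.
    package org.apache.spark.sql.hive.thriftserver

    import org.apache.spark.sql.catalyst.plans.logical.CommandResult
    import org.apache.spark.sql.execution.{QueryExecution, SQLExecution}
    import org.apache.spark.sql.execution.HiveResult.hiveResultString

    object CliResultSketch {
      // Hypothetical helper mirroring the patched branch of SparkSQLDriver.run.
      def collect(execution: QueryExecution): Seq[String] = execution.logical match {
        case _: CommandResult =>
          // The command was already executed (and shown in the SQL tab) when
          // context.sql ran it, so don't start a second tracked execution.
          hiveResultString(execution.executedPlan)
        case _ =>
          // A lazy query: register one new execution so it appears in the SQL tab.
          SQLExecution.withNewExecutionId(execution, Some("cli")) {
            hiveResultString(execution.executedPlan)
          }
      }
    }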


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
