This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new cfb048a  [SPARK-37783][SQL][FOLLOWUP] Enable tail-recursion wherever possible
cfb048a is described below

commit cfb048ae1648934c29daf5036f98a94df8ff17c0
Author: yangjie01 <yangji...@baidu.com>
AuthorDate: Wed Feb 16 22:50:10 2022 -0800

    [SPARK-37783][SQL][FOLLOWUP] Enable tail-recursion wherever possible
    
    ### What changes were proposed in this pull request?
    This PR adds the `scala.annotation.tailrec` annotation to methods flagged as tail-recursive by IDE (IntelliJ) inspection.
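
    As a minimal sketch of what the annotation does (illustrative only, not code from this patch; `sum` is a hypothetical helper): `@tailrec` makes scalac verify that every recursive call is in tail position and compile the method into a loop, failing the build otherwise.

    ```scala
    import scala.annotation.tailrec

    object TailrecSketch {
      // The recursive call is the last action in each branch, so scalac
      // compiles this into a while-loop instead of nested stack frames.
      @tailrec
      def sum(xs: List[Int], acc: Int = 0): Int = xs match {
        case Nil => acc
        case head :: tail => sum(tail, acc + head)
      }
    }
    ```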
    
    ### Why are the changes needed?
    To improve performance: `@tailrec`-annotated methods are guaranteed to be compiled into loops, avoiding per-call stack frames.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Passes GA (GitHub Actions CI).
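
    For contrast, a hedged sketch (again not from this patch; `length` is hypothetical) of the failure the annotation catches: if a recursive call is not in tail position, scalac rejects the `@tailrec` method at compile time.

    ```scala
    import scala.annotation.tailrec

    object NonTailSketch {
      // Uncommenting @tailrec below fails compilation with an error like
      // "could not optimize @tailrec annotated method length": the result
      // of the recursive call still feeds the pending `1 + _`, so a stack
      // frame must be kept for every element.
      // @tailrec
      def length(xs: List[Int]): Int = xs match {
        case Nil => 0
        case _ :: tail => 1 + length(tail)
      }
    }
    ```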
    
    Closes #35540 from LuciferYang/SPARK-37783-FOLLOWUP.
    
    Authored-by: yangjie01 <yangji...@baidu.com>
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
---
 .../scala/org/apache/spark/sql/catalyst/analysis/AnsiTypeCoercion.scala | 1 +
 .../scala/org/apache/spark/sql/catalyst/csv/UnivocityGenerator.scala    | 1 +
 .../main/scala/org/apache/spark/sql/execution/columnar/ColumnType.scala | 2 +-
 sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala | 1 +
 4 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/AnsiTypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/AnsiTypeCoercion.scala
index 90f28fb..e13ff2b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/AnsiTypeCoercion.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/AnsiTypeCoercion.scala
@@ -135,6 +135,7 @@ object AnsiTypeCoercion extends TypeCoercionBase {
   }
 
   /** Promotes StringType to other data types. */
+  @scala.annotation.tailrec
   private def findWiderTypeForString(dt1: DataType, dt2: DataType): Option[DataType] = {
     (dt1, dt2) match {
       case (StringType, _: IntegralType) => Some(LongType)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/UnivocityGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/UnivocityGenerator.scala
index 9d65824..5dd8c35 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/UnivocityGenerator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/UnivocityGenerator.scala
@@ -60,6 +60,7 @@ class UnivocityGenerator(
     legacyFormat = FAST_DATE_FORMAT,
     isParsing = false)
 
+  @scala.annotation.tailrec
   private def makeConverter(dataType: DataType): ValueConverter = dataType match {
     case DateType =>
       (row: InternalRow, ordinal: Int) => dateFormatter.format(row.getInt(ordinal))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnType.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnType.scala
index 9b4c136..c029786 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnType.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnType.scala
@@ -833,7 +833,7 @@ private[columnar] object ColumnType {
       case arr: ArrayType => ARRAY(arr)
       case map: MapType => MAP(map)
       case struct: StructType => STRUCT(struct)
-      case udt: UserDefinedType[_] => apply(udt.sqlType)
+      case udt: UserDefinedType[_] => ColumnType(udt.sqlType)
       case other => throw QueryExecutionErrors.unsupportedTypeError(other)
     }
   }
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala
index c197b17..71be39a 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala
@@ -978,6 +978,7 @@ private[client] class Shim_v0_13 extends Shim_v0_12 {
     val inSetThreshold = SQLConf.get.metastorePartitionPruningInSetThreshold
 
     object ExtractAttribute {
+      @scala.annotation.tailrec
       def unapply(expr: Expression): Option[Attribute] = {
         expr match {
           case attr: Attribute => Some(attr)
