This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 3c0bb7d3ef1 [SPARK-43424][SQL] Support vanilla JDBC CHAR/VARCHAR through STS
3c0bb7d3ef1 is described below

commit 3c0bb7d3ef16c1520dfa6f3e9a159d2ebe41f28d
Author: Kent Yao <y...@apache.org>
AuthorDate: Wed May 10 22:22:49 2023 -0700

    [SPARK-43424][SQL] Support vanilla JDBC CHAR/VARCHAR through STS
    
    ### What changes were proposed in this pull request?
    
    - Add getColumnDisplaySize API support for CHAR and VARCHAR columns
    - Make the data type mapping consistent between SQL and MetaOperation(GetColumns); see the sketch below
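
    As a rough illustration of the consistency goal, a JDBC client can compare the two metadata paths. This is a minimal client-side sketch, not code from this patch; the connection URL and table name are assumptions.

    ```scala
    import java.sql.DriverManager

    object MetadataConsistencyCheck {
      def main(args: Array[String]): Unit = {
        // Hypothetical URL; assumes a Spark Thrift Server on the default port.
        val conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default")
        try {
          val stmt = conn.createStatement()
          stmt.execute("CREATE TABLE IF NOT EXISTS t (c CHAR(10), v VARCHAR(11)) USING parquet")

          // MetaOperation path: GetColumns, surfaced through DatabaseMetaData.
          val cols = conn.getMetaData.getColumns(null, "default", "t", "%")
          while (cols.next()) {
            println(s"GetColumns: ${cols.getString("COLUMN_NAME")} -> ${cols.getString("TYPE_NAME")}")
          }

          // SQL path: the same columns described via ResultSetMetaData.
          val md = stmt.executeQuery("SELECT * FROM t").getMetaData
          (1 to md.getColumnCount).foreach { i =>
            println(s"ResultSet:  ${md.getColumnName(i)} -> ${md.getColumnTypeName(i)}")
          }
        } finally conn.close()
      }
    }
    ```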
    
    ### Why are the changes needed?
    
    Better JDBC API support for CHAR/VARCHAR metadata.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes. java.sql.ResultSetMetaData.getColumnDisplaySize now returns the exact maximum length for CHAR/VARCHAR columns instead of Int.MaxValue.
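
    A minimal sketch of how a client observes this change (mirroring the new test in the diff below; the URL is an assumption):

    ```scala
    import java.sql.DriverManager

    object DisplaySizeCheck {
      def main(args: Array[String]): Unit = {
        // Hypothetical URL; assumes a Spark Thrift Server on localhost:10000.
        val conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default")
        try {
          val stmt = conn.createStatement()
          stmt.execute("CREATE TABLE IF NOT EXISTS t (c CHAR(10), v VARCHAR(11)) USING parquet")
          val md = stmt.executeQuery("SELECT * FROM t").getMetaData
          // Previously both calls returned Int.MaxValue (2147483647).
          println(md.getColumnDisplaySize(1)) // 10
          println(md.getColumnDisplaySize(2)) // 11
        } finally conn.close()
      }
    }
    ```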
    
    ### How was this patch tested?
    
    New tests in ThriftServerWithSparkContextSuite.
    
    Closes #41102 from yaooqinn/SPARK-43424.
    
    Authored-by: Kent Yao <y...@apache.org>
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
---
 .../hive/thriftserver/SparkExecuteStatementOperation.scala  |  8 +++++++-
 .../thriftserver/ThriftServerWithSparkContextSuite.scala    | 13 +++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala
index c41e92e618b..a9b46739fa6 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala
@@ -32,6 +32,7 @@ import org.apache.hive.service.rpc.thrift.{TCLIServiceConstants, TColumnDesc, TP
 
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.{DataFrame, Row, SQLContext}
+import org.apache.spark.sql.catalyst.util.CharVarcharUtils
 import org.apache.spark.sql.execution.HiveResult.getTimeFormatters
 import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
 import org.apache.spark.sql.types._
@@ -333,6 +334,8 @@ object SparkExecuteStatementOperation {
     case _: ArrayType => TTypeId.ARRAY_TYPE
     case _: MapType => TTypeId.MAP_TYPE
     case _: StructType => TTypeId.STRUCT_TYPE
+    case _: CharType => TTypeId.CHAR_TYPE
+    case _: VarcharType => TTypeId.VARCHAR_TYPE
     case other =>
       throw new IllegalArgumentException(s"Unrecognized type name: ${other.catalogString}")
   }
@@ -344,6 +347,9 @@ object SparkExecuteStatementOperation {
         Map(
           TCLIServiceConstants.PRECISION -> TTypeQualifierValue.i32Value(d.precision),
           TCLIServiceConstants.SCALE -> TTypeQualifierValue.i32Value(d.scale)).asJava
+      case _: VarcharType | _: CharType =>
+        Map(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH ->
+          TTypeQualifierValue.i32Value(typ.defaultSize)).asJava
       case _ => Collections.emptyMap[String, TTypeQualifierValue]()
     }
     ret.setQualifiers(qualifiers)
@@ -369,7 +375,7 @@ object SparkExecuteStatementOperation {
 
   def toTTableSchema(schema: StructType): TTableSchema = {
     val tTableSchema = new TTableSchema()
-    schema.zipWithIndex.foreach { case (f, i) =>
+    CharVarcharUtils.getRawSchema(schema).zipWithIndex.foreach { case (f, i) =>
       tTableSchema.addToColumns(toTColumnDesc(f, i))
     }
     tTableSchema
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala
index dcb387e37a1..44e216258a1 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala
@@ -217,6 +217,19 @@ trait ThriftServerWithSparkContextSuite extends SharedThriftServer {
       // scalastyle:on line.size.limit
     }
   }
+
+  test("Support column display size for char/varchar") {
+    withTable("t") {
+      sql("CREATE TABLE t (c char(10), v varchar(11)) using parquet")
+
+      withJdbcStatement { stmt =>
+        val rs = stmt.executeQuery("SELECT * FROM t")
+        val metaData = rs.getMetaData
+        assert(metaData.getColumnDisplaySize(1) === 10)
+        assert(metaData.getColumnDisplaySize(2) === 11)
+      }
+    }
+  }
 }
 
class ThriftServerWithSparkContextInBinarySuite extends ThriftServerWithSparkContextSuite {

