cloud-fan commented on code in PR #36593: URL: https://github.com/apache/spark/pull/36593#discussion_r877123725
########## sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCCatalog.scala: ########## @@ -32,11 +35,14 @@ import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects} import org.apache.spark.sql.types.StructType import org.apache.spark.sql.util.CaseInsensitiveStringMap -class JDBCTableCatalog extends TableCatalog with SupportsNamespaces with Logging { +class JDBCCatalog extends TableCatalog with SupportsNamespaces with FunctionCatalog with Logging { private var catalogName: String = null private var options: JDBCOptions = _ private var dialect: JdbcDialect = _ + private val functions: util.Map[Identifier, UnboundFunction] = + new ConcurrentHashMap[Identifier, UnboundFunction]() Review Comment: We should clearly define how this can be used. I thought each JDBC dialect should have APIs to register its own UDFs. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org