This is an automated email from the ASF dual-hosted git repository.

yao pushed a commit to branch branch-3.4
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-3.4 by this push:
     new 7a0c72ff7724 Revert "[SPARK-48172][SQL] Fix escaping issues in JDBC Dialects"
7a0c72ff7724 is described below

commit 7a0c72ff7724b2ee40843e5bd4f83833bfa56052
Author: Kent Yao <y...@apache.org>
AuthorDate: Wed May 15 10:10:03 2024 +0800

    Revert "[SPARK-48172][SQL] Fix escaping issues in JDBC Dialects"

    This reverts commit a848e2790cba0b7ee77d391dc534146bd35ee50a.
---
 .../spark/sql/jdbc/v2/DB2IntegrationSuite.scala    |   6 -
 .../sql/jdbc/v2/DockerJDBCIntegrationV2Suite.scala |  11 -
 .../sql/jdbc/v2/MsSqlServerIntegrationSuite.scala  |   6 -
 .../spark/sql/jdbc/v2/MySQLIntegrationSuite.scala  |   6 -
 .../spark/sql/jdbc/v2/OracleIntegrationSuite.scala |   6 -
 .../sql/jdbc/v2/PostgresIntegrationSuite.scala     |   6 -
 .../org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala  | 229 ---------------------
 .../sql/connector/util/V2ExpressionSQLBuilder.java |   3 +
 .../sql/connector/expressions/expressions.scala    |   4 +-
 .../org/apache/spark/sql/jdbc/H2Dialect.scala      |   7 +
 .../org/apache/spark/sql/jdbc/MySQLDialect.scala   |  15 --
 .../org/apache/spark/sql/jdbc/JDBCV2Suite.scala    |   6 +-
 12 files changed, 14 insertions(+), 291 deletions(-)

diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/DB2IntegrationSuite.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/DB2IntegrationSuite.scala
index 11ddce68aecd..1a25cd2802dd 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/DB2IntegrationSuite.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/DB2IntegrationSuite.scala
@@ -67,12 +67,6 @@ class DB2IntegrationSuite extends DockerJDBCIntegrationV2Suite with V2JDBCTest {
     connection.prepareStatement(
       "CREATE TABLE employee (dept INTEGER, name VARCHAR(10), salary DECIMAL(20, 2), bonus DOUBLE)")
       .executeUpdate()
-    connection.prepareStatement(
-      s"""CREATE TABLE pattern_testing_table (
-         |pattern_testing_col LONGTEXT
-         |)
-         """.stripMargin
-    ).executeUpdate()
   }
 
   override def testUpdateColumnType(tbl: String): Unit = {
diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/DockerJDBCIntegrationV2Suite.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/DockerJDBCIntegrationV2Suite.scala
index a42caeafe6fe..72edfc9f1bf1 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/DockerJDBCIntegrationV2Suite.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/DockerJDBCIntegrationV2Suite.scala
@@ -38,17 +38,6 @@ abstract class DockerJDBCIntegrationV2Suite extends DockerJDBCIntegrationSuite {
       .executeUpdate()
     connection.prepareStatement("INSERT INTO employee VALUES (6, 'jen', 12000, 1200)")
       .executeUpdate()
-
-    connection.prepareStatement(
-      s"""
-         |INSERT INTO pattern_testing_table VALUES
-         |('special_character_quote\\'_present'),
-         |('special_character_quote_not_present'),
-         |('special_character_percent%_present'),
-         |('special_character_percent_not_present'),
-         |('special_character_underscore_present'),
-         |('special_character_underscorenot_present')
-         """.stripMargin).executeUpdate()
   }
 
   def tablePreparation(connection: Connection): Unit
diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MsSqlServerIntegrationSuite.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MsSqlServerIntegrationSuite.scala
index 6658b5ed6c77..a527c6f8cb5b 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MsSqlServerIntegrationSuite.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MsSqlServerIntegrationSuite.scala
@@ -66,12 +66,6 @@ class MsSqlServerIntegrationSuite extends DockerJDBCIntegrationV2Suite with V2JD
     connection.prepareStatement(
       "CREATE TABLE employee (dept INT, name VARCHAR(32), salary NUMERIC(20, 2), bonus FLOAT)")
       .executeUpdate()
-    connection.prepareStatement(
-      s"""CREATE TABLE pattern_testing_table (
-         |pattern_testing_col LONGTEXT
-         |)
-         """.stripMargin
-    ).executeUpdate()
   }
 
   override def notSupportsTableComment: Boolean = true
diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MySQLIntegrationSuite.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MySQLIntegrationSuite.scala
index 5eff5c412971..c4056c224f66 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MySQLIntegrationSuite.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MySQLIntegrationSuite.scala
@@ -65,12 +65,6 @@ class MySQLIntegrationSuite extends DockerJDBCIntegrationV2Suite with V2JDBCTest
     connection.prepareStatement(
       "CREATE TABLE employee (dept INT, name VARCHAR(32), salary DECIMAL(20, 2)," +
         " bonus DOUBLE)").executeUpdate()
-    connection.prepareStatement(
-      s"""CREATE TABLE pattern_testing_table (
-         |pattern_testing_col LONGTEXT
-         |)
-         """.stripMargin
-    ).executeUpdate()
   }
 
   override def testUpdateColumnType(tbl: String): Unit = {
diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/OracleIntegrationSuite.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/OracleIntegrationSuite.scala
index 3ee10723dc3c..5de760891885 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/OracleIntegrationSuite.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/OracleIntegrationSuite.scala
@@ -83,12 +83,6 @@ class OracleIntegrationSuite extends DockerJDBCIntegrationV2Suite with V2JDBCTes
     connection.prepareStatement(
       "CREATE TABLE employee (dept NUMBER(32), name VARCHAR2(32), salary NUMBER(20, 2)," +
         " bonus BINARY_DOUBLE)").executeUpdate()
-    connection.prepareStatement(
-      s"""CREATE TABLE pattern_testing_table (
-         |pattern_testing_col LONGTEXT
-         |)
-         """.stripMargin
-    ).executeUpdate()
   }
 
   override def testUpdateColumnType(tbl: String): Unit = {
diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/PostgresIntegrationSuite.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/PostgresIntegrationSuite.scala
index 3b5dec1a4b38..36f81f2c52e9 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/PostgresIntegrationSuite.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/PostgresIntegrationSuite.scala
@@ -57,12 +57,6 @@ class PostgresIntegrationSuite extends DockerJDBCIntegrationV2Suite with V2JDBCT
     connection.prepareStatement(
       "CREATE TABLE employee (dept INTEGER, name VARCHAR(32), salary NUMERIC(20, 2)," +
         " bonus double precision)").executeUpdate()
-    connection.prepareStatement(
-      s"""CREATE TABLE pattern_testing_table (
-         |pattern_testing_col LONGTEXT
-         |)
-         """.stripMargin
-    ).executeUpdate()
   }
 
   override def testUpdateColumnType(tbl: String): Unit = {
diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
index 1c36a25b15b6..f16d9b507d5f 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
@@ -330,235 +330,6 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
     assert(scan.schema.names.sameElements(Seq(col)))
   }
 
-  test("SPARK-48172: Test CONTAINS") {
-    val df1 = spark.sql(
-      s"""
-         |SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE contains(pattern_testing_col, 'quote\\'')""".stripMargin)
-    df1.explain("formatted")
-    val rows1 = df1.collect()
-    assert(rows1.length === 1)
-    assert(rows1(0).getString(0) === "special_character_quote'_present")
-
-    val df2 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE contains(pattern_testing_col, 'percent%')""".stripMargin)
-    val rows2 = df2.collect()
-    assert(rows2.length === 1)
-    assert(rows2(0).getString(0) === "special_character_percent%_present")
-
-    val df3 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE contains(pattern_testing_col, 'underscore_')""".stripMargin)
-    val rows3 = df3.collect()
-    assert(rows3.length === 1)
-    assert(rows3(0).getString(0) === "special_character_underscore_present")
-
-    val df4 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE contains(pattern_testing_col, 'character')
-           |ORDER BY pattern_testing_col""".stripMargin)
-    val rows4 = df4.collect()
-    assert(rows4.length === 1)
-    assert(rows4(0).getString(0) === "special_character_percent%_present")
-    assert(rows4(1).getString(0) === "special_character_percent_not_present")
-    assert(rows4(2).getString(0) === "special_character_quote'_present")
-    assert(rows4(3).getString(0) === "special_character_quote_not_present")
-    assert(rows4(4).getString(0) === "special_character_underscore_present")
-    assert(rows4(5).getString(0) === "special_character_underscorenot_present")
-  }
-
-  test("SPARK-48172: Test ENDSWITH") {
-    val df1 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE endswith(pattern_testing_col, 'quote\\'_present')""".stripMargin)
-    val rows1 = df1.collect()
-    assert(rows1.length === 1)
-    assert(rows1(0).getString(0) === "special_character_quote'_present")
-
-    val df2 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE endswith(pattern_testing_col, 'percent%_present')""".stripMargin)
-    val rows2 = df2.collect()
-    assert(rows2.length === 1)
-    assert(rows2(0).getString(0) === "special_character_percent%_present")
-
-    val df3 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE endswith(pattern_testing_col, 'underscore_present')""".stripMargin)
-    val rows3 = df3.collect()
-    assert(rows3.length === 1)
-    assert(rows3(0).getString(0) === "special_character_underscore_present")
-
-    val df4 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE endswith(pattern_testing_col, 'present')
-           |ORDER BY pattern_testing_col""".stripMargin)
-    val rows4 = df4.collect()
-    assert(rows4.length === 1)
-    assert(rows4(0).getString(0) === "special_character_percent%_present")
-    assert(rows4(1).getString(0) === "special_character_percent_not_present")
-    assert(rows4(2).getString(0) === "special_character_quote'_present")
-    assert(rows4(3).getString(0) === "special_character_quote_not_present")
-    assert(rows4(4).getString(0) === "special_character_underscore_present")
-    assert(rows4(5).getString(0) === "special_character_underscorenot_present")
-  }
-
-  test("SPARK-48172: Test STARTSWITH") {
-    val df1 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE startswith(pattern_testing_col, 'special_character_quote\\'')""".stripMargin)
-    val rows1 = df1.collect()
-    assert(rows1.length === 1)
-    assert(rows1(0).getString(0) === "special_character_quote'_present")
-
-    val df2 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE startswith(pattern_testing_col, 'special_character_percent%')""".stripMargin)
-    val rows2 = df2.collect()
-    assert(rows2.length === 1)
-    assert(rows2(0).getString(0) === "special_character_percent%_present")
-
-    val df3 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE startswith(pattern_testing_col, 'special_character_underscore_')""".stripMargin)
-    val rows3 = df3.collect()
-    assert(rows3.length === 1)
-    assert(rows3(0).getString(0) === "special_character_underscore_present")
-
-    val df4 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE startswith(pattern_testing_col, 'special_character')
-           |ORDER BY pattern_testing_col""".stripMargin)
-    val rows4 = df4.collect()
-    assert(rows4.length === 1)
-    assert(rows4(0).getString(0) === "special_character_percent%_present")
-    assert(rows4(1).getString(0) === "special_character_percent_not_present")
-    assert(rows4(2).getString(0) === "special_character_quote'_present")
-    assert(rows4(3).getString(0) === "special_character_quote_not_present")
-    assert(rows4(4).getString(0) === "special_character_underscore_present")
-    assert(rows4(5).getString(0) === "special_character_underscorenot_present")
-  }
-
-  test("SPARK-48172: Test LIKE") {
-    // this one should map to contains
-    val df1 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE pattern_testing_col LIKE '%quote\\'%'""".stripMargin)
-    val rows1 = df1.collect()
-    assert(rows1.length === 1)
-    assert(rows1(0).getString(0) === "special_character_quote'_present")
-
-    val df2 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE pattern_testing_col LIKE '%percent\\%%'""".stripMargin)
-    val rows2 = df2.collect()
-    assert(rows2.length === 1)
-    assert(rows2(0).getString(0) === "special_character_percent%_present")
-
-    val df3 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE pattern_testing_col LIKE '%underscore\\_%'""".stripMargin)
-    val rows3 = df3.collect()
-    assert(rows3.length === 1)
-    assert(rows3(0).getString(0) === "special_character_underscore_present")
-
-    val df4 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE pattern_testing_col LIKE '%character%'
-           |ORDER BY pattern_testing_col""".stripMargin)
-    val rows4 = df4.collect()
-    assert(rows4.length === 1)
-    assert(rows4(0).getString(0) === "special_character_percent%_present")
-    assert(rows4(1).getString(0) === "special_character_percent_not_present")
-    assert(rows4(2).getString(0) === "special_character_quote'_present")
-    assert(rows4(3).getString(0) === "special_character_quote_not_present")
-    assert(rows4(4).getString(0) === "special_character_underscore_present")
-    assert(rows4(5).getString(0) === "special_character_underscorenot_present")
-
-    // map to startsWith
-    // this one should map to contains
-    val df5 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE pattern_testing_col LIKE 'special_character_quote\\'%'""".stripMargin)
-    val rows5 = df5.collect()
-    assert(rows5.length === 1)
-    assert(rows5(0).getString(0) === "special_character_quote'_present")
-
-    val df6 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE pattern_testing_col LIKE 'special_character_percent\\%%'""".stripMargin)
-    val rows6 = df6.collect()
-    assert(rows6.length === 1)
-    assert(rows6(0).getString(0) === "special_character_percent%_present")
-
-    val df7 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE pattern_testing_col LIKE 'special_character_underscore\\_%'""".stripMargin)
-    val rows7 = df7.collect()
-    assert(rows7.length === 1)
-    assert(rows7(0).getString(0) === "special_character_underscore_present")
-
-    val df8 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE pattern_testing_col LIKE 'special_character%'
-           |ORDER BY pattern_testing_col""".stripMargin)
-    val rows8 = df8.collect()
-    assert(rows8.length === 1)
-    assert(rows8(0).getString(0) === "special_character_percent%_present")
-    assert(rows8(1).getString(0) === "special_character_percent_not_present")
-    assert(rows8(2).getString(0) === "special_character_quote'_present")
-    assert(rows8(3).getString(0) === "special_character_quote_not_present")
-    assert(rows8(4).getString(0) === "special_character_underscore_present")
-    assert(rows8(5).getString(0) === "special_character_underscorenot_present")
-    // map to endsWith
-    // this one should map to contains
-    val df9 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE pattern_testing_col LIKE '%quote\\'_present'""".stripMargin)
-    val rows9 = df9.collect()
-    assert(rows9.length === 1)
-    assert(rows9(0).getString(0) === "special_character_quote'_present")
-
-    val df10 = spark.sql(
-      s"""SELECT * FROM $catalogName.pattern_testing_table
-         |WHERE pattern_testing_col LIKE '%percent\\%_present'""".stripMargin)
-    val rows10 = df10.collect()
-    assert(rows10.length === 1)
-    assert(rows10(0).getString(0) === "special_character_percent%_present")
-
-    val df11 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE pattern_testing_col LIKE '%underscore\\_present'""".stripMargin)
-    val rows11 = df11.collect()
-    assert(rows11.length === 1)
-    assert(rows11(0).getString(0) === "special_character_underscore_present")
-
-    val df12 = spark.
-      sql(
-        s"""SELECT * FROM $catalogName.pattern_testing_table
-           |WHERE pattern_testing_col LIKE '%present' ORDER BY pattern_testing_col""".stripMargin)
-    val rows12 = df12.collect()
-    assert(rows12.length === 1)
-    assert(rows12(0).getString(0) === "special_character_percent%_present")
-    assert(rows12(1).getString(0) === "special_character_percent_not_present")
-    assert(rows12(2).getString(0) === "special_character_quote'_present")
-    assert(rows12(3).getString(0) === "special_character_quote_not_present")
-    assert(rows12(4).getString(0) === "special_character_underscore_present")
-    assert(rows12(5).getString(0) === "special_character_underscorenot_present")
-  }
-
   test("SPARK-37038: Test TABLESAMPLE") {
     if (supportsTableSample) {
       withTable(s"$catalogName.new_table") {
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/util/V2ExpressionSQLBuilder.java b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/util/V2ExpressionSQLBuilder.java
index e170951bfa28..dcb3c706946c 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/util/V2ExpressionSQLBuilder.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/util/V2ExpressionSQLBuilder.java
@@ -66,6 +66,9 @@ public class V2ExpressionSQLBuilder {
       case '%':
         builder.append("\\%");
         break;
+      case '\'':
+        builder.append("\\\'");
+        break;
       default:
         builder.append(c);
     }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/connector/expressions/expressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/connector/expressions/expressions.scala
index 7f536bdb712a..fbd2520e2a77 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/connector/expressions/expressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/connector/expressions/expressions.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark.sql.connector.expressions
 
-import org.apache.commons.lang3.StringUtils
-
 import org.apache.spark.SparkException
 import org.apache.spark.sql.catalyst
 import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
@@ -352,7 +350,7 @@ private[sql] object HoursTransform {
 private[sql] final case class LiteralValue[T](value: T, dataType: DataType) extends Literal[T] {
   override def toString: String = {
     if (dataType.isInstanceOf[StringType]) {
-      s"'${StringUtils.replace(s"$value", "'", "''")}'"
+      s"'$value'"
     } else {
       s"$value"
     }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
index 620af0918edc..4ae3260f3d7a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
@@ -240,6 +240,13 @@ private[sql] object H2Dialect extends JdbcDialect {
   }
 
   class H2SQLBuilder extends JDBCSQLBuilder {
+    override def escapeSpecialCharsForLikePattern(str: String): String = {
+      str.map {
+        case '_' => "\\_"
+        case '%' => "\\%"
+        case c => c.toString
+      }.mkString
+    }
     override def visitAggregateFunction(
         funcName: String, isDistinct: Boolean, inputs: Array[String]): String =
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
index 43f747bb7027..3cea8fa10883 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
@@ -51,21 +51,6 @@ private case object MySQLDialect extends JdbcDialect with SQLConfHelper {
     supportedFunctions.contains(funcName)
 
   class MySQLSQLBuilder extends JDBCSQLBuilder {
-    override def visitStartsWith(l: String, r: String): String = {
-      val value = r.substring(1, r.length() - 1)
-      s"$l LIKE '${escapeSpecialCharsForLikePattern(value)}%' ESCAPE '\\\\'"
-    }
-
-    override def visitEndsWith(l: String, r: String): String = {
-      val value = r.substring(1, r.length() - 1)
-      s"$l LIKE '%${escapeSpecialCharsForLikePattern(value)}' ESCAPE '\\\\'"
-    }
-
-    override def visitContains(l: String, r: String): String = {
-      val value = r.substring(1, r.length() - 1)
-      s"$l LIKE '%${escapeSpecialCharsForLikePattern(value)}%' ESCAPE '\\\\'"
-    }
-
     override def visitAggregateFunction(
         funcName: String, isDistinct: Boolean, inputs: Array[String]): String =
       if (isDistinct && distinctUnsupportedAggregateFunctions.contains(funcName)) {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
index 7a5a49107167..2d1cf1f54672 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
@@ -1245,7 +1245,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     val df5 = spark.table("h2.test.address").filter($"email".startsWith("abc_'%"))
     checkFiltersRemoved(df5)
     checkPushedInfo(df5,
-      raw"PushedFilters: [EMAIL IS NOT NULL, EMAIL LIKE 'abc\_''\%%' ESCAPE '\']")
+      raw"PushedFilters: [EMAIL IS NOT NULL, EMAIL LIKE 'abc\_\'\%%' ESCAPE '\']")
     checkAnswer(df5, Seq(Row("abc_'%d...@gmail.com")))
 
     val df6 = spark.table("h2.test.address").filter($"email".endsWith("_...@gmail.com"))
@@ -1276,7 +1276,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     val df10 = spark.table("h2.test.address").filter($"email".endsWith("_'%d...@gmail.com"))
     checkFiltersRemoved(df10)
     checkPushedInfo(df10,
-      raw"PushedFilters: [EMAIL IS NOT NULL, EMAIL LIKE '%\_''\%d...@gmail.com' ESCAPE '\']")
+      raw"PushedFilters: [EMAIL IS NOT NULL, EMAIL LIKE '%\_\'\%d...@gmail.com' ESCAPE '\']")
     checkAnswer(df10, Seq(Row("abc_'%d...@gmail.com")))
 
     val df11 = spark.table("h2.test.address").filter($"email".contains("c_d"))
@@ -1304,7 +1304,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     val df15 = spark.table("h2.test.address").filter($"email".contains("c_'%d"))
     checkFiltersRemoved(df15)
     checkPushedInfo(df15,
-      raw"PushedFilters: [EMAIL IS NOT NULL, EMAIL LIKE '%c\_''\%d%' ESCAPE '\']")
+      raw"PushedFilters: [EMAIL IS NOT NULL, EMAIL LIKE '%c\_\'\%d%' ESCAPE '\']")
     checkAnswer(df15, Seq(Row("abc_'%d...@gmail.com")))
   }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
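
The escaping change being reverted is easiest to see in the JDBCV2Suite expectations above. The reverted SPARK-48172 fix (the '-' lines) escaped only the LIKE wildcards '_' and '%' in the pushed-down pattern and relied on SQL-standard quote doubling when rendering the string literal, yielding 'abc\_''\%%'; after the revert (the '+' lines), V2ExpressionSQLBuilder backslash-escapes the single quote inside the pattern as well, yielding 'abc\_\'\%%'. A minimal, self-contained Scala sketch contrasting the two rules — the object and method names are invented for illustration; only the per-character escaping mirrors the hunks above:

object LikeEscapingSketch {
  // Post-revert rule (see the V2ExpressionSQLBuilder.java hunk): backslash-escape
  // the LIKE wildcards and the single quote inside the pattern.
  def escapeWildcardsAndQuote(str: String): String =
    str.map {
      case '_' => "\\_"
      case '%' => "\\%"
      case '\'' => "\\'"
      case c => c.toString
    }.mkString

  // Reverted SPARK-48172 rule (see the H2Dialect hunk): escape only the LIKE
  // wildcards; single quotes are doubled when the string literal is rendered.
  def escapeWildcardsOnly(str: String): String =
    str.map {
      case '_' => "\\_"
      case '%' => "\\%"
      case c => c.toString
    }.mkString

  def main(args: Array[String]): Unit = {
    val prefix = "abc_'%" // the startsWith argument used for df5 in JDBCV2Suite
    // Post-revert pushed-down predicate, matching the '+' expectation above:
    println(raw"EMAIL LIKE '${escapeWildcardsAndQuote(prefix)}%' ESCAPE '\'")
    // prints: EMAIL LIKE 'abc\_\'\%%' ESCAPE '\'
    // Reverted-fix predicate, matching the '-' expectation above:
    println(raw"EMAIL LIKE '${escapeWildcardsOnly(prefix).replace("'", "''")}%' ESCAPE '\'")
    // prints: EMAIL LIKE 'abc\_''\%%' ESCAPE '\'
  }
}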