This is an automated email from the ASF dual-hosted git repository.

gengliang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 9bc1cb90294 [SPARK-40585][SQL] Support double quoted identifiers
9bc1cb90294 is described below

commit 9bc1cb9029482914d92de2b0557ad2c336061d0f
Author: Serge Rielau <serge.rie...@databricks.com>
AuthorDate: Tue Oct 4 16:11:21 2022 -0700

    [SPARK-40585][SQL] Support double quoted identifiers
    
    ### What changes were proposed in this pull request?
    In this PR I propose a new session config:
    spark.sql.ansi.double_quoted_identifiers (true | false)
    
    When true, the parser interprets a double-quoted string not as a
    string literal but, in compliance with ANSI SQL, as an identifier.
    We do this by splitting the double-quoted literal out of the STRING
    token into its own DOUBLEQUOTED_STRING token in the lexer.
    In the grammar we replace all STRING references with a rule stringLit
    covering STRING and DOUBLEQUOTED_STRING, with the latter being
    conditional on the config setting being false. (Note that there already
    is a rule stringLiteral, hence the somewhat quirky name.)
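
    For example, a minimal sketch of the new behavior (the table t and
    column c1 are hypothetical):

        SET spark.sql.ansi.double_quoted_identifiers = true;
        SELECT "c1" FROM t;   -- "c1" is now parsed as a column identifier
        SELECT 'c1' FROM t;   -- still a string literal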
    
    Similarly, quotedIdentifier is extended with DOUBLEQUOTED_STRING,
    conditional on the config being true.
    
    Note that this is NOT PERFECT.
    The escape logic for strings (backslash) differs from that of
    identifiers (doubled double quotes).
    Unfortunately I do not know how to change this, since introducing a new
    DOUBLE_QUOTED_IDENTIFIER token has proven to break STRING, presumably
    due to the overlapping patterns in the lexer.
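
    To illustrate the divergence (a sketch; the second statement assumes
    the config is true):

        SELECT 'it\'s';       -- string literal: backslash escapes the quote
        SELECT 1 AS "a""b";   -- identifier: a doubled double quote escapes,
                              -- yielding the column name a"b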
    
    At this point I consider this an edge-case.
    
    ### Why are the changes needed?
    
    ANSI SQL requires identifiers to be quoted with double quotes. We have
    seen customer requests for this support, especially around column
    aliases, but it makes sense to apply a holistic fix rather than a
    context-specific one.
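
    For example, an ANSI-style quoted column alias (illustrative; t and c1
    are hypothetical):

        SET spark.sql.ansi.double_quoted_identifiers = true;
        SELECT c1 AS "My Column" FROM t;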
    
    ### Does this PR introduce _any_ user-facing change?
    Yes, this is a new config introducing a new feature. It is not a breaking 
change, though.
    
    ### How was this patch tested?
    double-quoted-identifiers.sql was added to the SQL query tests.
    
    Closes #38022 from srielau/SPARK-40585-double-quoted-identifier.
    
    Lead-authored-by: Serge Rielau <serge.rie...@databricks.com>
    Co-authored-by: Serge Rielau <srie...@users.noreply.github.com>
    Co-authored-by: Gengliang Wang <ltn...@gmail.com>
    Signed-off-by: Gengliang Wang <gengli...@apache.org>
---
 .../spark/sql/catalyst/parser/SqlBaseLexer.g4      |   5 +-
 .../spark/sql/catalyst/parser/SqlBaseParser.g4     | 111 ++--
 .../spark/sql/catalyst/parser/AstBuilder.scala     | 134 +++--
 .../spark/sql/catalyst/parser/ParseDriver.scala    |  18 +
 .../spark/sql/catalyst/parser/ParserUtils.scala    |   2 +-
 .../org/apache/spark/sql/internal/SQLConf.scala    |   9 +
 .../sql/catalyst/parser/ParserUtilsSuite.scala     |   8 +-
 .../spark/sql/execution/SparkSqlParser.scala       |  40 +-
 .../sql-tests/inputs/double-quoted-identifiers.sql | 107 ++++
 .../results/double-quoted-identifiers.sql.out      | 608 +++++++++++++++++++++
 10 files changed, 937 insertions(+), 105 deletions(-)

diff --git a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseLexer.g4 b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseLexer.g4
index 1cbd6d24dea..d2ccd3dfdd9 100644
--- a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseLexer.g4
+++ b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseLexer.g4
@@ -400,11 +400,14 @@ HENT_END: '*/';
 
 STRING
     : '\'' ( ~('\''|'\\') | ('\\' .) )* '\''
-    | '"' ( ~('"'|'\\') | ('\\' .) )* '"'
     | 'R\'' (~'\'')* '\''
     | 'R"'(~'"')* '"'
     ;
 
+DOUBLEQUOTED_STRING
+    :'"' ( ~('"'|'\\') | ('\\' .) )* '"'
+    ;
+
 BIGINT_LITERAL
     : DIGIT+ 'L'
     ;
diff --git a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4 b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4
index f398ddd76f7..5b61c767fbe 100644
--- a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4
+++ b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4
@@ -35,6 +35,11 @@ options { tokenVocab = SqlBaseLexer; }
    * When true, the behavior of keywords follows ANSI SQL standard.
    */
   public boolean SQL_standard_keyword_behavior = false;
+
+  /**
+   * When true, double quoted literals are identifiers rather than STRINGs.
+   */
+  public boolean double_quoted_identifiers = false;
 }
 
 singleStatement
@@ -70,7 +75,7 @@ statement
     | ctes? dmlStatementNoWith                                         #dmlStatement
     | USE multipartIdentifier                                          #use
     | USE namespace multipartIdentifier                                #useNamespace
-    | SET CATALOG (identifier | STRING)                                #setCatalog
+    | SET CATALOG (identifier | stringLit)                             #setCatalog
     | CREATE namespace (IF NOT EXISTS)? multipartIdentifier
         (commentSpec |
          locationSpec |
@@ -82,7 +87,7 @@ statement
     | DROP namespace (IF EXISTS)? multipartIdentifier
         (RESTRICT | CASCADE)?                                          #dropNamespace
     | SHOW namespaces ((FROM | IN) multipartIdentifier)?
-        (LIKE? pattern=STRING)?                                        #showNamespaces
+        (LIKE? pattern=stringLit)?                                     #showNamespaces
     | createTableHeader (LEFT_PAREN createOrReplaceTableColTypeList RIGHT_PAREN)? tableProvider?
         createTableClauses
         (AS? query)?                                                   #createTable
@@ -132,7 +137,7 @@ statement
         LEFT_PAREN columns=qualifiedColTypeWithPositionList
         RIGHT_PAREN                                                    #hiveReplaceColumns
     | ALTER TABLE multipartIdentifier (partitionSpec)?
-        SET SERDE STRING (WITH SERDEPROPERTIES propertyList)?          #setTableSerDe
+        SET SERDE stringLit (WITH SERDEPROPERTIES propertyList)?       #setTableSerDe
     | ALTER TABLE multipartIdentifier (partitionSpec)?
         SET SERDEPROPERTIES propertyList                               #setTableSerDe
     | ALTER (TABLE | VIEW) multipartIdentifier ADD (IF NOT EXISTS)?
@@ -158,27 +163,27 @@ statement
         (OPTIONS propertyList)?                                        #createTempViewUsing
     | ALTER VIEW multipartIdentifier AS? query                         #alterViewQuery
     | CREATE (OR REPLACE)? TEMPORARY? FUNCTION (IF NOT EXISTS)?
-        multipartIdentifier AS className=STRING
+        multipartIdentifier AS className=stringLit
         (USING resource (COMMA resource)*)?                            #createFunction
     | DROP TEMPORARY? FUNCTION (IF EXISTS)? multipartIdentifier        #dropFunction
     | EXPLAIN (LOGICAL | FORMATTED | EXTENDED | CODEGEN | COST)?
         statement                                                      #explain
     | SHOW TABLES ((FROM | IN) multipartIdentifier)?
-        (LIKE? pattern=STRING)?                                        #showTables
+        (LIKE? pattern=stringLit)?                                     #showTables
     | SHOW TABLE EXTENDED ((FROM | IN) ns=multipartIdentifier)?
-        LIKE pattern=STRING partitionSpec?                             #showTableExtended
+        LIKE pattern=stringLit partitionSpec?                          #showTableExtended
     | SHOW TBLPROPERTIES table=multipartIdentifier
         (LEFT_PAREN key=propertyKey RIGHT_PAREN)?                      #showTblProperties
     | SHOW COLUMNS (FROM | IN) table=multipartIdentifier
         ((FROM | IN) ns=multipartIdentifier)?                          #showColumns
     | SHOW VIEWS ((FROM | IN) multipartIdentifier)?
-        (LIKE? pattern=STRING)?                                        #showViews
+        (LIKE? pattern=stringLit)?                                     #showViews
     | SHOW PARTITIONS multipartIdentifier partitionSpec?               #showPartitions
     | SHOW identifier? FUNCTIONS ((FROM | IN) ns=multipartIdentifier)?
-        (LIKE? (legacy=multipartIdentifier | pattern=STRING))?         #showFunctions
+        (LIKE? (legacy=multipartIdentifier | pattern=stringLit))?      #showFunctions
     | SHOW CREATE TABLE multipartIdentifier (AS SERDE)?                #showCreateTable
     | SHOW CURRENT namespace                                           #showCurrentNamespace
-    | SHOW CATALOGS (LIKE? pattern=STRING)?                            #showCatalogs
+    | SHOW CATALOGS (LIKE? pattern=stringLit)?                         #showCatalogs
     | (DESC | DESCRIBE) FUNCTION EXTENDED? describeFuncName            #describeFunction
     | (DESC | DESCRIBE) namespace EXTENDED?
         multipartIdentifier                                            #describeNamespace
@@ -186,16 +191,16 @@ statement
         multipartIdentifier partitionSpec? describeColName?            #describeRelation
     | (DESC | DESCRIBE) QUERY? query                                   #describeQuery
     | COMMENT ON namespace multipartIdentifier IS
-        comment=(STRING | NULL)                                        #commentNamespace
-    | COMMENT ON TABLE multipartIdentifier IS comment=(STRING | NULL)  #commentTable
+        comment                                                        #commentNamespace
+    | COMMENT ON TABLE multipartIdentifier IS comment                  #commentTable
     | REFRESH TABLE multipartIdentifier                                #refreshTable
     | REFRESH FUNCTION multipartIdentifier                             #refreshFunction
-    | REFRESH (STRING | .*?)                                           #refreshResource
+    | REFRESH (stringLit | .*?)                                        #refreshResource
     | CACHE LAZY? TABLE multipartIdentifier
         (OPTIONS options=propertyList)? (AS? query)?                   #cacheTable
     | UNCACHE TABLE (IF EXISTS)? multipartIdentifier                   #uncacheTable
     | CLEAR CACHE                                                      #clearCache
-    | LOAD DATA LOCAL? INPATH path=STRING OVERWRITE? INTO TABLE
+    | LOAD DATA LOCAL? INPATH path=stringLit OVERWRITE? INTO TABLE
         multipartIdentifier partitionSpec?                             #loadData
     | TRUNCATE TABLE multipartIdentifier partitionSpec?                #truncateTable
     | MSCK REPAIR TABLE multipartIdentifier
@@ -203,7 +208,7 @@ statement
     | op=(ADD | LIST) identifier .*?                                   #manageResource
     | SET ROLE .*?                                                     #failNativeCommand
     | SET TIME ZONE interval                                           #setTimeZone
-    | SET TIME ZONE timezone=(STRING | LOCAL)                          #setTimeZone
+    | SET TIME ZONE timezone                                           #setTimeZone
     | SET TIME ZONE .*?                                                #setTimeZone
     | SET configKey EQ configValue                                     #setQuotedConfiguration
     | SET configKey (EQ .*?)?                                          #setConfiguration
@@ -219,12 +224,18 @@ statement
     | unsupportedHiveNativeCommands .*?                                #failNativeCommand
     ;
 
+timezone
+    : STRING
+    | {!double_quoted_identifiers}? DOUBLEQUOTED_STRING
+    | LOCAL
+    ;
+
 configKey
     : quotedIdentifier
     ;
 
 configValue
-    : quotedIdentifier
+    : backQuotedIdentifier
     ;
 
 unsupportedHiveNativeCommands
@@ -295,11 +306,11 @@ skewSpec
     ;
 
 locationSpec
-    : LOCATION STRING
+    : LOCATION stringLit
     ;
 
 commentSpec
-    : COMMENT STRING
+    : COMMENT stringLit
     ;
 
 query
@@ -309,8 +320,8 @@ query
 insertInto
     : INSERT OVERWRITE TABLE? multipartIdentifier (partitionSpec (IF NOT EXISTS)?)?  identifierList?        #insertOverwriteTable
     | INSERT INTO TABLE? multipartIdentifier partitionSpec? (IF NOT EXISTS)? identifierList?                #insertIntoTable
-    | INSERT OVERWRITE LOCAL? DIRECTORY path=STRING rowFormat? createFileFormat?                            #insertOverwriteHiveDir
-    | INSERT OVERWRITE LOCAL? DIRECTORY (path=STRING)? tableProvider (OPTIONS options=propertyList)?        #insertOverwriteDir
+    | INSERT OVERWRITE LOCAL? DIRECTORY path=stringLit rowFormat? createFileFormat?                         #insertOverwriteHiveDir
+    | INSERT OVERWRITE LOCAL? DIRECTORY (path=stringLit)? tableProvider (OPTIONS options=propertyList)?     #insertOverwriteDir
     ;
 
 partitionSpecLocation
@@ -340,7 +351,7 @@ namespaces
 
 describeFuncName
     : qualifiedName
-    | STRING
+    | stringLit
     | comparisonOperator
     | arithmeticOperator
     | predicateOperator
@@ -384,14 +395,14 @@ property
 
 propertyKey
     : identifier (DOT identifier)*
-    | STRING
+    | stringLit
     ;
 
 propertyValue
     : INTEGER_VALUE
     | DECIMAL_VALUE
     | booleanValue
-    | STRING
+    | stringLit
     ;
 
 constantList
@@ -408,16 +419,16 @@ createFileFormat
     ;
 
 fileFormat
-    : INPUTFORMAT inFmt=STRING OUTPUTFORMAT outFmt=STRING    #tableFileFormat
+    : INPUTFORMAT inFmt=stringLit OUTPUTFORMAT outFmt=stringLit    #tableFileFormat
     | identifier                                             #genericFileFormat
     ;
 
 storageHandler
-    : STRING (WITH SERDEPROPERTIES propertyList)?
+    : stringLit (WITH SERDEPROPERTIES propertyList)?
     ;
 
 resource
-    : identifier STRING
+    : identifier stringLit
     ;
 
 dmlStatementNoWith
@@ -508,11 +519,11 @@ transformClause
             | kind=MAP setQuantifier? expressionSeq
             | kind=REDUCE setQuantifier? expressionSeq)
       inRowFormat=rowFormat?
-      (RECORDWRITER recordWriter=STRING)?
-      USING script=STRING
+      (RECORDWRITER recordWriter=stringLit)?
+      USING script=stringLit
       (AS (identifierSeq | colTypeList | (LEFT_PAREN (identifierSeq | colTypeList) RIGHT_PAREN)))?
       outRowFormat=rowFormat?
-      (RECORDREADER recordReader=STRING)?
+      (RECORDREADER recordReader=stringLit)?
     ;
 
 selectClause
@@ -572,7 +583,7 @@ fromClause
     ;
 
 temporalClause
-    : FOR? (SYSTEM_VERSION | VERSION) AS OF version=(INTEGER_VALUE | STRING)
+    : FOR? (SYSTEM_VERSION | VERSION) AS OF version
     | FOR? (SYSTEM_TIME | TIMESTAMP) AS OF timestamp=valueExpression
     ;
 
@@ -709,13 +720,13 @@ tableAlias
     ;
 
 rowFormat
-    : ROW FORMAT SERDE name=STRING (WITH SERDEPROPERTIES props=propertyList)?       #rowFormatSerde
+    : ROW FORMAT SERDE name=stringLit (WITH SERDEPROPERTIES props=propertyList)?    #rowFormatSerde
     | ROW FORMAT DELIMITED
-      (FIELDS TERMINATED BY fieldsTerminatedBy=STRING (ESCAPED BY escapedBy=STRING)?)?
-      (COLLECTION ITEMS TERMINATED BY collectionItemsTerminatedBy=STRING)?
-      (MAP KEYS TERMINATED BY keysTerminatedBy=STRING)?
-      (LINES TERMINATED BY linesSeparatedBy=STRING)?
-      (NULL DEFINED AS nullDefinedAs=STRING)?                                       #rowFormatDelimited
+      (FIELDS TERMINATED BY fieldsTerminatedBy=stringLit (ESCAPED BY escapedBy=stringLit)?)?
+      (COLLECTION ITEMS TERMINATED BY collectionItemsTerminatedBy=stringLit)?
+      (MAP KEYS TERMINATED BY keysTerminatedBy=stringLit)?
+      (LINES TERMINATED BY linesSeparatedBy=stringLit)?
+      (NULL DEFINED AS nullDefinedAs=stringLit)?                                    #rowFormatDelimited
     ;
 
 multipartIdentifierList
@@ -792,7 +803,7 @@ predicate
     | NOT? kind=IN LEFT_PAREN query RIGHT_PAREN
     | NOT? kind=RLIKE pattern=valueExpression
     | NOT? kind=(LIKE | ILIKE) quantifier=(ANY | SOME | ALL) (LEFT_PAREN RIGHT_PAREN | LEFT_PAREN expression (COMMA expression)* RIGHT_PAREN)
-    | NOT? kind=(LIKE | ILIKE) pattern=valueExpression (ESCAPE escapeChar=STRING)?
+    | NOT? kind=(LIKE | ILIKE) pattern=valueExpression (ESCAPE escapeChar=stringLit)?
     | IS NOT? kind=NULL
     | IS NOT? kind=(TRUE | FALSE | UNKNOWN)
     | IS NOT? kind=DISTINCT FROM right=valueExpression
@@ -856,10 +867,10 @@ primaryExpression
 constant
     : NULL                                                             #nullLiteral
     | interval                                                         #intervalLiteral
-    | identifier STRING                                                #typeConstructor
+    | identifier stringLit                                             #typeConstructor
     | number                                                           #numericLiteral
     | booleanValue                                                     #booleanLiteral
-    | STRING+                                                          #stringLiteral
+    | stringLit+                                                       #stringLiteral
     ;
 
 comparisonOperator
@@ -899,7 +910,9 @@ unitToUnitInterval
     ;
 
 intervalValue
-    : (PLUS | MINUS)? (INTEGER_VALUE | DECIMAL_VALUE | STRING)
+    : (PLUS | MINUS)?
+      (INTEGER_VALUE | DECIMAL_VALUE | STRING
+       | {!double_quoted_identifiers}? DOUBLEQUOTED_STRING)
     ;
 
 colPosition
@@ -1030,6 +1043,11 @@ strictIdentifier
     ;
 
 quotedIdentifier
+    : BACKQUOTED_IDENTIFIER
+    | {double_quoted_identifiers}? DOUBLEQUOTED_STRING
+    ;
+
+backQuotedIdentifier
     : BACKQUOTED_IDENTIFIER
     ;
 
@@ -1055,7 +1073,22 @@ alterColumnAction
     | dropDefault=DROP DEFAULT
     ;
 
+stringLit
+    : STRING
+    | {!double_quoted_identifiers}? DOUBLEQUOTED_STRING
+    ;
+
+comment
+    : STRING
+    | {!double_quoted_identifiers}? DOUBLEQUOTED_STRING
+    | NULL
+    ;
 
+version
+    : INTEGER_VALUE
+    | STRING
+    | {!double_quoted_identifiers}? DOUBLEQUOTED_STRING
+    ;
 
 // When `SQL_standard_keyword_behavior=true`, there are 2 kinds of keywords in Spark SQL.
 // - Reserved keywords:
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index 248837cc3ae..a89f5a0f3ae 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -707,15 +707,15 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
       isDistinct = false)
 
     ScriptTransformation(
-      string(transformClause.script),
+      string(visitStringLit(transformClause.script)),
       attributes,
       plan,
       withScriptIOSchema(
         ctx,
         transformClause.inRowFormat,
-        transformClause.recordWriter,
+        visitStringLit(transformClause.recordWriter),
         transformClause.outRowFormat,
-        transformClause.recordReader,
+        visitStringLit(transformClause.recordReader),
         schemaLess
       )
     )
@@ -819,6 +819,11 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     (Seq[(String, String)], Option[String], Seq[(String, String)], Option[String])
 
   protected def getRowFormatDelimited(ctx: RowFormatDelimitedContext): ScriptIOFormat = {
+
+    def entry(key: String, value: StringLitContext): Seq[(String, String)] = {
+      Option(value).toSeq.map(x => key -> string(visitStringLit(x)))
+    }
+
     // TODO we should use the visitRowFormatDelimited function here. However HiveScriptIOSchema
     // expects a seq of pairs in which the old parsers' token names are used as keys.
     // Transforming the result of visitRowFormatDelimited would be quite a bit messier than
@@ -827,8 +832,8 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
       entry("TOK_TABLEROWFORMATCOLLITEMS", ctx.collectionItemsTerminatedBy) ++
       entry("TOK_TABLEROWFORMATMAPKEYS", ctx.keysTerminatedBy) ++
       entry("TOK_TABLEROWFORMATNULL", ctx.nullDefinedAs) ++
-      Option(ctx.linesSeparatedBy).toSeq.map { token =>
-        val value = string(token)
+      Option(ctx.linesSeparatedBy).toSeq.map { stringLitCtx =>
+        val value = string(visitStringLit(stringLitCtx))
         validate(
           value == "\n",
           s"LINES TERMINATED BY only supports newline '\\n' right now: $value",
@@ -1276,14 +1281,24 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     table.optionalMap(ctx.sample)(withSample)
   }
 
+  override def visitVersion(ctx: VersionContext): Option[String] = {
+    if (ctx != null) {
+      if (ctx.INTEGER_VALUE != null) {
+        Some(ctx.INTEGER_VALUE().getText)
+      } else if (ctx.DOUBLEQUOTED_STRING() != null) {
+        Option(ctx.DOUBLEQUOTED_STRING()).map(string)
+      } else {
+        Option(ctx.STRING()).map(string)
+      }
+    } else {
+      None
+    }
+  }
+
   private def withTimeTravel(
       ctx: TemporalClauseContext, plan: LogicalPlan): LogicalPlan = withOrigin(ctx) {
     val v = ctx.version
-    val version = if (ctx.INTEGER_VALUE != null) {
-      Some(v.getText)
-    } else {
-      Option(v).map(string)
-    }
+    val version = visitVersion(ctx.version)
     val timestamp = Option(ctx.timestamp).map(expression)
     if (timestamp.exists(_.references.nonEmpty)) {
       throw QueryParsingErrors.invalidTimeTravelSpec(
@@ -1671,7 +1686,8 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
                 .map(p => invertIfNotDefined(getLike(e, p))).toSeq.reduceLeft(And)
             }
           case _ =>
-            val escapeChar = Option(ctx.escapeChar).map(string).map { str =>
+            val escapeChar = Option(ctx.escapeChar)
+              .map(stringLitCtx => string(visitStringLit(stringLitCtx))).map { str =>
               if (str.length != 1) {
                 throw QueryParsingErrors.invalidEscapeStringError(ctx)
               }
@@ -2205,7 +2221,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
    * Currently Date, Timestamp, Interval and Binary typed literals are supported.
    */
   override def visitTypeConstructor(ctx: TypeConstructorContext): Literal = withOrigin(ctx) {
-    val value = string(ctx.STRING)
+    val value = string(visitStringLit(ctx.stringLit))
     val valueType = ctx.identifier.getText.toUpperCase(Locale.ROOT)
 
     def toLiteral[T](f: UTF8String => Option[T], t: DataType): Literal = {
@@ -2444,9 +2460,9 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
    */
   private def createString(ctx: StringLiteralContext): String = {
     if (conf.escapedStringLiterals) {
-      ctx.STRING().asScala.map(stringWithoutUnescape).mkString
+      ctx.stringLit.asScala.map(x => stringWithoutUnescape(visitStringLit(x))).mkString
     } else {
-      ctx.STRING().asScala.map(string).mkString
+      ctx.stringLit.asScala.map(x => string(visitStringLit(x))).mkString
     }
   }
 
@@ -2598,8 +2614,14 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
         assert(units.length == values.length)
         val kvs = units.indices.map { i =>
           val u = units(i).getText
-          val v = if (values(i).STRING() != null) {
-            val value = string(values(i).STRING())
+          val v = if (values(i).STRING() != null || values(i).DOUBLEQUOTED_STRING() != null) {
+            val value = string(if (values(i).STRING() != null) {
+              values(i).STRING()
+            }
+            else {
+              values(i).DOUBLEQUOTED_STRING()
+            }
+            )
             // SPARK-32840: For invalid cases, e.g. INTERVAL '1 day 2' hour,
             // INTERVAL 'interval 1' day, we need to check ahead before they are concatenated with
             // units and become valid ones, e.g. '1 day 2 hour'.
@@ -2637,7 +2659,12 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
    */
   override def visitUnitToUnitInterval(ctx: UnitToUnitIntervalContext): CalendarInterval = {
     withOrigin(ctx) {
-      val value = Option(ctx.intervalValue.STRING).map(string).map { interval =>
+      val value = Option(if (ctx.intervalValue.STRING != null) {
+        ctx.intervalValue.STRING
+      } else {
+        ctx.intervalValue.DOUBLEQUOTED_STRING
+      }
+      ).map(string).map { interval =>
         if (ctx.intervalValue().MINUS() == null) {
           interval
         } else if (interval.startsWith("-")) {
@@ -2869,7 +2896,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
    * Create a location string.
    */
   override def visitLocationSpec(ctx: LocationSpecContext): String = withOrigin(ctx) {
-    string(ctx.STRING)
+    string(visitStringLit(ctx.stringLit))
   }
 
   /**
@@ -2883,7 +2910,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
    * Create a comment string.
    */
   override def visitCommentSpec(ctx: CommentSpecContext): String = withOrigin(ctx) {
-    string(ctx.STRING)
+    string(visitStringLit(ctx.stringLit))
   }
 
   /**
@@ -2978,8 +3005,8 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
    * identifier.
    */
   override def visitPropertyKey(key: PropertyKeyContext): String = {
-    if (key.STRING != null) {
-      string(key.STRING)
+    if (key.stringLit() != null) {
+      string(visitStringLit(key.stringLit()))
     } else {
       key.getText
     }
@@ -2992,8 +3019,8 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
   override def visitPropertyValue(value: PropertyValueContext): String = {
     if (value == null) {
       null
-    } else if (value.STRING != null) {
-      string(value.STRING)
+    } else if (value.stringLit() != null) {
+      string(visitStringLit(value.stringLit()))
     } else if (value.booleanValue != null) {
       value.getText.toLowerCase(Locale.ROOT)
     } else {
@@ -3001,6 +3028,18 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     }
   }
 
+  override def visitStringLit(ctx: StringLitContext): Token = {
+    if (ctx != null) {
+      if (ctx.STRING != null) {
+        ctx.STRING.getSymbol
+      } else {
+        ctx.DOUBLEQUOTED_STRING.getSymbol
+      }
+    } else {
+      null
+    }
+  }
+
   /**
    * Type to keep track of a table header: (identifier, isTemporary, ifNotExists, isExternal).
    */
@@ -3266,7 +3305,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
     ShowNamespaces(
       UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
-      Option(ctx.pattern).map(string))
+      Option(ctx.pattern).map(x => string(visitStringLit(x))))
   }
 
   /**
@@ -3342,7 +3381,8 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     (ctx.fileFormat, ctx.storageHandler) match {
       // Expected format: INPUTFORMAT input_format OUTPUTFORMAT output_format
       case (c: TableFileFormatContext, null) =>
-        SerdeInfo(formatClasses = Some(FormatClasses(string(c.inFmt), string(c.outFmt))))
+        SerdeInfo(formatClasses = Some(FormatClasses(string(visitStringLit(c.inFmt)),
+          string(visitStringLit(c.outFmt)))))
       // Expected format: SEQUENCEFILE | TEXTFILE | RCFILE | ORC | PARQUET | AVRO
       case (c: GenericFileFormatContext, null) =>
         SerdeInfo(storedAs = Some(c.identifier.getText))
@@ -3384,7 +3424,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
   override def visitRowFormatSerde(ctx: RowFormatSerdeContext): SerdeInfo = withOrigin(ctx) {
     import ctx._
     SerdeInfo(
-      serde = Some(string(name)),
+      serde = Some(string(visitStringLit(name))),
       serdeProperties = Option(propertyList).map(visitPropertyKeyValues).getOrElse(Map.empty))
   }
 
@@ -3394,8 +3434,8 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
   override def visitRowFormatDelimited(
       ctx: RowFormatDelimitedContext): SerdeInfo = withOrigin(ctx) {
     // Collect the entries if any.
-    def entry(key: String, value: Token): Seq[(String, String)] = {
-      Option(value).toSeq.map(x => key -> string(x))
+    def entry(key: String, value: StringLitContext): Seq[(String, String)] = {
+      Option(value).toSeq.map(x => key -> string(visitStringLit(x)))
     }
     // TODO we need proper support for the NULL format.
     val entries =
@@ -3406,7 +3446,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
           entry("colelction.delim", ctx.collectionItemsTerminatedBy) ++
           entry("mapkey.delim", ctx.keysTerminatedBy) ++
           Option(ctx.linesSeparatedBy).toSeq.map { token =>
-            val value = string(token)
+            val value = string(visitStringLit(token))
             validate(
               value == "\n",
               s"LINES TERMINATED BY only supports newline '\\n' right now: $value",
@@ -3714,7 +3754,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
     ShowTables(
       UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
-      Option(ctx.pattern).map(string))
+      Option(ctx.pattern).map(x => string(visitStringLit(x))))
   }
 
   /**
@@ -3728,7 +3768,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     }
     ShowTableExtended(
       UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
-      string(ctx.pattern),
+      string(visitStringLit(ctx.pattern)),
       partitionKeys)
   }
 
@@ -3739,7 +3779,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
     ShowViews(
       UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
-      Option(ctx.pattern).map(string))
+      Option(ctx.pattern).map(x => string(visitStringLit(x))))
   }
 
   override def visitColPosition(ctx: ColPositionContext): ColumnPosition = {
@@ -4193,7 +4233,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
   override def visitLoadData(ctx: LoadDataContext): LogicalPlan = withOrigin(ctx) {
     LoadData(
       child = createUnresolvedTable(ctx.multipartIdentifier, "LOAD DATA"),
-      path = string(ctx.path),
+      path = string(visitStringLit(ctx.path)),
       isLocal = ctx.LOCAL != null,
       isOverwrite = ctx.OVERWRITE != null,
       partition = Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec)
@@ -4439,7 +4479,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
         ctx.multipartIdentifier,
         "ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]",
         alterTableTypeMismatchHint),
-      Option(ctx.STRING).map(string),
+      Option(ctx.stringLit).map(x => string(visitStringLit(x))),
       Option(ctx.propertyList).map(visitPropertyKeyValues),
       // TODO a partition spec is allowed to have optional values. This is currently violated.
       Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec))
@@ -4500,8 +4540,8 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
   override def visitDescribeFunction(ctx: DescribeFunctionContext): LogicalPlan = withOrigin(ctx) {
     import ctx._
     val functionName =
-      if (describeFuncName.STRING() != null) {
-        Seq(string(describeFuncName.STRING()))
+      if (describeFuncName.stringLit() != null) {
+        Seq(string(visitStringLit(describeFuncName.stringLit())))
       } else if (describeFuncName.qualifiedName() != null) {
         visitQualifiedName(describeFuncName.qualifiedName)
       } else {
@@ -4540,7 +4580,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     } else {
       UnresolvedNamespace(Nil)
     }
-    val pattern = Option(ctx.pattern).map(string).orElse(legacy.map(_.last))
+    val pattern = Option(ctx.pattern).map(x => string(visitStringLit(x))).orElse(legacy.map(_.last))
     ShowFunctions(nsPlan, userScope, systemScope, pattern)
   }
 
@@ -4554,22 +4594,26 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
   }
 
   override def visitCommentNamespace(ctx: CommentNamespaceContext): LogicalPlan = withOrigin(ctx) {
-    val comment = ctx.comment.getType match {
-      case SqlBaseParser.NULL => ""
-      case _ => string(ctx.STRING)
-    }
+    val comment = visitComment(ctx.comment)
     val nameParts = visitMultipartIdentifier(ctx.multipartIdentifier)
     CommentOnNamespace(UnresolvedNamespace(nameParts), comment)
   }
 
   override def visitCommentTable(ctx: CommentTableContext): LogicalPlan = withOrigin(ctx) {
-    val comment = ctx.comment.getType match {
-      case SqlBaseParser.NULL => ""
-      case _ => string(ctx.STRING)
-    }
+    val comment = visitComment(ctx.comment)
     CommentOnTable(createUnresolvedTable(ctx.multipartIdentifier, "COMMENT ON TABLE"), comment)
   }
 
+  override def visitComment (ctx: CommentContext): String = {
+    if (ctx.STRING() != null) {
+      string(ctx.STRING)
+    } else if (ctx.DOUBLEQUOTED_STRING() != null) {
+      string(ctx.DOUBLEQUOTED_STRING())
+    } else {
+      ""
+    }
+  }
+
   /**
    * Create an index, returning a [[CreateIndex]] logical plan.
    * For example:
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParseDriver.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParseDriver.scala
index c85a0c2cd45..d22514ade78 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParseDriver.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParseDriver.scala
@@ -118,6 +118,7 @@ abstract class AbstractSqlParser extends ParserInterface with SQLConfHelper with
     parser.legacy_setops_precedence_enabled = conf.setOpsPrecedenceEnforced
     parser.legacy_exponent_literal_as_decimal_enabled = conf.exponentLiteralAsDecimalEnabled
     parser.SQL_standard_keyword_behavior = conf.enforceReservedKeywords
+    parser.double_quoted_identifiers = conf.double_quoted_identifiers
 
     try {
       try {
@@ -347,6 +348,23 @@ case object PostProcessor extends SqlBaseParserBaseListener {
 
   /** Remove the back ticks from an Identifier. */
   override def exitQuotedIdentifier(ctx: SqlBaseParser.QuotedIdentifierContext): Unit = {
+    if (ctx.BACKQUOTED_IDENTIFIER() != null) {
+      replaceTokenByIdentifier(ctx, 1) { token =>
+        // Remove the double back ticks in the string.
+        token.setText(token.getText.replace("``", "`"))
+        token
+      }
+    } else if (ctx.DOUBLEQUOTED_STRING() != null) {
+      replaceTokenByIdentifier(ctx, 1) { token =>
+        // Remove the double quotes in the string.
+        token.setText(token.getText.replace("\"\"", "\""))
+        token
+      }
+    }
+  }
+
+  /** Remove the back ticks from an Identifier. */
+  override def exitBackQuotedIdentifier(ctx: SqlBaseParser.BackQuotedIdentifierContext): Unit = {
     replaceTokenByIdentifier(ctx, 1) { token =>
       // Remove the double back ticks in the string.
       token.setText(token.getText.replace("``", "`"))
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
index 6be8d922bb8..acd0ecfd109 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
@@ -98,7 +98,7 @@ object ParserUtils {
   def string(node: TerminalNode): String = unescapeSQLString(node.getText)
 
   /** Convert a string node into a string without unescaping. */
-  def stringWithoutUnescape(node: TerminalNode): String = {
+  def stringWithoutUnescape(node: Token): String = {
     // STRING parser rule forces that the input always has quotes at the starting and ending.
     node.getText.slice(1, node.getText.size - 1)
   }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 44d34af1e47..5a5f3a83a2a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -2909,6 +2909,13 @@ object SQLConf {
     .booleanConf
     .createWithDefault(sys.env.get("SPARK_ANSI_SQL_MODE").contains("true"))
 
+  val DOUBLE_QUOTED_IDENTIFIERS = buildConf("spark.sql.ansi.double_quoted_identifiers")
+    .doc("When true, Spark SQL reads literals enclosed in double quoted (\") as identifiers. " +
+      "When false they are read as string literals.")
+    .version("3.4.0")
+    .booleanConf
+    .createWithDefault(false)
+
   val ENABLE_DEFAULT_COLUMNS =
     buildConf("spark.sql.defaultColumn.enabled")
       .internal()
@@ -4585,6 +4592,8 @@ class SQLConf extends Serializable with Logging {
 
   def enforceReservedKeywords: Boolean = ansiEnabled && getConf(ENFORCE_RESERVED_KEYWORDS)
 
+  def double_quoted_identifiers: Boolean = getConf(DOUBLE_QUOTED_IDENTIFIERS)
+
   def timestampType: AtomicType = getConf(TIMESTAMP_TYPE) match {
     case "TIMESTAMP_LTZ" =>
       // For historical reason, the TimestampType maps to TIMESTAMP WITH LOCAL TIME ZONE
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ParserUtilsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ParserUtilsSuite.scala
index 818ddb63104..1d9965548a2 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ParserUtilsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ParserUtilsSuite.scala
@@ -180,10 +180,10 @@ class ParserUtilsSuite extends SparkFunSuite {
   }
 
   test("string") {
-    assert(string(showDbsContext.pattern) == "identifier_with_wildcards")
-    assert(string(createDbContext.commentSpec().get(0).STRING()) == "database_comment")
+    assert(string(showDbsContext.pattern.STRING()) == "identifier_with_wildcards")
+    assert(string(createDbContext.commentSpec().get(0).stringLit().STRING()) == "database_comment")
 
-    assert(string(createDbContext.locationSpec.asScala.head.STRING) == "/home/user/db")
+    assert(string(createDbContext.locationSpec.asScala.head.stringLit().STRING) == "/home/user/db")
   }
 
   test("position") {
@@ -211,7 +211,7 @@ class ParserUtilsSuite extends SparkFunSuite {
     val ctx = createDbContext.locationSpec.asScala.head
     val current = CurrentOrigin.get
     val (location, origin) = withOrigin(ctx) {
-      (string(ctx.STRING), CurrentOrigin.get)
+      (string(ctx.stringLit().STRING), CurrentOrigin.get)
     }
     assert(location == "/home/user/db")
     assert(origin == Origin(Some(3), Some(27)))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index a82fc47f427..5719b0566df 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -161,22 +161,31 @@ class SparkSqlAstBuilder extends AstBuilder {
         SetCommand(Some(key -> Some(ZoneOffset.ofTotalSeconds(seconds).toString)))
       }
     } else if (ctx.timezone != null) {
-      ctx.timezone.getType match {
-        case SqlBaseParser.LOCAL =>
-          SetCommand(Some(key -> Some(TimeZone.getDefault.getID)))
-        case _ =>
-          SetCommand(Some(key -> Some(string(ctx.STRING))))
-      }
+      SetCommand(Some(key -> Some(visitTimezone(ctx.timezone()))))
     } else {
       throw QueryParsingErrors.invalidTimeZoneDisplacementValueError(ctx)
     }
   }
 
+  override def visitTimezone (ctx: TimezoneContext): String = {
+    if (ctx.STRING() != null) {
+      string(ctx.STRING)
+    } else if (ctx.DOUBLEQUOTED_STRING() != null) {
+      string(ctx.DOUBLEQUOTED_STRING())
+    } else {
+      TimeZone.getDefault.getID
+    }
+  }
+
   /**
    * Create a [[RefreshResource]] logical plan.
    */
   override def visitRefreshResource(ctx: RefreshResourceContext): LogicalPlan = withOrigin(ctx) {
-    val path = if (ctx.STRING != null) string(ctx.STRING) else extractUnquotedResourcePath(ctx)
+    val path = if (ctx.stringLit != null) {
+      string(visitStringLit(ctx.stringLit))
+    } else {
+      extractUnquotedResourcePath(ctx)
+    }
     RefreshResource(path)
   }
 
@@ -258,8 +267,8 @@ class SparkSqlAstBuilder extends AstBuilder {
   override def visitSetCatalog(ctx: SetCatalogContext): LogicalPlan = withOrigin(ctx) {
     if (ctx.identifier() != null) {
       SetCatalogCommand(ctx.identifier().getText)
-    } else if (ctx.STRING() != null) {
-      SetCatalogCommand(string(ctx.STRING()))
+    } else if (ctx.stringLit() != null) {
+      SetCatalogCommand(string(visitStringLit(ctx.stringLit())))
     } else {
       throw new IllegalStateException("Invalid catalog name")
     }
@@ -269,7 +278,7 @@ class SparkSqlAstBuilder extends AstBuilder {
    * Create a [[ShowCatalogsCommand]] logical command.
    */
   override def visitShowCatalogs(ctx: ShowCatalogsContext) : LogicalPlan = withOrigin(ctx) {
-    ShowCatalogsCommand(Option(ctx.pattern).map(string))
+    ShowCatalogsCommand(Option(ctx.pattern).map(x => string(visitStringLit(x))))
   }
 
   /**
@@ -534,7 +543,8 @@ class SparkSqlAstBuilder extends AstBuilder {
       val resourceType = resource.identifier.getText.toLowerCase(Locale.ROOT)
       resourceType match {
         case "jar" | "file" | "archive" =>
-          FunctionResource(FunctionResourceType.fromString(resourceType), string(resource.STRING))
+          FunctionResource(FunctionResourceType.fromString(resourceType),
+            string(visitStringLit(resource.stringLit())))
         case other =>
           operationNotAllowed(s"CREATE FUNCTION with resource type '$resourceType'", ctx)
       }
@@ -548,7 +558,7 @@ class SparkSqlAstBuilder extends AstBuilder {
     if (ctx.TEMPORARY == null) {
       CreateFunction(
         UnresolvedIdentifier(functionIdentifier),
-        string(ctx.className),
+        string(visitStringLit(ctx.className)),
         resources.toSeq,
         ctx.EXISTS != null,
         ctx.REPLACE != null)
@@ -566,7 +576,7 @@ class SparkSqlAstBuilder extends AstBuilder {
       }
       CreateFunctionCommand(
         FunctionIdentifier(functionIdentifier.last),
-        string(ctx.className),
+        string(visitStringLit(ctx.className)),
         resources.toSeq,
         true,
         ctx.EXISTS != null,
@@ -788,7 +798,7 @@ class SparkSqlAstBuilder extends AstBuilder {
     val options = Option(ctx.options).map(visitPropertyKeyValues).getOrElse(Map.empty)
     var storage = DataSource.buildStorageFormatFromOptions(options)
 
-    val path = Option(ctx.path).map(string).getOrElse("")
+    val path = Option(ctx.path).map(x => string(visitStringLit(x))).getOrElse("")
 
     if (!(path.isEmpty ^ storage.locationUri.isEmpty)) {
       throw QueryParsingErrors.directoryPathAndOptionsPathBothSpecifiedError(ctx)
@@ -833,7 +843,7 @@ class SparkSqlAstBuilder extends AstBuilder {
       ctx: InsertOverwriteHiveDirContext): InsertDirParams = withOrigin(ctx) {
     val serdeInfo = getSerdeInfo(
       Option(ctx.rowFormat).toSeq, Option(ctx.createFileFormat).toSeq, ctx)
-    val path = string(ctx.path)
+    val path = string(visitStringLit(ctx.path))
     // The path field is required
     if (path.isEmpty) {
       operationNotAllowed("INSERT OVERWRITE DIRECTORY must be accompanied by 
path", ctx)
diff --git a/sql/core/src/test/resources/sql-tests/inputs/double-quoted-identifiers.sql b/sql/core/src/test/resources/sql-tests/inputs/double-quoted-identifiers.sql
new file mode 100644
index 00000000000..7fe35e5a410
--- /dev/null
+++ b/sql/core/src/test/resources/sql-tests/inputs/double-quoted-identifiers.sql
@@ -0,0 +1,107 @@
+-- test cases for spark.sql.ansi.double_quoted_identifiers
+
+-- Base line
+SET spark.sql.ansi.double_quoted_identifiers = false;
+
+-- All these should error out in the parser
+SELECT 1 FROM "not_exist";
+
+USE SCHEMA "not_exist";
+
+ALTER TABLE "not_exist" ADD COLUMN not_exist int;
+
+ALTER TABLE not_exist ADD COLUMN "not_exist" int;
+
+SELECT 1 AS "not_exist" FROM not_exist;
+
+SELECT 1 FROM not_exist AS X("hello");
+
+SELECT "not_exist"();
+
+SELECT "not_exist".not_exist();
+
+-- All these should error out in analysis
+SELECT 1 FROM `hello`;
+
+USE SCHEMA `not_exist`;
+
+ALTER TABLE `not_exist` ADD COLUMN not_exist int;
+
+ALTER TABLE not_exist ADD COLUMN `not_exist` int;
+
+SELECT 1 AS `not_exist` FROM `not_exist`;
+
+SELECT 1 FROM not_exist AS X(`hello`);
+
+SELECT `not_exist`();
+
+SELECT `not_exist`.not_exist();
+
+-- Strings in various situations all work
+SELECT "hello";
+
+CREATE TEMPORARY VIEW v(c1 COMMENT "hello") AS SELECT 1;
+DROP VIEW v;
+
+SELECT INTERVAL "1" YEAR;
+
+-- Now turn on the config.
+SET spark.sql.ansi.double_quoted_identifiers = true;
+
+-- All these should error out in analysis now
+SELECT 1 FROM "not_exist";
+
+USE SCHEMA "not_exist";
+
+ALTER TABLE "not_exist" ADD COLUMN not_exist int;
+
+ALTER TABLE not_exist ADD COLUMN "not_exist" int;
+
+SELECT 1 AS "not_exist" FROM not_exist;
+
+SELECT 1 FROM not_exist AS X("hello");
+
+SELECT "not_exist"();
+
+SELECT "not_exist".not_exist();
+
+SELECT "hello";
+
+-- Back ticks still work
+SELECT 1 FROM `hello`;
+
+USE SCHEMA `not_exist`;
+
+ALTER TABLE `not_exist` ADD COLUMN not_exist int;
+
+ALTER TABLE not_exist ADD COLUMN `not_exist` int;
+
+SELECT 1 AS `not_exist` FROM `not_exist`;
+
+SELECT 1 FROM not_exist AS X(`hello`);
+
+SELECT `not_exist`();
+
+SELECT `not_exist`.not_exist();
+
+-- These fail in the parser now
+CREATE TEMPORARY VIEW v(c1 COMMENT "hello") AS SELECT 1;
+DROP VIEW v;
+
+SELECT INTERVAL "1" YEAR;
+
+-- Single ticks still work
+SELECT 'hello';
+
+CREATE TEMPORARY VIEW v(c1 COMMENT 'hello') AS SELECT 1;
+DROP VIEW v;
+
+SELECT INTERVAL '1' YEAR;
+
+-- A whole scenario
+CREATE SCHEMA "myschema";
+CREATE TEMPORARY VIEW "myview"("c1") AS
+  WITH "v"("a") AS (SELECT 1) SELECT "a" FROM "v";
+SELECT "a1" AS "a2" FROM "myview" AS "atab"("a1");
+DROP TABLE "myview";
+DROP SCHEMA "myschema";
diff --git a/sql/core/src/test/resources/sql-tests/results/double-quoted-identifiers.sql.out b/sql/core/src/test/resources/sql-tests/results/double-quoted-identifiers.sql.out
new file mode 100644
index 00000000000..9207c76e9a0
--- /dev/null
+++ b/sql/core/src/test/resources/sql-tests/results/double-quoted-identifiers.sql.out
@@ -0,0 +1,608 @@
+-- Automatically generated by SQLQueryTestSuite
+-- !query
+SET spark.sql.ansi.double_quoted_identifiers = false
+-- !query schema
+struct<key:string,value:string>
+-- !query output
+spark.sql.ansi.double_quoted_identifiers       false
+
+
+-- !query
+SELECT 1 FROM "not_exist"
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"not_exist\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+USE SCHEMA "not_exist"
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"not_exist\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+ALTER TABLE "not_exist" ADD COLUMN not_exist int
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"not_exist\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+ALTER TABLE not_exist ADD COLUMN "not_exist" int
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"not_exist\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+SELECT 1 AS "not_exist" FROM not_exist
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"not_exist\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+SELECT 1 FROM not_exist AS X("hello")
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"hello\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+SELECT "not_exist"()
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"not_exist\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+SELECT "not_exist".not_exist()
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"not_exist\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+SELECT 1 FROM `hello`
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table or view not found: hello; line 1 pos 14
+
+
+-- !query
+USE SCHEMA `not_exist`
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException
+Database 'not_exist' not found
+
+
+-- !query
+ALTER TABLE `not_exist` ADD COLUMN not_exist int
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table not found: not_exist; line 1 pos 12
+
+
+-- !query
+ALTER TABLE not_exist ADD COLUMN `not_exist` int
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table not found: not_exist; line 1 pos 12
+
+
+-- !query
+SELECT 1 AS `not_exist` FROM `not_exist`
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table or view not found: not_exist; line 1 pos 29
+
+
+-- !query
+SELECT 1 FROM not_exist AS X(`hello`)
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table or view not found: not_exist; line 1 pos 14
+
+
+-- !query
+SELECT `not_exist`()
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1242",
+  "messageParameters" : {
+    "fullName" : "spark_catalog.default.not_exist",
+    "rawName" : "not_exist"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 20,
+    "fragment" : "`not_exist`()"
+  } ]
+}
+
+
+-- !query
+SELECT `not_exist`.not_exist()
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1243",
+  "messageParameters" : {
+    "rawName" : "not_exist.not_exist"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 30,
+    "fragment" : "`not_exist`.not_exist()"
+  } ]
+}
+
+
+-- !query
+SELECT "hello"
+-- !query schema
+struct<hello:string>
+-- !query output
+hello
+
+
+-- !query
+CREATE TEMPORARY VIEW v(c1 COMMENT "hello") AS SELECT 1
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+DROP VIEW v
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+SELECT INTERVAL "1" YEAR
+-- !query schema
+struct<INTERVAL '1' YEAR:interval year>
+-- !query output
+1-0
+
+
+-- !query
+SET spark.sql.ansi.double_quoted_identifiers = true
+-- !query schema
+struct<key:string,value:string>
+-- !query output
+spark.sql.ansi.double_quoted_identifiers       true
+
+
+-- !query
+SELECT 1 FROM "not_exist"
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table or view not found: not_exist; line 1 pos 14
+
+
+-- !query
+USE SCHEMA "not_exist"
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException
+Database 'not_exist' not found
+
+
+-- !query
+ALTER TABLE "not_exist" ADD COLUMN not_exist int
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table not found: not_exist; line 1 pos 12
+
+
+-- !query
+ALTER TABLE not_exist ADD COLUMN "not_exist" int
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table not found: not_exist; line 1 pos 12
+
+
+-- !query
+SELECT 1 AS "not_exist" FROM not_exist
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table or view not found: not_exist; line 1 pos 29
+
+
+-- !query
+SELECT 1 FROM not_exist AS X("hello")
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table or view not found: not_exist; line 1 pos 14
+
+
+-- !query
+SELECT "not_exist"()
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1242",
+  "messageParameters" : {
+    "fullName" : "spark_catalog.default.not_exist",
+    "rawName" : "not_exist"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 20,
+    "fragment" : "\"not_exist\"()"
+  } ]
+}
+
+
+-- !query
+SELECT "not_exist".not_exist()
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1243",
+  "messageParameters" : {
+    "rawName" : "not_exist.not_exist"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 30,
+    "fragment" : "\"not_exist\".not_exist()"
+  } ]
+}
+
+
+-- !query
+SELECT "hello"
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+{
+  "errorClass" : "UNRESOLVED_COLUMN",
+  "errorSubClass" : "WITHOUT_SUGGESTION",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "objectName" : "`hello`"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 14,
+    "fragment" : "\"hello\""
+  } ]
+}
+
+
+-- !query
+SELECT 1 FROM `hello`
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table or view not found: hello; line 1 pos 14
+
+
+-- !query
+USE SCHEMA `not_exist`
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException
+Database 'not_exist' not found
+
+
+-- !query
+ALTER TABLE `not_exist` ADD COLUMN not_exist int
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table not found: not_exist; line 1 pos 12
+
+
+-- !query
+ALTER TABLE not_exist ADD COLUMN `not_exist` int
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table not found: not_exist; line 1 pos 12
+
+
+-- !query
+SELECT 1 AS `not_exist` FROM `not_exist`
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table or view not found: not_exist; line 1 pos 29
+
+
+-- !query
+SELECT 1 FROM not_exist AS X(`hello`)
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+Table or view not found: not_exist; line 1 pos 14
+
+
+-- !query
+SELECT `not_exist`()
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1242",
+  "messageParameters" : {
+    "fullName" : "spark_catalog.default.not_exist",
+    "rawName" : "not_exist"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 20,
+    "fragment" : "`not_exist`()"
+  } ]
+}
+
+
+-- !query
+SELECT `not_exist`.not_exist()
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.AnalysisException
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1243",
+  "messageParameters" : {
+    "rawName" : "not_exist.not_exist"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 30,
+    "fragment" : "`not_exist`.not_exist()"
+  } ]
+}
+
+
+-- !query
+CREATE TEMPORARY VIEW v(c1 COMMENT "hello") AS SELECT 1
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"hello\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+DROP VIEW v
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.analysis.NoSuchTableException
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1115",
+  "messageParameters" : {
+    "msg" : "Table spark_catalog.default.v not found"
+  }
+}
+
+
+-- !query
+SELECT INTERVAL "1" YEAR
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.parser.ParseException
+{
+  "errorClass" : "PARSE_SYNTAX_ERROR",
+  "sqlState" : "42000",
+  "messageParameters" : {
+    "error" : "'\"1\"'",
+    "hint" : ""
+  }
+}
+
+
+-- !query
+SELECT 'hello'
+-- !query schema
+struct<hello:string>
+-- !query output
+hello
+
+
+-- !query
+CREATE TEMPORARY VIEW v(c1 COMMENT 'hello') AS SELECT 1
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+DROP VIEW v
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+SELECT INTERVAL '1' YEAR
+-- !query schema
+struct<INTERVAL '1' YEAR:interval year>
+-- !query output
+1-0
+
+
+-- !query
+CREATE SCHEMA "myschema"
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+CREATE TEMPORARY VIEW "myview"("c1") AS
+  WITH "v"("a") AS (SELECT 1) SELECT "a" FROM "v"
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+SELECT "a1" AS "a2" FROM "myview" AS "atab"("a1")
+-- !query schema
+struct<a2:int>
+-- !query output
+1
+
+
+-- !query
+DROP TABLE "myview"
+-- !query schema
+struct<>
+-- !query output
+
+
+
+-- !query
+DROP SCHEMA "myschema"
+-- !query schema
+struct<>
+-- !query output
+

