Github user yhuai commented on a diff in the pull request:

    https://github.com/apache/spark/pull/3431#discussion_r22697226
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala ---
    @@ -83,10 +118,73 @@ private[sql] class DDLParser extends 
StandardTokenParsers with PackratParsers wi
       protected lazy val className: Parser[String] = repsep(ident, ".") ^^ { 
case s => s.mkString(".")}
     
       protected lazy val pair: Parser[(String, String)] = ident ~ stringLit ^^ 
{ case k ~ v => (k,v) }
    +
    +  protected lazy val column: Parser[StructField] =
    +    ident ~ dataType ^^ { case columnName ~ typ =>
    +      StructField(cleanIdentifier(columnName), typ)
    +    }
    +
    +  protected lazy val primitiveType: Parser[DataType] =
    +    STRING ^^^ StringType |
    +    BINARY ^^^ BinaryType |
    +    BOOLEAN ^^^ BooleanType |
    +    TINYINT ^^^ ByteType |
    +    SMALLINT ^^^ ShortType |
    +    INT ^^^ IntegerType |
    +    BIGINT ^^^ LongType |
    +    FLOAT ^^^ FloatType |
    +    DOUBLE ^^^ DoubleType |
    +    fixedDecimalType |                   // decimal with precision/scale
    +    DECIMAL ^^^ DecimalType.Unlimited |  // decimal with no precision/scale
    +    DATE ^^^ DateType |
    +    TIMESTAMP ^^^ TimestampType |
    +    VARCHAR ~ "(" ~ numericLit ~ ")" ^^^ StringType
    +
    +  protected lazy val fixedDecimalType: Parser[DataType] =
    +    (DECIMAL ~ "(" ~> numericLit) ~ ("," ~> numericLit <~ ")") ^^ {
    +      case precision ~ scale => DecimalType(precision.toInt, scale.toInt)
    +    }
    +
    +  protected lazy val arrayType: Parser[DataType] =
    +    ARRAY ~> "<" ~> dataType <~ ">" ^^ {
    +      case tpe => ArrayType(tpe)
    +    }
    +
    +  protected lazy val mapType: Parser[DataType] =
    +    MAP ~> "<" ~> dataType ~ "," ~ dataType <~ ">" ^^ {
    +      case t1 ~ _ ~ t2 => MapType(t1, t2)
    +    }
    +
    +  protected lazy val structField: Parser[StructField] =
    +    ident ~ ":" ~ dataType ^^ {
    +      case fieldName ~ _ ~ tpe => StructField(cleanIdentifier(fieldName), 
tpe, nullable = true)
    +    }
    +
    +  protected lazy val structType: Parser[DataType] =
    +    (STRUCT ~> "<" ~> repsep(structField, ",") <~ ">" ^^ {
    +    case fields => new StructType(fields)
    +    }) |
    +    (STRUCT ~> "<>" ^^ {
    +      case fields => new StructType(Nil)
    +    })
    +
    +  private[sql] lazy val dataType: Parser[DataType] =
    +    arrayType |
    +    mapType |
    +    structType |
    +    primitiveType
    +
    +  protected val escapedIdentifier = "`([^`]+)`".r
    +  /** Strips backticks from ident if present */
    +  protected def cleanIdentifier(ident: String): String = ident match {
    +    case escapedIdentifier(i) => i
    +    case plainIdent => plainIdent
    +  }
    --- End diff --
    
    Seems when we use `ident`, the parser will automatically take care of 
backticks. We can remove it. I am sorry I just noticed it.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to