Github user gatorsmile commented on a diff in the pull request:

    https://github.com/apache/spark/pull/14482#discussion_r73466961
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala ---
    @@ -62,6 +66,122 @@ private[sql] class ResolveDataSource(sparkSession: SparkSession) extends Rule[Lo
     }
     
     /**
    + * Preprocess some DDL plans, e.g. [[CreateTable]], to do some normalization and checking.
    + */
    +case class PreprocessDDL(conf: SQLConf) extends Rule[LogicalPlan] {
    +
    +  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    +    // When we CREATE TABLE without specifying the table schema, we should fail the query if
    +    // bucketing information is specified, as we can't infer bucketing from data files currently,
    +    // and we should ignore the partition columns if they are specified, as we will infer them
    +    // later, at runtime.
    +    case c @ CreateTable(tableDesc, _, None) if tableDesc.schema.isEmpty =>
    +      if (tableDesc.bucketSpec.isDefined) {
    +        failAnalysis("Cannot specify bucketing information if the table 
schema is not specified " +
    +          "when creating and will be inferred at runtime")
    +      }
    +
    +      val partitionColumnNames = tableDesc.partitionColumnNames
    +      if (partitionColumnNames.nonEmpty) {
    +        // The table does not have a specified schema, which means that the schema will be
    +        // inferred at runtime. So, we are not expecting partition columns and we will discover
    +        // partitions at runtime. However, if there are specified partition columns, we simply
    +        // ignore them and provide a warning message.
    +        logWarning(
    +          s"Specified partition columns (${partitionColumnNames.mkString(",")}) will be " +
    +            s"ignored. The schema and partition columns of table ${tableDesc.identifier} will " +
    +            "be inferred.")
    +        c.copy(tableDesc = tableDesc.copy(partitionColumnNames = Nil))
    +      } else {
    +        c
    +      }
    +
    +    // Here we normalize partition, bucket and sort column names, w.r.t. the case sensitivity
    +    // config, and do various checks:
    +    //   * column names in table definition can't be duplicated.
    +    //   * partition, bucket and sort column names must exist in table definition.
    +    //   * partition, bucket and sort column names can't be duplicated.
    +    //   * can't use all table columns as partition columns.
    +    //   * partition columns' type must be AtomicType.
    +    //   * sort columns' type must be orderable.
    +    case c @ CreateTable(tableDesc, mode, query) if c.childrenResolved =>
    +      val schema = if (query.isDefined) query.get.schema else tableDesc.schema
    +      checkDuplication(schema.map(_.name), "table definition of " + tableDesc.identifier)
    +
    +      val partitionColsChecked = checkPartitionColumns(schema, tableDesc)
    +      val bucketColsChecked = checkBucketColumns(schema, partitionColsChecked)
    +      c.copy(tableDesc = bucketColsChecked)
    +  }
    +
    +  private def checkPartitionColumns(schema: StructType, tableDesc: CatalogTable): CatalogTable = {
    +    val normalizedPartitionCols = tableDesc.partitionColumnNames.map { colName =>
    +      normalizeColumnName(tableDesc.identifier, schema, colName, "partition")
    +    }
    +    checkDuplication(normalizedPartitionCols, "partition")
    +
    +    if (schema.nonEmpty && normalizedPartitionCols.length == schema.length) {
    +      failAnalysis("Cannot use all columns for partition columns")
    +    }
    +
    +    schema.filter(f => normalizedPartitionCols.contains(f.name)).map(_.dataType).foreach {
    +      case _: AtomicType => // OK
    +      case other => failAnalysis(s"Cannot use ${other.simpleString} for partition column")
    +    }
    +
    +    tableDesc.copy(partitionColumnNames = normalizedPartitionCols)
    +  }
    +
    +  private def checkBucketColumns(schema: StructType, tableDesc: CatalogTable): CatalogTable = {
    +    tableDesc.bucketSpec match {
    +      case Some(BucketSpec(numBuckets, bucketColumnNames, sortColumnNames)) =>
    +        val normalizedBucketCols = bucketColumnNames.map { colName =>
    +          normalizeColumnName(tableDesc.identifier, schema, colName, "bucket")
    +        }
    +        checkDuplication(normalizedBucketCols, "bucket")
    +
    +        val normalizedSortCols = sortColumnNames.map { colName =>
    +          normalizeColumnName(tableDesc.identifier, schema, colName, "sort")
    +        }
    +        checkDuplication(normalizedSortCols, "sort")
    +
    +        schema.filter(f => normalizedSortCols.contains(f.name)).map(_.dataType).foreach {
    +          case dt if RowOrdering.isOrderable(dt) => // OK
    +          case other => failAnalysis(s"Cannot use ${other.simpleString} for sorting column")
    +        }
    +
    +        tableDesc.copy(
    +          bucketSpec = Some(BucketSpec(numBuckets, normalizedBucketCols, normalizedSortCols))
    +        )
    +
    +      case None => tableDesc
    +    }
    +  }
    +
    +  private def checkDuplication(colNames: Seq[String], colType: String): Unit = {
    +    if (colNames.distinct.length != colNames.length) {
    +      val duplicateColumns = colNames.groupBy(identity).collect {
    --- End diff --
    
    Instead of using `identity`, we can do something like:
    ```Scala
      private def convertToCaseSensitiveAnalysisAware(name: String): String = {
        if (conf.caseSensitiveAnalysis) name else name.toLowerCase
      }
    ```
    
    ```Scala
        val duplicateColumns =
          columnNames.groupBy(convertToCaseSensitiveAnalysisAware).collect {
            case (x, ys) if ys.length > 1 => s"`$x`"
          }
    ```
    
    This is from this PR: https://github.com/apache/spark/pull/13756
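    
    For illustration, here is a self-contained sketch of how that duplicate check would behave. This is not the PR's actual code: the `caseSensitiveAnalysis` value stands in for `conf.caseSensitiveAnalysis`, and the object and method names are made up for the example:
    
    ```Scala
    object DuplicationCheckSketch {
      // Stand-in for conf.caseSensitiveAnalysis; assume case-insensitive analysis.
      val caseSensitiveAnalysis: Boolean = false
    
      private def convertToCaseSensitiveAnalysisAware(name: String): String = {
        if (caseSensitiveAnalysis) name else name.toLowerCase
      }
    
      def duplicateColumns(columnNames: Seq[String]): Iterable[String] = {
        // Group by the normalized name and keep only the names that collide.
        columnNames.groupBy(convertToCaseSensitiveAnalysisAware).collect {
          case (x, ys) if ys.length > 1 => s"`$x`"
        }
      }
    
      def main(args: Array[String]): Unit = {
        // groupBy(identity) would treat "col1" and "COL1" as distinct;
        // the case-aware key reports them as duplicates.
        println(duplicateColumns(Seq("col1", "COL1", "col2")).mkString(", "))  // `col1`
      }
    }
    ```
    
    i.e. with case-insensitive analysis, `col1` and `COL1` are reported as duplicates, which `groupBy(identity)` would miss.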
    
    Do you think it is OK?
    


