Github user gatorsmile commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16626#discussion_r106097589
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala ---
    @@ -175,6 +178,80 @@ case class AlterTableRenameCommand(
     }
     
     /**
    + * A command that adds columns to a table.
    + * The syntax of using this command in SQL is:
    + * {{{
    + *   ALTER TABLE table_identifier
    + *   ADD COLUMNS (col_name data_type [COMMENT col_comment], ...);
    + * }}}
    + */
    +case class AlterTableAddColumnsCommand(
    +    table: TableIdentifier,
    +    columns: Seq[StructField]) extends RunnableCommand {
    +  override def run(sparkSession: SparkSession): Seq[Row] = {
    +    val catalog = sparkSession.sessionState.catalog
    +    val catalogTable = verifyAlterTableAddColumn(catalog, table)
    +
    +    // If an exception is thrown here we can just assume the table is uncached;
    +    // this can happen with Hive tables when the underlying catalog is in-memory.
    +    val wasCached = Try(sparkSession.catalog.isCached(table.unquotedString)).getOrElse(false)
    +    if (wasCached) {
    +      try {
    +        sparkSession.catalog.uncacheTable(table.unquotedString)
    +      } catch {
    +        case NonFatal(e) => log.warn(e.toString, e)
    +      }
    +    }
    +    // Invalidate the table last, otherwise uncaching the table would load the logical plan
    +    // back into the hive metastore cache
    +    catalog.refreshTable(table)
    +    val partitionFields = catalogTable.schema.takeRight(catalogTable.partitionColumnNames.length)
    +    val dataSchema = catalogTable.schema
    +      .take(catalogTable.schema.length - catalogTable.partitionColumnNames.length)
    +    catalog.alterTableSchema(table, newSchema =
    +      catalogTable.schema.copy(fields = (dataSchema ++ columns ++ partitionFields).toArray))
    +
    +    Seq.empty[Row]
    +  }
    +
    +  /**
    +   * The ALTER TABLE ADD COLUMNS command does not support temporary views/tables,
    +   * views, or datasource tables with text or orc formats, or with an external provider.
    +   * For datasource tables, it currently supports only parquet, json, and csv.
    +   */
    +  private def verifyAlterTableAddColumn(
    +      catalog: SessionCatalog,
    +      table: TableIdentifier): CatalogTable = {
    +    val catalogTable = catalog.getTempViewOrPermanentTableMetadata(table)
    +
    +    if (catalogTable.tableType == CatalogTableType.VIEW) {
    +      throw new AnalysisException(
    +        s"${table.toString} is a VIEW, which does not support ALTER ADD COLUMNS.")
    +    }
    +
    +    if (DDLUtils.isDatasourceTable(catalogTable)) {
    +      DataSource.lookupDataSource(catalogTable.provider.get).newInstance() match {
    +        // For a datasource table, this command only supports the following file formats.
    +        // TextFileFormat defaults to a single column "value".
    +        // OrcFileFormat cannot yet handle a difference between the user-specified schema and
    +        // the inferred schema. TODO: once this issue is resolved, we can add Orc back.
    +        // A Hive type is already treated as a Hive serde table, so that logic
    +        // never reaches here.
    +        case _: JsonFileFormat | _: CSVFileFormat | _: ParquetFileFormat =>
    +        case s =>
    +          throw new AnalysisException(
    +            s"""${table} is a datasource table with type $s,
    +               |which does not support ALTER ADD COLUMNS.
    +            """.stripMargin)
    --- End diff ---
    
    Nit:
    ```Scala
              throw new AnalysisException(
                s"$table is a datasource table with type $s, which does not support ALTER ADD COLUMNS.")
    ```
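    
    (The single-line form also avoids the line break and trailing indentation that the
    triple-quoted `stripMargin` string would otherwise embed in the error message.)
    
    For context, a minimal usage sketch of the command this diff adds, based only on the syntax
    in the scaladoc above (the table and column names are hypothetical, and `spark` is assumed
    to be an existing SparkSession):
    
    ```Scala
    // Create a parquet-backed datasource table; parquet is one of the formats the command accepts.
    spark.sql("CREATE TABLE t1 (c1 INT) USING parquet")
    
    // Add a column using the documented syntax:
    //   ALTER TABLE table_identifier ADD COLUMNS (col_name data_type [COMMENT col_comment], ...)
    spark.sql("ALTER TABLE t1 ADD COLUMNS (c2 STRING COMMENT 'added column')")
    
    // The new column should appear after the existing data columns
    // (and before any partition columns).
    spark.sql("DESCRIBE t1").show(false)
    ```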

