Github user gatorsmile commented on a diff in the pull request: https://github.com/apache/spark/pull/16626#discussion_r106472336 --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala --- @@ -175,6 +178,87 @@ case class AlterTableRenameCommand( } /** + * A command that adds columns to a table + * The syntax of using this command in SQL is: + * {{{ + * ALTER TABLE table_identifier + * ADD COLUMNS (col_name data_type [COMMENT col_comment], ...); + * }}} +*/ +case class AlterTableAddColumnsCommand( + table: TableIdentifier, + columns: Seq[StructField]) extends RunnableCommand { + override def run(sparkSession: SparkSession): Seq[Row] = { + val catalog = sparkSession.sessionState.catalog + val catalogTable = verifyAlterTableAddColumn(catalog, table) + + try { + sparkSession.catalog.uncacheTable(table.unquotedString) + } catch { + case NonFatal(e) => + log.warn(s"Exception when attempting to uncache table ${table.unquotedString}", e) + } + + // Invalidate the table last, otherwise uncaching the table would load the logical plan + // back into the hive metastore cache + catalog.refreshTable(table) + val partitionFields = catalogTable.schema.takeRight(catalogTable.partitionColumnNames.length) + val newSchemaFields = catalogTable.schema + .take(catalogTable.schema.length - catalogTable.partitionColumnNames.length) ++ + columns ++ partitionFields + checkDuplication(newSchemaFields.map(_.name)) + catalog.alterTableSchema(table, newSchema = + catalogTable.schema.copy(fields = newSchemaFields.toArray)) + + Seq.empty[Row] + } + + private def checkDuplication(colNames: Seq[String]): Unit = { + if (colNames.distinct.length != colNames.length) { + val duplicateColumns = colNames.groupBy(identity).collect { + case (x, ys) if ys.length > 1 => x + } + throw new AnalysisException( + s"Found duplicate column(s): ${duplicateColumns.mkString(", ")}") + } + } + + /** + * ALTER TABLE ADD COLUMNS command does not support temporary view/table, + * view, or 
datasource table with text, orc formats or external provider. + * For datasource tables, it currently only supports parquet, json, and csv. + */ + private def verifyAlterTableAddColumn( + catalog: SessionCatalog, + table: TableIdentifier): CatalogTable = { + val catalogTable = catalog.getTempViewOrPermanentTableMetadata(table) + + if (catalogTable.tableType == CatalogTableType.VIEW) { + throw new AnalysisException( + s"${table.toString} is a VIEW, which does not support ALTER ADD COLUMNS.") + } + + if (DDLUtils.isDatasourceTable(catalogTable)) { + DataSource.lookupDataSource(catalogTable.provider.get).newInstance() match { + // For datasource tables, this command can only support the following file formats. + // TextFileFormat only defaults to one column "value" + // OrcFileFormat cannot handle differences between the user-specified schema and + // the inferred schema yet. TODO, once this issue is resolved, we can add Orc back. + // Hive type is already considered a hive serde table, so the logic will not + // come in here. + case _: JsonFileFormat | _: CSVFileFormat | _: ParquetFileFormat => + case s => + throw new AnalysisException( + s"Datasource table $table with type $s, which does not support ALTER ADD COLUMNS.") --- End diff -- The same here.
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastruct...@apache.org or file a JIRA ticket with INFRA. --- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org