Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/15996#discussion_r92526260
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala ---
    @@ -363,48 +364,120 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
           throw new AnalysisException("Cannot create hive serde table with saveAsTable API")
         }
    
    -    val tableExists = df.sparkSession.sessionState.catalog.tableExists(tableIdent)
    -
    -    (tableExists, mode) match {
    -      case (true, SaveMode.Ignore) =>
    -        // Do nothing
    -
    -      case (true, SaveMode.ErrorIfExists) =>
    -        throw new AnalysisException(s"Table $tableIdent already exists.")
    -
    -      case _ =>
    -        val existingTable = if (tableExists) {
    -          Some(df.sparkSession.sessionState.catalog.getTableMetadata(tableIdent))
    -        } else {
    -          None
    -        }
    -        val storage = if (tableExists) {
    -          existingTable.get.storage
    -        } else {
    -          DataSource.buildStorageFormatFromOptions(extraOptions.toMap)
    -        }
    -        val tableType = if (tableExists) {
    -          existingTable.get.tableType
    -        } else if (storage.locationUri.isDefined) {
    -          CatalogTableType.EXTERNAL
    -        } else {
    -          CatalogTableType.MANAGED
    +    val catalog = df.sparkSession.sessionState.catalog
    +    val db = tableIdent.database.getOrElse(catalog.getCurrentDatabase)
    +    val tableIdentWithDB = tableIdent.copy(database = Some(db))
    +    catalog.getTableMetadataOption(tableIdent) match {
    +      // If the table already exists...
    +      case Some(tableMeta) =>
    +        mode match {
    +          case SaveMode.Ignore => // Do nothing
    +
    +          case SaveMode.ErrorIfExists =>
    +            throw new AnalysisException(s"Table $tableIdent already exists. You can set SaveMode " +
    +              "to SaveMode.Append to insert data into the table or set SaveMode to " +
    +              "SaveMode.Overwrite to overwrite the existing data.")
    +
    +          case SaveMode.Append =>
    +            // Check if the specified data source matches the data source of the existing table.
    +            val specifiedProvider = DataSource.lookupDataSource(source)
    +            // TODO: Check that options from the resolved relation match the relation that we are
    +            // inserting into (i.e. using the same compression).
    +
    +            // Pass a table identifier with the database part, so that `lookupRelation` won't get
    +            // temp views unexpectedly.
    +            EliminateSubqueryAliases(catalog.lookupRelation(tableIdentWithDB)) match {
    +              case l @ LogicalRelation(_: InsertableRelation | _: HadoopFsRelation, _, _) =>
    +                // Check if the file formats match.
    +                l.relation match {
    +                  case r: HadoopFsRelation if r.fileFormat.getClass != specifiedProvider =>
    +                    throw new AnalysisException(
    +                      s"The file format of the existing table $tableIdent is " +
    +                        s"`${r.fileFormat.getClass.getName}`. It doesn't match the specified " +
    +                        s"format `$source`.")
    +                  case _ =>
    +                }
    +              case s: SimpleCatalogRelation if DDLUtils.isDatasourceTable(s.metadata) => // OK.
    +              case c: CatalogRelation if c.catalogTable.provider == Some(DDLUtils.HIVE_PROVIDER) =>
    +                throw new AnalysisException("Saving data in the Hive serde table " +
    +                  s"${c.catalogTable.identifier} is not supported yet. Please use the " +
    +                  "insertInto() API as an alternative.")
    +              case o =>
    +                throw new AnalysisException(s"Saving data in ${o.toString} is not supported.")
    +            }
    +
    +            val existingSchema = tableMeta.schema
    +            if (df.logicalPlan.schema.size != existingSchema.size) {
    +              throw new AnalysisException(
    +                s"The column number of the existing schema [$existingSchema] " +
    +                  s"doesn't match the data schema [${df.logicalPlan.schema}].")
    +            }
    +
    +            val specifiedPartCols = partitioningColumns.getOrElse(Nil)
    +            val existingPartCols = tableMeta.partitionColumnNames
    +            if (specifiedPartCols.map(_.toLowerCase) != existingPartCols.map(_.toLowerCase)) {
    +              throw new AnalysisException("The partition columns of the existing table " +
    +                s"$tableIdent are: [${existingPartCols.mkString(", ")}]. They don't match the " +
    +                s"specified partition columns: [${specifiedPartCols.mkString(", ")}].")
    +            }
    +
    +            // Because we are inserting into an existing table, we should respect the existing
    +            // schema and adjust the column order of the given DataFrame according to it.
    +            df.select(existingSchema.map(f => Column(f.name)): _*)
    +              .write.insertInto(tableIdentWithDB)
    --- End diff --
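    For context, here is a minimal, self-contained sketch of the column
    reordering this last line performs. The table name `t` and the columns
    `a`/`b` are made up for illustration, and it assumes the Append-mode
    behavior introduced by this diff:
    
        import org.apache.spark.sql.{SaveMode, SparkSession}
    
        object AppendReorderSketch {
          def main(args: Array[String]): Unit = {
            val spark = SparkSession.builder()
              .appName("append-reorder-sketch")
              .master("local[*]")
              .getOrCreate()
            import spark.implicits._
    
            // Create the table with the column order (a, b).
            Seq((1, "x")).toDF("a", "b").write.saveAsTable("t")
    
            // The incoming DataFrame lists the same columns in the order (b, a).
            // With this patch, saveAsTable in Append mode realigns them to the
            // table's schema via df.select(...) before calling insertInto.
            Seq(("y", 2)).toDF("b", "a").write.mode(SaveMode.Append).saveAsTable("t")
    
            spark.table("t").show()
            spark.stop()
          }
        }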
    
    I thought it was OK to analyze twice, but not to analyze an already-optimized plan. Let me look into it.
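    To make the concern concrete, here is a rough sketch of the analyze-twice
    pattern under discussion. The object name is mine, and this only
    illustrates the plan lifecycle, not the actual write path:
    
        import org.apache.spark.sql.SparkSession
    
        object DoubleAnalysisSketch {
          def main(args: Array[String]): Unit = {
            val spark = SparkSession.builder().master("local[*]").getOrCreate()
            import spark.implicits._
    
            val df = Seq((1, "x")).toDF("a", "b")
            // First analysis: df's plan goes through the analyzer here.
            println(df.queryExecution.analyzed)
    
            // Building a new Dataset on top of df, as the patch does with
            // df.select(...).write.insertInto(...), wraps df's plan in a new
            // node and sends the whole tree through the analyzer again.
            val reordered = df.select("b", "a")
            println(reordered.queryExecution.analyzed)
    
            spark.stop()
          }
        }
    
    Re-analyzing an analyzed plan is normally idempotent; the open question
    raised above is whether the plan handed to the new Dataset could already
    be optimized at that point.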

