Github user budde commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16944#discussion_r102554381

--- Diff: sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala ---
@@ -161,22 +164,70 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log
          bucketSpec,
          Some(partitionSchema))

+      val catalogTable = metastoreRelation.catalogTable
       val logicalRelation = cached.getOrElse {
         val sizeInBytes = metastoreRelation.stats(sparkSession.sessionState.conf).sizeInBytes.toLong
         val fileIndex = {
-          val index = new CatalogFileIndex(
-            sparkSession, metastoreRelation.catalogTable, sizeInBytes)
+          val index = new CatalogFileIndex(sparkSession, catalogTable, sizeInBytes)
           if (lazyPruningEnabled) {
             index
           } else {
             index.filterPartitions(Nil)  // materialize all the partitions in memory
           }
         }
         val partitionSchemaColumnNames = partitionSchema.map(_.name.toLowerCase).toSet
-        val dataSchema =
-          StructType(metastoreSchema
-            .filterNot(field => partitionSchemaColumnNames.contains(field.name.toLowerCase)))
+        val filteredMetastoreSchema = StructType(metastoreSchema
+          .filterNot(field => partitionSchemaColumnNames.contains(field.name.toLowerCase)))
+
+        // Infer a case-sensitive schema when the metastore doesn't return one, if configured.
+        val inferenceMode = sparkSession.sessionState.conf.caseSensitiveInferenceMode
+        val inferredSchema = if (!catalogTable.schemaPreservesCase &&
+            inferenceMode != HiveCaseSensitiveInferenceMode.NEVER_INFER) {
+          logInfo(s"Inferring case-sensitive schema for table $tableIdentifier")
+          val fileStatuses = fileIndex.listFiles(Nil).flatMap(_.files)
+          val inferred = defaultSource.inferSchema(sparkSession, options, fileStatuses)
+          if (fileType.equals("parquet")) {
+            inferred.map(ParquetFileFormat.mergeMetastoreParquetSchema(metastoreSchema, _))
+          } else {
+            inferred
+          }
+        } else {
+          None
+        }
+
+        // If configured, save the inferred case-sensitive schema to the table properties and
+        // fetch the updated CatalogTable record for use in the LogicalRelation.
+        val updatedCatalogTable = if (!catalogTable.schemaPreservesCase &&
+            inferenceMode == HiveCaseSensitiveInferenceMode.INFER_AND_SAVE) {
+          inferredSchema.flatMap { schema =>
+            logInfo(s"Saving case-sensitive schema for table $tableIdentifier to table " +
+              "properties")
+            val updatedTable = catalogTable.copy(schema = schema)
+            try {
+              val catalog = sparkSession.sharedState.externalCatalog
+              catalog.alterTable(updatedTable)
+              Option(catalog.getTable(updatedTable.identifier.database.get,
+                updatedTable.identifier.table))
+            } catch {
+              case NonFatal(ex) =>
+                logError(s"Error saving case-sensitive schema for table $tableIdentifier: $ex")
+                None
+            }
+          }
+        } else {
+          None
+        }
+
+        val dataSchema = if (!catalogTable.schemaPreservesCase) {
+          inferredSchema.getOrElse {
+            logWarning(s"Unable to infer schema for table $tableIdentifier from file format " +
+              s"$defaultSource; using metastore schema.")
+            filteredMetastoreSchema
+          }
+        } else {
+          filteredMetastoreSchema
+        }
--- End diff --

I started by trying to keep this as consistent as possible with the previous schema inference code, but I agree that this function is a bit unwieldy. I'll refactor some of this code into helper functions.
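As a rough sketch of the direction (the helper's name, parameter list, and placement below are placeholders I'm inventing here, not the final refactor; the body only restates the inference branch already shown in the diff, using the FileFormat/FileIndex/CatalogTable types the file already imports):

    // Hypothetical helper inside HiveMetastoreCatalog; name and signature are illustrative only.
    private def inferSchema(
        defaultSource: FileFormat,
        options: Map[String, String],
        fileIndex: FileIndex,
        fileType: String,
        metastoreSchema: StructType,
        catalogTable: CatalogTable): Option[StructType] = {
      logInfo(s"Inferring case-sensitive schema for table ${catalogTable.identifier}")
      // Ask the underlying file format to infer a schema from the table's data files.
      val fileStatuses = fileIndex.listFiles(Nil).flatMap(_.files)
      val inferred = defaultSource.inferSchema(sparkSession, options, fileStatuses)
      if (fileType.equals("parquet")) {
        // For Parquet, reconcile the inferred schema with the case-insensitive metastore schema.
        inferred.map(ParquetFileFormat.mergeMetastoreParquetSchema(metastoreSchema, _))
      } else {
        inferred
      }
    }

The INFER_AND_SAVE branch that alters the table and re-fetches the CatalogTable could move into a second helper along the same lines, leaving the enclosing method with just the if/else over the inference mode.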