rdblue commented on code in PR #9423:
URL: https://github.com/apache/iceberg/pull/9423#discussion_r1466899911
##########
spark/v3.5/spark-extensions/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveViews.scala:
##########
@@ -59,6 +61,33 @@ case class ResolveViews(spark: SparkSession) extends Rule[LogicalPlan] with Look
       loadView(catalog, ident)
         .map(_ => ResolvedV2View(catalog.asViewCatalog, ident))
         .getOrElse(u)
+
+    case c@CreateIcebergView(ResolvedIdentifier(_, ident), _, query, columnAliases, columnComments, _, _, _, _, _, _)
+        if query.resolved && !c.rewritten =>
+      val rewritten = rewriteIdentifiers(query, ident.asMultipartIdentifier)
+      val aliasedPlan = aliasPlan(rewritten, columnAliases, columnComments)
+      c.copy(query = aliasedPlan, queryColumnNames = query.schema.fieldNames, rewritten = true)
+  }
+
+  private def aliasPlan(
+      analyzedPlan: LogicalPlan,
+      columnAliases: Seq[String],
+      columnComments: Seq[Option[String]]): LogicalPlan = {
+    if (columnAliases.isEmpty || columnAliases.length != analyzedPlan.output.length) {
+      analyzedPlan
+    } else {
+      val projectList = analyzedPlan.output.zipWithIndex.map { case (_, pos) =>
+        val column = GetColumnByOrdinal(pos, analyzedPlan.schema.fields.apply(pos).dataType)
Review Comment:
I don't think this should use `GetColumnByOrdinal`. That is used when the
underlying plan has not yet been resolved, so there is no `output`. Here, we
know there is `output`, so this can take each output expression and wrap it
directly.
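
A minimal sketch of what this suggestion could look like, assuming `analyzedPlan` is already resolved so its `output` attributes can be wrapped in `Alias` expressions directly. Attaching each optional comment as column metadata under a `comment` key is an assumption for illustration, not something taken from this diff:

```scala
import org.apache.spark.sql.catalyst.expressions.Alias
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}
import org.apache.spark.sql.types.{Metadata, MetadataBuilder}

private def aliasPlan(
    analyzedPlan: LogicalPlan,
    columnAliases: Seq[String],
    columnComments: Seq[Option[String]]): LogicalPlan = {
  if (columnAliases.isEmpty || columnAliases.length != analyzedPlan.output.length) {
    analyzedPlan
  } else {
    // the plan is resolved, so wrap each output attribute directly instead of
    // resolving it again by ordinal
    val projectList = analyzedPlan.output.zipWithIndex.map { case (attr, pos) =>
      // assumption: column comments are carried as metadata on the alias
      val metadata = columnComments(pos)
        .map(comment => new MetadataBuilder().putString("comment", comment).build())
        .getOrElse(Metadata.empty)
      Alias(attr, columnAliases(pos))(explicitMetadata = Some(metadata))
    }
    Project(projectList, analyzedPlan)
  }
}
```

Since an `Alias` over an already-resolved attribute is itself resolved, the resulting `Project` does not need another analyzer pass to bind ordinals, which is what `GetColumnByOrdinal` would otherwise require.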