nastra commented on code in PR #9423:
URL: https://github.com/apache/iceberg/pull/9423#discussion_r1466150091
##########
spark/v3.5/spark-extensions/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveViews.scala:
##########
@@ -59,6 +61,32 @@ case class ResolveViews(spark: SparkSession) extends Rule[LogicalPlan] with LookupCatalog
loadView(catalog, ident)
.map(_ => ResolvedV2View(catalog.asViewCatalog, ident))
.getOrElse(u)
+
+    case c@CreateIcebergView(ResolvedIdentifier(_, ident), _, query, columnAliases, columnComments, _, _, _, _, _,
+      rewritten)
+      if query.resolved && !rewritten =>
+      val rewritten = rewriteIdentifiers(query, ident.asMultipartIdentifier)
+      val aliasedPlan = aliasPlan(rewritten, columnAliases, columnComments)
+      c.copy(query = aliasedPlan, queryColumnNames = query.schema.fieldNames, rewritten = true)
+ }
+
+  private def aliasPlan(
+      analyzedPlan: LogicalPlan,
+      columnAliases: Seq[String],
+      columnComments: Seq[Option[String]]): LogicalPlan = {
+    if (columnAliases.isEmpty || columnAliases.length != analyzedPlan.output.length) {
+      analyzedPlan
Review Comment:
Also, the reason I'm checking `columnAliases.length != analyzedPlan.output.length` here is so that we can properly fail in `CheckViews` (which runs after `ResolveViews`), where a better error message is thrown.
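
For illustration, here is a minimal sketch of the kind of length check that could run after resolution; the object name, method name, and exception type below are made up for the example and are not the actual `CheckViews` code:

```scala
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan

object ViewAliasCheck {
  // Compare the user-supplied column aliases against the analyzed query
  // output and fail with a precise message when the counts differ.
  // ResolveViews leaves a mismatched alias list untouched, so this check
  // still sees the original aliases and can report both counts.
  def verifyColumnCount(
      viewName: String,
      columnAliases: Seq[String],
      query: LogicalPlan): Unit = {
    if (columnAliases.nonEmpty && columnAliases.length != query.output.length) {
      throw new IllegalArgumentException(
        s"Cannot create view $viewName: the query produces ${query.output.length} " +
          s"column(s) but ${columnAliases.length} column alias(es) were specified")
    }
  }
}
```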
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]