aokolnychyi commented on code in PR #10074:
URL: https://github.com/apache/iceberg/pull/10074#discussion_r1560282149


##########
spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/BaseCatalog.java:
##########
@@ -21,19 +21,31 @@
 import org.apache.iceberg.spark.procedures.SparkProcedures;
 import org.apache.iceberg.spark.procedures.SparkProcedures.ProcedureBuilder;
 import org.apache.iceberg.spark.source.HasIcebergCatalog;
+import org.apache.iceberg.util.PropertyUtil;
 import org.apache.spark.sql.catalyst.analysis.NoSuchProcedureException;
 import org.apache.spark.sql.connector.catalog.Identifier;
 import org.apache.spark.sql.connector.catalog.StagingTableCatalog;
 import org.apache.spark.sql.connector.catalog.SupportsNamespaces;
 import org.apache.spark.sql.connector.iceberg.catalog.Procedure;
 import org.apache.spark.sql.connector.iceberg.catalog.ProcedureCatalog;
+import org.apache.spark.sql.util.CaseInsensitiveStringMap;
 
 abstract class BaseCatalog
     implements StagingTableCatalog,
         ProcedureCatalog,
         SupportsNamespaces,
         HasIcebergCatalog,
         SupportsFunctions {
+  /**
+   * Controls whether to mark all the fields as nullable when executing CREATE/REPLACE TABLE ... AS
+   * SELECT ... and creating the table. If false, fields' nullability will be preserved when
+   * creating the table.
+   */
+  private static final String TABLE_CREATE_NULLABLE_QUERY_SCHEMA = "use-nullable-query-schema";

Review Comment:
   I have mixed feelings about the name. On one hand, it is not very descriptive. On the other hand, it matches the Spark API. Let me think about it.
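
   For context, a minimal sketch of how the new imports could fit together, assuming the flag is read once from the catalog options in initialize(...) via PropertyUtil and cached in a field; the field name useNullableQuerySchema and the default of true are illustrative assumptions, not necessarily what this PR does:

       // Sketch only: assumed field on BaseCatalog holding the parsed flag.
       private boolean useNullableQuerySchema = true;

       @Override
       public void initialize(String name, CaseInsensitiveStringMap options) {
         // PropertyUtil.propertyAsBoolean works here because CaseInsensitiveStringMap
         // implements Map<String, String>. Real implementations would combine this
         // with their existing initialization logic.
         this.useNullableQuerySchema =
             PropertyUtil.propertyAsBoolean(options, TABLE_CREATE_NULLABLE_QUERY_SCHEMA, true);
       }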



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

