dramaticlly commented on code in PR #13956:
URL: https://github.com/apache/iceberg/pull/13956#discussion_r2323700944


##########
spark/v4.0/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewriteTablePathsAction.java:
##########
@@ -1370,16 +1376,22 @@ private Table createMetastoreTable(
       String sqlStr =
           String.format(
               "CREATE TABLE hive.%s.%s (c1 bigint, c2 string, c3 string)", namespace, tableName);
+      if (partitionColumn != null && !partitionColumn.isEmpty()) {
+        sqlStr = String.format("%s USING iceberg PARTITIONED BY (%s)", sqlStr, partitionColumn);
+      }
       if (!location.isEmpty()) {
-        sqlStr = String.format("%s USING iceberg LOCATION '%s'", sqlStr, location);
+        sqlStr = String.format("%s LOCATION '%s'", sqlStr, location);
       }
       sql(sqlStr);
     } else {
       String sqlStr =
           String.format(
               "CREATE TABLE hive.%s.%s (c1 bigint, c2 string, c3 string)", namespace, tableName);
+      if (partitionColumn != null && !partitionColumn.isEmpty()) {
+        sqlStr = String.format("%s USING iceberg PARTITIONED BY (%s)", sqlStr, partitionColumn);
+      }
       if (!location.isEmpty()) {

Review Comment:
   this starts to look a bit hard to read with so many configurable parameters on table creation. Can we try moving the DDL construction out to a separate method, something like
   
   ```java
     private Table createMetastoreTable(
         String location,
         Map<String, String> properties,
         String namespace,
         String tableName,
         int snapshotNumber,
         String partitionColumn) {
       spark.conf().set("spark.sql.catalog.hive", SparkCatalog.class.getName());
       spark.conf().set("spark.sql.catalog.hive.type", "hive");
       spark.conf().set("spark.sql.catalog.hive.default-namespace", "default");
       spark.conf().set("spark.sql.catalog.hive.cache-enabled", "false");
   
       sql("DROP TABLE IF EXISTS hive.%s.%s", namespace, tableName);
   
       // create table
       sql(tableDDL(location, properties, namespace, tableName, partitionColumn));
   
       // Insert test data
       for (int i = 0; i < snapshotNumber; i++) {
         sql("INSERT INTO hive.%s.%s VALUES (%s, 'AAAAAAAAAA', 'AAAA')", 
namespace, tableName, i);
       }
   
       return catalog.loadTable(TableIdentifier.of(namespace, tableName));
     }
   
     private String tableDDL(
         String location,
         Map<String, String> properties,
         String namespace,
         String tableName,
         String partitionColumn) {
       String tblProperties =
           properties.entrySet().stream()
               .map(entry -> String.format("'%s'='%s'", entry.getKey(), entry.getValue()))
               .collect(Collectors.joining(","));
   
       StringBuilder createTableSql = new StringBuilder();
       createTableSql
           .append("CREATE TABLE hive.")
           .append(namespace)
           .append(".")
           .append(tableName)
           .append(" (c1 bigint, c2 string, c3 string)");
   
       if (partitionColumn != null && !partitionColumn.isEmpty()) {
         createTableSql.append(" USING iceberg PARTITIONED BY 
(").append(partitionColumn).append(")");
       } else {
         createTableSql.append(" USING iceberg");
       }
   
       if (!location.isEmpty()) {
         createTableSql.append(" LOCATION '").append(location).append("'");
       }
   
       if (!tblProperties.isEmpty()) {
         createTableSql.append(" TBLPROPERTIES 
(").append(tblProperties).append(")");
       }
       return createTableSql.toString();
     }
   ```
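   
   For illustration, a call to a helper like the `tableDDL` sketch above might look like this. The location, properties, table name, and partition column below are hypothetical, not taken from the PR; `ImmutableMap` is assumed to be the relocated Guava `ImmutableMap` the test class already imports:
   
   ```java
     // Hypothetical usage of the tableDDL sketch above (all argument values
     // are made up for illustration).
     String ddl =
         tableDDL(
             "file:/tmp/warehouse/tbl",              // location (hypothetical)
             ImmutableMap.of("format-version", "2"), // properties (hypothetical)
             "default",                              // namespace
             "tbl",                                  // table name (hypothetical)
             "c2");                                  // partition column
   
     // ddl is a single-line string equivalent to:
     // CREATE TABLE hive.default.tbl (c1 bigint, c2 string, c3 string)
     //   USING iceberg PARTITIONED BY (c2) LOCATION 'file:/tmp/warehouse/tbl'
     //   TBLPROPERTIES ('format-version'='2')
   ```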




