FANNG1 commented on code in PR #7067:
URL: https://github.com/apache/gravitino/pull/7067#discussion_r2062613776


##########
spark-connector/spark-common/src/main/java/org/apache/gravitino/spark/connector/hive/SparkHiveTable.java:
##########
@@ -76,4 +80,50 @@ public Map<String, String> properties() {
   public Transform[] partitioning() {
     return gravitinoTableInfoHelper.partitioning();
   }
+
+  @Override
+  public void createPartition(InternalRow ident, Map<String, String> 
properties)
+      throws PartitionAlreadyExistsException, UnsupportedOperationException {
+    gravitinoTableInfoHelper.createPartition(ident, properties, 
partitionSchema());
+  }
+
+  @Override
+  public boolean dropPartition(InternalRow ident) {
+    return gravitinoTableInfoHelper.dropPartition(ident, partitionSchema());
+  }
+
+  @Override
+  public void replacePartitionMetadata(InternalRow ident, Map<String, String> 
properties)
+      throws NoSuchPartitionException, UnsupportedOperationException {
+    throw new UnsupportedOperationException("Partition replace is not 
supported");

Review Comment:
   `Partition replace` -> `Partition replacement` or `Replace partition`?



##########
spark-connector/spark-common/src/main/java/org/apache/gravitino/spark/connector/SparkPartitionConverter.java:
##########
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.gravitino.spark.connector;
+
+import java.time.LocalDate;
+import java.time.format.DateTimeFormatter;
+import org.apache.gravitino.rel.expressions.literals.Literal;
+import org.apache.gravitino.rel.expressions.literals.Literals;
+import org.apache.gravitino.rel.types.Types;
+import org.apache.spark.sql.catalyst.InternalRow;
+import org.apache.spark.sql.types.BooleanType;
+import org.apache.spark.sql.types.ByteType;
+import org.apache.spark.sql.types.CharType;
+import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.DateType;
+import org.apache.spark.sql.types.Decimal;
+import org.apache.spark.sql.types.DecimalType;
+import org.apache.spark.sql.types.DoubleType;
+import org.apache.spark.sql.types.FloatType;
+import org.apache.spark.sql.types.IntegerType;
+import org.apache.spark.sql.types.LongType;
+import org.apache.spark.sql.types.ShortType;
+import org.apache.spark.sql.types.StringType;
+import org.apache.spark.sql.types.VarcharType;
+import org.apache.spark.unsafe.types.UTF8String;
+
+public class SparkPartitionConverter {

Review Comment:
   rename to `SparkPartitionUtils`?



##########
spark-connector/spark-common/src/main/java/org/apache/gravitino/spark/connector/SparkPartitionConverter.java:
##########
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.gravitino.spark.connector;
+
+import java.time.LocalDate;
+import java.time.format.DateTimeFormatter;
+import org.apache.gravitino.rel.expressions.literals.Literal;
+import org.apache.gravitino.rel.expressions.literals.Literals;
+import org.apache.gravitino.rel.types.Types;
+import org.apache.spark.sql.catalyst.InternalRow;
+import org.apache.spark.sql.types.BooleanType;
+import org.apache.spark.sql.types.ByteType;
+import org.apache.spark.sql.types.CharType;
+import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.DateType;
+import org.apache.spark.sql.types.Decimal;
+import org.apache.spark.sql.types.DecimalType;
+import org.apache.spark.sql.types.DoubleType;
+import org.apache.spark.sql.types.FloatType;
+import org.apache.spark.sql.types.IntegerType;
+import org.apache.spark.sql.types.LongType;
+import org.apache.spark.sql.types.ShortType;
+import org.apache.spark.sql.types.StringType;
+import org.apache.spark.sql.types.VarcharType;
+import org.apache.spark.unsafe.types.UTF8String;
+
+public class SparkPartitionConverter {
+
+  private SparkPartitionConverter() {}
+
+  public static Literal<?> toGravitinoLiteral(InternalRow ident, int ordinal, 
DataType sparkType) {
+    if (sparkType instanceof ByteType) {
+      return Literals.byteLiteral(ident.getByte(ordinal));
+    } else if (sparkType instanceof ShortType) {
+      return Literals.shortLiteral(ident.getShort(ordinal));
+    } else if (sparkType instanceof IntegerType) {
+      return Literals.integerLiteral(ident.getInt(ordinal));
+    } else if (sparkType instanceof LongType) {
+      return Literals.longLiteral(ident.getLong(ordinal));
+    } else if (sparkType instanceof FloatType) {
+      return Literals.floatLiteral(ident.getFloat(ordinal));
+    } else if (sparkType instanceof DoubleType) {
+      return Literals.doubleLiteral(ident.getDouble(ordinal));
+    } else if (sparkType instanceof DecimalType) {
+      DecimalType decimalType = (DecimalType) sparkType;
+      org.apache.spark.sql.types.Decimal decimal =
+          ident.getDecimal(ordinal, decimalType.precision(), 
decimalType.scale());
+      return Literals.decimalLiteral(
+          
org.apache.gravitino.rel.types.Decimal.of(decimal.toJavaBigDecimal()));
+    } else if (sparkType instanceof StringType) {
+      return Literals.stringLiteral(ident.getString(ordinal));
+    } else if (sparkType instanceof VarcharType) {
+      VarcharType varcharType = (VarcharType) sparkType;
+      return Literals.varcharLiteral(varcharType.length(), 
ident.getString(ordinal));
+    } else if (sparkType instanceof CharType) {
+      CharType charType = (CharType) sparkType;
+      return Literals.of(ident.get(ordinal, sparkType), 
Types.FixedCharType.of(charType.length()));
+    } else if (sparkType instanceof BooleanType) {
+      return Literals.booleanLiteral(ident.getBoolean(ordinal));
+    } else if (sparkType instanceof DateType) {
+      LocalDate localDate = LocalDate.ofEpochDay(ident.getInt(ordinal));
+      return Literals.dateLiteral(localDate);
+    }
+    throw new UnsupportedOperationException("Not support " + 
sparkType.toString());
+  }
+
+  public static String getPartitionValueAsString(
+      InternalRow ident, int ordinal, DataType dataType) {
+    if (ident.isNullAt(ordinal)) {
+      return null;
+    }
+    if (dataType instanceof ByteType) {
+      return String.valueOf(ident.getByte(ordinal));
+    } else if (dataType instanceof ShortType) {
+      return String.valueOf(ident.getShort(ordinal));
+    } else if (dataType instanceof IntegerType) {
+      return String.valueOf(ident.getInt(ordinal));
+    } else if (dataType instanceof StringType) {
+      return ident.getUTF8String(ordinal).toString();
+    } else if (dataType instanceof VarcharType) {
+      return ident.get(ordinal, dataType).toString();
+    } else if (dataType instanceof CharType) {
+      return ident.get(ordinal, dataType).toString();
+    } else if (dataType instanceof DateType) {
+      // DateType spark use int store.
+      LocalDate localDate = LocalDate.ofEpochDay(ident.getInt(ordinal));
+      return localDate.format(DateTimeFormatter.ISO_LOCAL_DATE);
+    } else if (dataType instanceof BooleanType) {
+      return String.valueOf(ident.getBoolean(ordinal));
+    } else if (dataType instanceof LongType) {
+      return String.valueOf(ident.getLong(ordinal));
+    } else if (dataType instanceof DoubleType) {
+      return String.valueOf(ident.getDouble(ordinal));
+    } else if (dataType instanceof FloatType) {
+      return String.valueOf(ident.getFloat(ordinal));
+    } else if (dataType instanceof DecimalType) {
+      return ident
+          .getDecimal(
+              ordinal, ((DecimalType) dataType).precision(), ((DecimalType) 
dataType).scale())
+          .toString();
+    } else {
+      throw new UnsupportedOperationException(
+          String.format("Unsupported partition column type: %s", dataType));
+    }
+  }
+
+  public static Object getSparkPartitionValue(String hivePartitionValue, 
DataType dataType) {

Review Comment:
   Is this class specific to Hive?



##########
spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/hive/SparkHiveCatalogIT.java:
##########
@@ -129,6 +129,37 @@ void testCreateHiveFormatPartitionTable() {
     checkPartitionDirExists(tableInfo);
   }
 
+  @Test
+  void testPartitionTableManage() {
+    System.out.println("" + System.getProperty("SKIP_DOCKER_TESTS"));
+    String tableName = "hive_partition_ops_table";
+
+    dropTableIfExists(tableName);
+    String createTableSQL = getCreateSimpleTableString(tableName);
+    createTableSQL = createTableSQL + "PARTITIONED BY (age_p1 INT, age_p2 
STRING)";
+    sql(createTableSQL);
+
+    List<Object[]> tableInfo = getTablePartitions(tableName);
+    Assertions.assertEquals(0, tableInfo.size());
+
+    sql("ALTER TABLE  " + tableName + " ADD PARTITION (age_p1=20, 
age_p2='twenty')");
+    sql("ALTER TABLE  " + tableName + " ADD PARTITION (age_p1=21, 
age_p2='twenty one')");
+    tableInfo = getTablePartitions(tableName);

Review Comment:
   could you check the partition values too?



##########
spark-connector/spark-common/src/main/java/org/apache/gravitino/spark/connector/hive/SparkHiveTable.java:
##########
@@ -76,4 +80,50 @@ public Map<String, String> properties() {
   public Transform[] partitioning() {
     return gravitinoTableInfoHelper.partitioning();
   }
+
+  @Override
+  public void createPartition(InternalRow ident, Map<String, String> 
properties)
+      throws PartitionAlreadyExistsException, UnsupportedOperationException {
+    gravitinoTableInfoHelper.createPartition(ident, properties, 
partitionSchema());
+  }
+
+  @Override
+  public boolean dropPartition(InternalRow ident) {
+    return gravitinoTableInfoHelper.dropPartition(ident, partitionSchema());
+  }
+
+  @Override
+  public void replacePartitionMetadata(InternalRow ident, Map<String, String> 
properties)
+      throws NoSuchPartitionException, UnsupportedOperationException {
+    throw new UnsupportedOperationException("Partition replace is not 
supported");
+  }
+
+  @Override
+  public Map<String, String> loadPartitionMetadata(InternalRow ident)
+      throws UnsupportedOperationException {
+    return gravitinoTableInfoHelper.loadPartitionMetadata(ident, 
partitionSchema());
+  }
+
+  @Override
+  public InternalRow[] listPartitionIdentifiers(String[] names, InternalRow 
ident) {
+    return gravitinoTableInfoHelper.listPartitionIdentifiers(names, ident, 
partitionSchema());
+  }
+
+  @Override
+  public Object productElement(int n) {

Review Comment:
   Any reason to add the `productElement`, `productArity`, and `canEqual` methods?



##########
spark-connector/spark-common/src/main/java/org/apache/gravitino/spark/connector/hive/SparkHiveTable.java:
##########
@@ -76,4 +80,50 @@ public Map<String, String> properties() {
   public Transform[] partitioning() {
     return gravitinoTableInfoHelper.partitioning();
   }
+
+  @Override
+  public void createPartition(InternalRow ident, Map<String, String> 
properties)
+      throws PartitionAlreadyExistsException, UnsupportedOperationException {
+    gravitinoTableInfoHelper.createPartition(ident, properties, 
partitionSchema());

Review Comment:
   `gravitinoTableInfoHelper` is designed to retrieve information from the 
Gravitino table, not to perform I/O operations. Could you move the partition 
operations to a separate class, like `HiveGravitinoOperationOperator`?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to