pvary commented on a change in pull request #2173:
URL: https://github.com/apache/hive/pull/2173#discussion_r612501984



##########
File path: 
iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
##########
@@ -190,6 +180,78 @@ public void 
commitDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable,
     }
   }
 
+  @Override
+  public void preAlterTable(org.apache.hadoop.hive.metastore.api.Table 
hmsTable, EnvironmentContext context)
+      throws MetaException {
+    HiveMetaHook.super.preAlterTable(hmsTable, context);
+    catalogProperties = getCatalogProperties(hmsTable);
+    try {
+      icebergTable = Catalogs.loadTable(conf, catalogProperties);
+    } catch (NoSuchTableException nte) {
+      // If the iceberg table does not exist, and the hms table is external 
and not temporary, not acid,
+      // and not bucketed, we will create it in commitAlterTable
+      StorageDescriptor sd = hmsTable.getSd();
+      generateMetadata = MetaStoreUtils.isExternalTable(hmsTable) && 
!hmsTable.isTemporary() &&
+          !(sd.getNumBuckets() > 0) && 
!AcidUtils.isTransactionalTable(hmsTable);
+      if (!generateMetadata) {
+        throw new MetaException("Converting non-external, temporary, bucketed 
or transactional hive table to iceberg " +
+            "table is not allowed.");
+      }
+
+      PreAlterTableProperties.tableLocation = sd.getLocation();
+      PreAlterTableProperties.format = sd.getInputFormat();
+      PreAlterTableProperties.schema = schema(catalogProperties, hmsTable);
+      PreAlterTableProperties.spec = spec(PreAlterTableProperties.schema, 
catalogProperties, hmsTable);
+
+      context.getProperties().put(HiveMetaHook.ALLOW_PARTITION_KEY_CHANGE, 
"true");
+      // If there are partition keys specified remove them from the HMS table 
and add them to the column list
+      if (hmsTable.isSetPartitionKeys()) {
+        hmsTable.getSd().getCols().addAll(hmsTable.getPartitionKeys());
+        hmsTable.setPartitionKeysIsSet(false);
+      }
+      sd.setInputFormat(HiveIcebergInputFormat.class.getCanonicalName());
+      sd.setOutputFormat(HiveIcebergOutputFormat.class.getCanonicalName());
+      sd.setSerdeInfo(new SerDeInfo("icebergSerde", 
HiveIcebergSerDe.class.getCanonicalName(),
+          Collections.emptyMap()));
+      updateHmsTableProperties(hmsTable);
+    }
+  }
+
+  @Override
+  public void commitAlterTable(org.apache.hadoop.hive.metastore.api.Table 
hmsTable,
+      PartitionSpecProxy partitionSpecProxy) {
+    HiveMetaHook.super.commitAlterTable(hmsTable, partitionSpecProxy);
+    if (generateMetadata) {
+      catalogProperties = getCatalogProperties(hmsTable);
+      catalogProperties.put(InputFormatConfig.TABLE_SCHEMA, 
SchemaParser.toJson(PreAlterTableProperties.schema));
+      catalogProperties.put(InputFormatConfig.PARTITION_SPEC, 
PartitionSpecParser.toJson(PreAlterTableProperties.spec));
+      if (Catalogs.hiveCatalog(conf)) {
+        catalogProperties.put(TableProperties.ENGINE_HIVE_ENABLED, true);
+      }
+      icebergTable = Catalogs.createTable(conf, catalogProperties);
+      HiveTableUtil.importFiles(PreAlterTableProperties.tableLocation, 
PreAlterTableProperties.format,

Review comment:
       What happens if this fails? Is there any chance to roll back?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to