danny0405 commented on code in PR #8684: URL: https://github.com/apache/hudi/pull/8684#discussion_r1192913868
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metadata/SparkHoodieBackedTableMetadataWriter.java:
##########
@@ -118,46 +123,32 @@ protected void initRegistry() {
   }
 
   @Override
-  protected <T extends SpecificRecordBase> void initialize(HoodieEngineContext engineContext,
-                                                           Option<T> actionMetadata,
-                                                           Option<String> inflightInstantTimestamp) {
-    try {
-      metrics.map(HoodieMetadataMetrics::registry).ifPresent(registry -> {
-        if (registry instanceof DistributedRegistry) {
-          HoodieSparkEngineContext sparkEngineContext = (HoodieSparkEngineContext) engineContext;
-          ((DistributedRegistry) registry).register(sparkEngineContext.getJavaSparkContext());
-        }
-      });
+  protected void commit(String instantTime, Map<MetadataPartitionType, HoodieData<HoodieRecord>> partitionRecordsMap) {
+    commitInternal(instantTime, partitionRecordsMap, Option.empty());
+  }
 
-      if (enabled) {
-        initializeIfNeeded(dataMetaClient, actionMetadata, inflightInstantTimestamp);
-      }
-    } catch (IOException e) {
-      LOG.error("Failed to initialize metadata table. Disabling the writer.", e);
-      enabled = false;
-    }
+  protected void bulkCommit(
+      String instantTime, MetadataPartitionType partitionType, HoodieData<HoodieRecord> records,
+      int fileGroupCount) {
+    Map<MetadataPartitionType, HoodieData<HoodieRecord>> partitionRecordsMap = new HashMap<>();
+    partitionRecordsMap.put(partitionType, records);
+    SparkHoodieMetadataBulkInsertPartitioner partitioner = new SparkHoodieMetadataBulkInsertPartitioner(fileGroupCount);

Review Comment:
   You are right, thanks for the clarification.

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@hudi.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org