yihua commented on a change in pull request #3741: URL: https://github.com/apache/hudi/pull/3741#discussion_r726800869
########## File path: hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/action/compact/HoodieFlinkMergeOnReadTableCompactor.java ########## @@ -75,164 +42,27 @@ * <p>Note: the compaction logic is invoked through the flink pipeline. */ @SuppressWarnings("checkstyle:LineLength") -public class HoodieFlinkMergeOnReadTableCompactor<T extends HoodieRecordPayload> implements HoodieCompactor<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> { - - private static final Logger LOG = LogManager.getLogger(HoodieFlinkMergeOnReadTableCompactor.class); - - // Accumulator to keep track of total log files for a table - private AtomicLong totalLogFiles; - // Accumulator to keep track of total log file slices for a table - private AtomicLong totalFileSlices; +public class HoodieFlinkMergeOnReadTableCompactor<T extends HoodieRecordPayload> + extends HoodieCompactor<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> { @Override - public List<WriteStatus> compact(HoodieEngineContext context, HoodieCompactionPlan compactionPlan, - HoodieTable hoodieTable, HoodieWriteConfig config, String compactionInstantTime) throws IOException { - throw new UnsupportedOperationException("HoodieFlinkMergeOnReadTableCompactor does not support compact directly, " - + "the function works as a separate pipeline"); + public Schema getReaderSchema(HoodieWriteConfig config) { + return HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(config.getSchema()), config.allowOperationMetadataField()); Review comment: @nsivabalan Just so you know, I'm going to remove these two schema-related methods. Let me know if Spark needs special attention here. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: commits-unsubscribe@hudi.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org