danny0405 commented on a change in pull request #3134:
URL: https://github.com/apache/hudi/pull/3134#discussion_r658615870



##########
File path: hudi-flink/src/main/java/org/apache/hudi/sink/bootstrap/BootstrapFunction.java
##########
@@ -168,32 +175,69 @@ private HoodieFlinkTable getTable() {
    * @param partitionPath The partition path
    */
   @SuppressWarnings("unchecked")
-  private void loadRecords(String partitionPath, Collector<O> out) {
+  private void loadRecords(String partitionPath, Collector<O> out) throws Exception {
     long start = System.currentTimeMillis();
+
     BaseFileUtils fileUtils = BaseFileUtils.getInstance(this.hoodieTable.getBaseFileFormat());
-    List<HoodieBaseFile> latestBaseFiles =
-        HoodieIndexUtils.getLatestBaseFilesForPartition(partitionPath, this.hoodieTable);
-    LOG.info("All baseFile in partition {} size = {}", partitionPath, latestBaseFiles.size());
+    Schema schema = new TableSchemaResolver(this.hoodieTable.getMetaClient()).getTableAvroSchema();
 
     final int parallelism = getRuntimeContext().getNumberOfParallelSubtasks();
     final int maxParallelism = getRuntimeContext().getMaxNumberOfParallelSubtasks();
     final int taskID = getRuntimeContext().getIndexOfThisSubtask();
-    for (HoodieBaseFile baseFile : latestBaseFiles) {
-      boolean shouldLoad = KeyGroupRangeAssignment.assignKeyToParallelOperator(
-          baseFile.getFileId(), maxParallelism, parallelism) == taskID;
 
-      if (shouldLoad) {
-        LOG.info("Load records from file {}.", baseFile);
-        final List<HoodieKey> hoodieKeys;
+    Option<HoodieInstant> latestCommitTime = this.hoodieTable.getMetaClient().getCommitsTimeline()
+        .filterCompletedInstants().lastInstant();
+
+    if (latestCommitTime.isPresent()) {
+      List<FileSlice> fileSlices = this.hoodieTable.getSliceView()
+          .getLatestFileSlicesBeforeOrOn(partitionPath, latestCommitTime.get().getTimestamp(), true)
+          .collect(toList());
+
+      for (FileSlice fileSlice : fileSlices) {
+        if (!shouldLoadFileId(fileSlice.getFileId(), maxParallelism, parallelism, taskID)) {
+          continue;
+        }
+        LOG.info("Load records from {}.", fileSlice);
+
+        // load parquet records
+        fileSlice.getBaseFile().ifPresent(baseFile -> {
+          // filter out crushed files
+          if (baseFile.getFileSize() <= 0) {
+            return;
+          }
+
+          final List<HoodieKey> hoodieKeys;
+          try {
+            hoodieKeys =
+                fileUtils.fetchRecordKeyPartitionPath(this.hadoopConf, new Path(baseFile.getPath()));
+          } catch (Exception e) {
+            throw new HoodieException(String.format("Error when loading record keys from file: %s", baseFile), e);
+          }
+
+          for (HoodieKey hoodieKey : hoodieKeys) {
+            out.collect((O) new IndexRecord(generateHoodieRecord(hoodieKey, fileSlice)));
+          }
+        });
+
+        // load avro log records
+        List<String> logPaths = fileSlice.getLogFiles()
+                // filter out crushed files
+                .filter(logFile -> logFile.getFileSize() > 0)
+                .map(logFile -> logFile.getPath().toString())
+                .collect(toList());
+        HoodieMergedLogRecordScanner scanner = scanLog(logPaths, schema, latestCommitTime.get().getTimestamp());
+
+        final List<HoodieKey> hoodieKeys = new ArrayList<>();
         try {
-          hoodieKeys =
-              fileUtils.fetchRecordKeyPartitionPath(this.hadoopConf, new Path(baseFile.getPath()));
+          for (String recordKey : scanner.getRecords().keySet()) {
+            hoodieKeys.add(new HoodieKey(recordKey, partitionPath));

Review comment:
       Can we send the keys directly instead of buffering the records? The cache takes up too much memory.
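
       A minimal sketch of what that could look like, assuming the `scanner`, `partitionPath`, `fileSlice`, and `out` variables from the diff above, and that `HoodieMergedLogRecordScanner` exposes `getRecords()` and `close()`:

```java
// Hypothetical rewrite of the buffering block above (not part of this PR):
// forward each key as soon as it is read from the scanner instead of
// collecting the keys into an intermediate ArrayList first.
try {
  for (String recordKey : scanner.getRecords().keySet()) {
    out.collect((O) new IndexRecord(generateHoodieRecord(new HoodieKey(recordKey, partitionPath), fileSlice)));
  }
} finally {
  scanner.close(); // release the merged log record cache once the keys are emitted
}
```

       That way the per-partition memory footprint is bounded by the scanner itself rather than by an extra list of keys.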



