nsivabalan commented on code in PR #13254:
URL: https://github.com/apache/hudi/pull/13254#discussion_r2072895299


##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -417,6 +418,7 @@ private boolean initializeFromFilesystem(String 
dataTableInstantTime, List<Metad
       }
     }
 
+    Lazy<List<Pair<String, FileSlice>>> lazyLatestMergedPartitionFileSliceList 
= getLazyLatestMergedPartitionFileSliceList();

Review Comment:
   Can we clear this entry in closeInternal() of the same class, 
   so that on re-initialization we don't keep using older merged file slice 
state?



##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -665,38 +670,37 @@ private Pair<Integer, HoodieData<HoodieRecord>> 
initializeSecondaryIndexPartitio
     return Pair.of(fileGroupCount, records);
   }
 
-  private List<Pair<String, FileSlice>> getPartitionFileSlicePairs() throws 
IOException {
-    String latestInstant = 
dataMetaClient.getActiveTimeline().filterCompletedAndCompactionInstants().lastInstant()
-        .map(HoodieInstant::requestedTime).orElse(SOLO_COMMIT_TIMESTAMP);
-    try (HoodieTableFileSystemView fsView = getMetadataView()) {
-      // Collect the list of latest file slices present in each partition
-      List<String> partitions = metadata.getAllPartitionPaths();
-      fsView.loadAllPartitions();
-      List<Pair<String, FileSlice>> partitionFileSlicePairs = new 
ArrayList<>();
-      partitions.forEach(partition -> 
fsView.getLatestMergedFileSlicesBeforeOrOn(partition, latestInstant)
-          .forEach(fs -> partitionFileSlicePairs.add(Pair.of(partition, fs))));
-      return partitionFileSlicePairs;
-    }
+  private Lazy<List<Pair<String, FileSlice>>> 
getLazyLatestMergedPartitionFileSliceList() {
+    return Lazy.lazily(() -> {
+      String latestInstant = 
dataMetaClient.getActiveTimeline().filterCompletedAndCompactionInstants().lastInstant()
+          .map(HoodieInstant::requestedTime).orElse(SOLO_COMMIT_TIMESTAMP);
+      try (HoodieTableFileSystemView fsView = getMetadataView()) {
+        // Collect the list of latest file slices present in each partition
+        List<String> partitions = metadata.getAllPartitionPaths();
+        fsView.loadAllPartitions();
+        List<Pair<String, FileSlice>> partitionFileSlicePairs = new 
ArrayList<>();
+        partitions.forEach(partition -> 
fsView.getLatestMergedFileSlicesBeforeOrOn(partition, latestInstant)
+            .forEach(fs -> partitionFileSlicePairs.add(Pair.of(partition, 
fs))));
+        return partitionFileSlicePairs;
+      } catch (IOException e) {
+        throw new UncheckedIOException(e);
+      }
+    });
   }
 
-  private Pair<Integer, HoodieData<HoodieRecord>> 
initializeRecordIndexPartition() throws IOException {
-    final HoodieTableFileSystemView fsView = getMetadataView();
+  private Pair<Integer, HoodieData<HoodieRecord>> 
initializeRecordIndexPartition(
+      Lazy<List<Pair<String, FileSlice>>> 
lazyLatestMergedPartitionFileSliceList) {

Review Comment:
   I suspect we might run into issues with this. 
   if you look at impl of getMetadataView
   ```
     private HoodieTableFileSystemView getMetadataView() {
       if (metadataView == null || 
!metadataView.equals(metadata.getMetadataFileSystemView())) {
         ValidationUtils.checkState(metadata != null, "Metadata table not 
initialized");
         ValidationUtils.checkState(dataMetaClient != null, "Data table meta 
client not initialized");
         metadataView = new HoodieTableFileSystemView(metadata, dataMetaClient, 
dataMetaClient.getActiveTimeline());
       }
       return metadataView;
     }
   ```
   So, at any point in time, if `metadata.getMetadataFileSystemView()` changes, 
then it will automatically re-initialize at that line.
   
   
   



##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -665,38 +670,37 @@ private Pair<Integer, HoodieData<HoodieRecord>> 
initializeSecondaryIndexPartitio
     return Pair.of(fileGroupCount, records);
   }
 
-  private List<Pair<String, FileSlice>> getPartitionFileSlicePairs() throws 
IOException {
-    String latestInstant = 
dataMetaClient.getActiveTimeline().filterCompletedAndCompactionInstants().lastInstant()
-        .map(HoodieInstant::requestedTime).orElse(SOLO_COMMIT_TIMESTAMP);
-    try (HoodieTableFileSystemView fsView = getMetadataView()) {
-      // Collect the list of latest file slices present in each partition
-      List<String> partitions = metadata.getAllPartitionPaths();
-      fsView.loadAllPartitions();
-      List<Pair<String, FileSlice>> partitionFileSlicePairs = new 
ArrayList<>();
-      partitions.forEach(partition -> 
fsView.getLatestMergedFileSlicesBeforeOrOn(partition, latestInstant)
-          .forEach(fs -> partitionFileSlicePairs.add(Pair.of(partition, fs))));
-      return partitionFileSlicePairs;
-    }
+  private Lazy<List<Pair<String, FileSlice>>> 
getLazyLatestMergedPartitionFileSliceList() {
+    return Lazy.lazily(() -> {
+      String latestInstant = 
dataMetaClient.getActiveTimeline().filterCompletedAndCompactionInstants().lastInstant()
+          .map(HoodieInstant::requestedTime).orElse(SOLO_COMMIT_TIMESTAMP);
+      try (HoodieTableFileSystemView fsView = getMetadataView()) {
+        // Collect the list of latest file slices present in each partition
+        List<String> partitions = metadata.getAllPartitionPaths();
+        fsView.loadAllPartitions();
+        List<Pair<String, FileSlice>> partitionFileSlicePairs = new 
ArrayList<>();
+        partitions.forEach(partition -> 
fsView.getLatestMergedFileSlicesBeforeOrOn(partition, latestInstant)
+            .forEach(fs -> partitionFileSlicePairs.add(Pair.of(partition, 
fs))));
+        return partitionFileSlicePairs;
+      } catch (IOException e) {
+        throw new UncheckedIOException(e);

Review Comment:
   +1 



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to