singhpk234 commented on code in PR #5669:
URL: https://github.com/apache/iceberg/pull/5669#discussion_r983898446


##########
core/src/main/java/org/apache/iceberg/IncrementalFileCleanup.java:
##########
@@ -0,0 +1,366 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import org.apache.iceberg.exceptions.NotFoundException;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.relocated.com.google.common.base.Joiner;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.SnapshotUtil;
+import org.apache.iceberg.util.Tasks;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class IncrementalFileCleanup extends FileCleanupStrategy {
+  private static final Logger LOG = 
LoggerFactory.getLogger(IncrementalFileCleanup.class);
+
+  private final TableMetadata base;
+  private final TableMetadata current;
+  private final Consumer<String> deleteFunc;
+
+  IncrementalFileCleanup(
+      TableOperations ops,
+      TableMetadata base,
+      ExecutorService deleteExecutorService,
+      ExecutorService planExecutorService,
+      Consumer<String> deleteFunc) {
+    super(ops, deleteExecutorService, planExecutorService, deleteFunc);
+    this.base = base;
+    this.current = ops.refresh();
+    this.deleteFunc = deleteFunc;
+  }
+
+  @Override
+  @SuppressWarnings({"checkstyle:CyclomaticComplexity", "MethodLength"})
+  public void cleanFiles() {
+    if (current.refs().size() > 1) {
+      throw new UnsupportedOperationException(
+          "Cannot incrementally clean files for tables with more than 1 ref");
+    }
+
+    // clean up the expired snapshots:
+    // 1. Get a list of the snapshots that were removed
+    // 2. Delete any data files that were deleted by those snapshots and are 
not in the table
+    // 3. Delete any manifests that are no longer used by current snapshots
+    // 4. Delete the manifest lists
+
+    Set<Long> validIds = Sets.newHashSet();
+    for (Snapshot snapshot : current.snapshots()) {
+      validIds.add(snapshot.snapshotId());
+    }
+
+    Set<Long> expiredIds = Sets.newHashSet();
+    for (Snapshot snapshot : base.snapshots()) {
+      long snapshotId = snapshot.snapshotId();
+      if (!validIds.contains(snapshotId)) {
+        // the snapshot was expired
+        LOG.info("Expired snapshot: {}", snapshot);
+        expiredIds.add(snapshotId);
+      }
+    }
+
+    if (expiredIds.isEmpty()) {
+      // if no snapshots were expired, skip cleanup
+      return;
+    }
+
+    // Reads and deletes are done using 
Tasks.foreach(...).suppressFailureWhenFinished to complete
+    // as much of the delete work as possible and avoid orphaned data or 
manifest files.
+

Review Comment:
   [nit] extra line



##########
core/src/main/java/org/apache/iceberg/ReachableFileCleanup.java:
##########
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import org.apache.iceberg.exceptions.NotFoundException;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.relocated.com.google.common.base.Joiner;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.MapDifference;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.util.Tasks;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * File cleanup strategy for snapshot expiration which determines, via an 
in-memory reference set,
+ * metadata and data files that are not reachable given the previous and 
current table states.
+ */
+class ReachableFileCleanup extends FileCleanupStrategy {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(IncrementalFileCleanup.class);
+
+  private final TableMetadata afterExpiration;
+  private final TableMetadata beforeExpiration;
+
+  ReachableFileCleanup(
+      TableOperations ops,
+      TableMetadata beforeExpiration,
+      ExecutorService deleteExecutorService,
+      ExecutorService planExecutorService,
+      Consumer<String> deleteFunc) {
+    super(ops, deleteExecutorService, planExecutorService, deleteFunc);
+    this.beforeExpiration = beforeExpiration;
+    this.afterExpiration = ops.refresh();
+  }
+
+  @Override
+  public void cleanFiles() {
+    // Identify all of the manifest lists to retain
+    Set<String> manifestListsBeforeExpiration =
+        ReachableFileUtil.manifestListLocations(beforeExpiration);
+    Map<String, ManifestFile> manifestsBeforeExpiration =
+        computeManifestsForManifestLists(manifestListsBeforeExpiration);
+    // Identify all of the manifest lists that currently exist
+    Set<String> manifestListsAfterExpiration =
+        ReachableFileUtil.manifestListLocations(afterExpiration);
+    Map<String, ManifestFile> manifestsAfterExpiration =
+        computeManifestsForManifestLists(manifestListsAfterExpiration);
+
+    // The manifest files which we could delete are just the ones which 
existed before expiration
+    // which no longer exist
+    Set<String> manifestListsToDelete =
+        Sets.difference(manifestListsBeforeExpiration, 
manifestListsAfterExpiration);
+
+    MapDifference<String, ManifestFile> diff =
+        Maps.difference(manifestsBeforeExpiration, manifestsAfterExpiration);
+
+    List<ManifestFile> manifestsToDelete = 
Lists.newArrayList(diff.entriesOnlyOnLeft().values());

Review Comment:
   should we compute the manifests to delete like this:
   1. compute the diff between manifestListsBeforeExpiration & 
manifestListsAfterExpiration
   2. create a set of manifest files by reading the manifest lists from step 1
   3. remove manifests from step 2 which are present in currentManifests (a 
safety check)



##########
core/src/main/java/org/apache/iceberg/ReachableFileCleanup.java:
##########
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import org.apache.iceberg.exceptions.NotFoundException;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.relocated.com.google.common.base.Joiner;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.MapDifference;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.util.Tasks;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * File cleanup strategy for snapshot expiration which determines, via an 
in-memory reference set,
+ * metadata and data files that are not reachable given the previous and 
current table states.
+ */
+class ReachableFileCleanup extends FileCleanupStrategy {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(IncrementalFileCleanup.class);

Review Comment:
   ```suggestion
     private static final Logger LOG = 
LoggerFactory.getLogger(ReachableFileCleanup.class);
   ```



##########
core/src/main/java/org/apache/iceberg/ReachableFileCleanup.java:
##########
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import org.apache.iceberg.exceptions.NotFoundException;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.relocated.com.google.common.base.Joiner;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.MapDifference;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.util.Tasks;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * File cleanup strategy for snapshot expiration which determines, via an 
in-memory reference set,
+ * metadata and data files that are not reachable given the previous and 
current table states.
+ */
+class ReachableFileCleanup extends FileCleanupStrategy {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(IncrementalFileCleanup.class);
+
+  private final TableMetadata afterExpiration;
+  private final TableMetadata beforeExpiration;
+
+  ReachableFileCleanup(
+      TableOperations ops,
+      TableMetadata beforeExpiration,
+      ExecutorService deleteExecutorService,
+      ExecutorService planExecutorService,
+      Consumer<String> deleteFunc) {
+    super(ops, deleteExecutorService, planExecutorService, deleteFunc);
+    this.beforeExpiration = beforeExpiration;
+    this.afterExpiration = ops.refresh();
+  }
+
+  @Override
+  public void cleanFiles() {
+    // Identify all of the manifest lists to retain
+    Set<String> manifestListsBeforeExpiration =
+        ReachableFileUtil.manifestListLocations(beforeExpiration);
+    Map<String, ManifestFile> manifestsBeforeExpiration =
+        computeManifestsForManifestLists(manifestListsBeforeExpiration);
+    // Identify all of the manifest lists that currently exist
+    Set<String> manifestListsAfterExpiration =
+        ReachableFileUtil.manifestListLocations(afterExpiration);
+    Map<String, ManifestFile> manifestsAfterExpiration =
+        computeManifestsForManifestLists(manifestListsAfterExpiration);
+
+    // The manifest files which we could delete are just the ones which 
existed before expiration
+    // which no longer exist
+    Set<String> manifestListsToDelete =
+        Sets.difference(manifestListsBeforeExpiration, 
manifestListsAfterExpiration);
+
+    MapDifference<String, ManifestFile> diff =
+        Maps.difference(manifestsBeforeExpiration, manifestsAfterExpiration);
+
+    List<ManifestFile> manifestsToDelete = 
Lists.newArrayList(diff.entriesOnlyOnLeft().values());
+    List<ManifestFile> currentManifests = 
Lists.newArrayList(manifestsAfterExpiration.values());
+
+    Set<String> filesToDelete = findFilesToDelete(manifestsToDelete, 
currentManifests);
+    Tasks.foreach(filesToDelete)
+        .executeWith(deleteExecutorService)
+        .retry(3)
+        .stopRetryOn(NotFoundException.class)
+        .suppressFailureWhenFinished()
+        .onFailure((file, exc) -> LOG.warn("Delete failed for data file: {}", 
file, exc))
+        .run(deleteFunc::accept);
+
+    Set<String> manifestPathsToDelete =
+        
manifestsToDelete.stream().map(ManifestFile::path).collect(Collectors.toSet());
+
+    deleteMetadataFiles(manifestPathsToDelete, manifestListsToDelete);
+  }
+
+  private Map<String, ManifestFile> 
computeManifestsForManifestLists(Set<String> manifestLists) {
+    Map<String, ManifestFile> pathToManifestFiles = Maps.newHashMap();
+    for (String manifestList : manifestLists) {
+      CloseableIterable<ManifestFile> manifestFiles = 
readManifestFiles(manifestList);
+      for (ManifestFile manifest : manifestFiles) {
+        pathToManifestFiles.put(manifest.path(), manifest.copy());
+      }
+    }
+    return pathToManifestFiles;
+  }
+
+  private void deleteMetadataFiles(
+      Set<String> manifestsToDelete, Set<String> manifestListsToDelete) {
+    LOG.warn("Manifests to delete: {}", Joiner.on(", 
").join(manifestsToDelete));
+    LOG.warn("Manifests Lists to delete: {}", Joiner.on(", 
").join(manifestListsToDelete));
+
+    Tasks.foreach(manifestsToDelete)
+        .executeWith(deleteExecutorService)
+        .retry(3)
+        .stopRetryOn(NotFoundException.class)
+        .suppressFailureWhenFinished()
+        .onFailure((manifest, exc) -> LOG.warn("Delete failed for manifest: 
{}", manifest, exc))
+        .run(deleteFunc::accept);

Review Comment:
   should we move this to a private function which takes the list of files and 
the file type ('Manifest' / 'Manifest List') as args?



##########
core/src/main/java/org/apache/iceberg/IncrementalFileCleanup.java:
##########
@@ -0,0 +1,366 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import org.apache.iceberg.exceptions.NotFoundException;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.relocated.com.google.common.base.Joiner;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.SnapshotUtil;
+import org.apache.iceberg.util.Tasks;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class IncrementalFileCleanup extends FileCleanupStrategy {
+  private static final Logger LOG = 
LoggerFactory.getLogger(IncrementalFileCleanup.class);
+
+  private final TableMetadata base;
+  private final TableMetadata current;
+  private final Consumer<String> deleteFunc;
+
+  IncrementalFileCleanup(
+      TableOperations ops,
+      TableMetadata base,
+      ExecutorService deleteExecutorService,
+      ExecutorService planExecutorService,
+      Consumer<String> deleteFunc) {
+    super(ops, deleteExecutorService, planExecutorService, deleteFunc);
+    this.base = base;
+    this.current = ops.refresh();
+    this.deleteFunc = deleteFunc;
+  }
+
+  @Override
+  @SuppressWarnings({"checkstyle:CyclomaticComplexity", "MethodLength"})
+  public void cleanFiles() {
+    if (current.refs().size() > 1) {
+      throw new UnsupportedOperationException(
+          "Cannot incrementally clean files for tables with more than 1 ref");
+    }
+
+    // clean up the expired snapshots:
+    // 1. Get a list of the snapshots that were removed
+    // 2. Delete any data files that were deleted by those snapshots and are 
not in the table
+    // 3. Delete any manifests that are no longer used by current snapshots
+    // 4. Delete the manifest lists
+
+    Set<Long> validIds = Sets.newHashSet();
+    for (Snapshot snapshot : current.snapshots()) {
+      validIds.add(snapshot.snapshotId());
+    }
+
+    Set<Long> expiredIds = Sets.newHashSet();
+    for (Snapshot snapshot : base.snapshots()) {
+      long snapshotId = snapshot.snapshotId();
+      if (!validIds.contains(snapshotId)) {
+        // the snapshot was expired
+        LOG.info("Expired snapshot: {}", snapshot);
+        expiredIds.add(snapshotId);
+      }
+    }
+
+    if (expiredIds.isEmpty()) {
+      // if no snapshots were expired, skip cleanup
+      return;
+    }
+
+    // Reads and deletes are done using 
Tasks.foreach(...).suppressFailureWhenFinished to complete
+    // as much of the delete work as possible and avoid orphaned data or 
manifest files.
+
+    SnapshotRef branchToCleanup = Iterables.getFirst(base.refs().values(), 
null);
+    if (branchToCleanup == null) {
+      return;
+    }
+
+    Snapshot branchTip = base.snapshot(branchToCleanup.snapshotId());
+    List<Snapshot> snapshots = current.snapshots();
+
+    // this is the set of ancestors of the current table state. when removing 
snapshots, this must
+    // only remove files that were deleted in an ancestor of the current table 
state to avoid
+    // physically deleting files that were logically deleted in a commit that 
was rolled back.
+    Set<Long> ancestorIds = 
Sets.newHashSet(SnapshotUtil.ancestorIds(branchTip, base::snapshot));
+
+    Set<Long> pickedAncestorSnapshotIds = Sets.newHashSet();
+    for (long snapshotId : ancestorIds) {
+      String sourceSnapshotId =
+          
base.snapshot(snapshotId).summary().get(SnapshotSummary.SOURCE_SNAPSHOT_ID_PROP);
+      if (sourceSnapshotId != null) {
+        // protect any snapshot that was cherry-picked into the current table 
state
+        pickedAncestorSnapshotIds.add(Long.parseLong(sourceSnapshotId));
+      }
+    }
+
+    // find manifests to clean up that are still referenced by a valid 
snapshot, but written by an
+    // expired snapshot
+    Set<String> validManifests = Sets.newHashSet();
+    Set<ManifestFile> manifestsToScan = Sets.newHashSet();
+    Tasks.foreach(snapshots)
+        .retry(3)
+        .suppressFailureWhenFinished()
+        .onFailure(
+            (snapshot, exc) ->
+                LOG.warn(
+                    "Failed on snapshot {} while reading manifest list: {}",
+                    snapshot.snapshotId(),
+                    snapshot.manifestListLocation(),
+                    exc))
+        .run(
+            snapshot -> {
+              try (CloseableIterable<ManifestFile> manifests = 
readManifestFiles(snapshot)) {
+                for (ManifestFile manifest : manifests) {
+                  validManifests.add(manifest.path());
+
+                  long snapshotId = manifest.snapshotId();
+                  // whether the manifest was created by a valid snapshot 
(true) or an expired
+                  // snapshot (false)
+                  boolean fromValidSnapshots = validIds.contains(snapshotId);
+                  // whether the snapshot that created the manifest was an 
ancestor of the table
+                  // state
+                  boolean isFromAncestor = ancestorIds.contains(snapshotId);
+                  // whether the changes in this snapshot have been picked 
into the current table
+                  // state
+                  boolean isPicked = 
pickedAncestorSnapshotIds.contains(snapshotId);
+                  // if the snapshot that wrote this manifest is no longer 
valid (has expired),
+                  // then delete its deleted files. note that this is only for 
expired snapshots
+                  // that are in the
+                  // current table state
+                  if (!fromValidSnapshots
+                      && (isFromAncestor || isPicked)
+                      && manifest.hasDeletedFiles()) {
+                    manifestsToScan.add(manifest.copy());
+                  }
+                }
+
+              } catch (IOException e) {
+                throw new RuntimeIOException(
+                    e, "Failed to close manifest list: %s", 
snapshot.manifestListLocation());
+              }
+            });
+
+    // find manifests to clean up that were only referenced by snapshots that 
have expired
+    Set<String> manifestListsToDelete = Sets.newHashSet();
+    Set<String> manifestsToDelete = Sets.newHashSet();
+    Set<ManifestFile> manifestsToRevert = Sets.newHashSet();
+    Tasks.foreach(base.snapshots())
+        .retry(3)
+        .suppressFailureWhenFinished()
+        .onFailure(
+            (snapshot, exc) ->
+                LOG.warn(
+                    "Failed on snapshot {} while reading manifest list: {}",
+                    snapshot.snapshotId(),
+                    snapshot.manifestListLocation(),
+                    exc))
+        .run(
+            snapshot -> {
+              long snapshotId = snapshot.snapshotId();
+              if (!validIds.contains(snapshotId)) {
+                // determine whether the changes in this snapshot are in the 
current table state
+                if (pickedAncestorSnapshotIds.contains(snapshotId)) {
+                  // this snapshot was cherry-picked into the current table 
state, so skip cleaning
+                  // it up.
+                  // its changes will expire when the picked snapshot expires.
+                  // A -- C -- D (source=B)
+                  //  `- B <-- this commit
+                  return;
+                }
+
+                long sourceSnapshotId =
+                    PropertyUtil.propertyAsLong(
+                        snapshot.summary(), 
SnapshotSummary.SOURCE_SNAPSHOT_ID_PROP, -1);
+                if (ancestorIds.contains(sourceSnapshotId)) {
+                  // this commit was cherry-picked from a commit that is in 
the current table state.
+                  // do not clean up its
+                  // changes because it would revert data file additions that 
are in the current
+                  // table.
+                  // A -- B -- C
+                  //  `- D (source=B) <-- this commit
+                  return;
+                }
+
+                if (pickedAncestorSnapshotIds.contains(sourceSnapshotId)) {
+                  // this commit was cherry-picked from a commit that is in 
the current table state.
+                  // do not clean up its
+                  // changes because it would revert data file additions that 
are in the current
+                  // table.
+                  // A -- C -- E (source=B)
+                  //  `- B `- D (source=B) <-- this commit
+                  return;
+                }
+
+                // find any manifests that are no longer needed
+                try (CloseableIterable<ManifestFile> manifests = 
readManifestFiles(snapshot)) {
+                  for (ManifestFile manifest : manifests) {
+                    if (!validManifests.contains(manifest.path())) {
+                      manifestsToDelete.add(manifest.path());
+
+                      boolean isFromAncestor = 
ancestorIds.contains(manifest.snapshotId());
+                      boolean isFromExpiringSnapshot = 
expiredIds.contains(manifest.snapshotId());
+
+                      if (isFromAncestor && manifest.hasDeletedFiles()) {
+                        // Only delete data files that were deleted in by an 
expired snapshot if
+                        // that
+                        // snapshot is an ancestor of the current table state. 
Otherwise, a snapshot
+                        // that
+                        // deleted files and was rolled back will delete files 
that could be in the
+                        // current
+                        // table state.
+                        manifestsToScan.add(manifest.copy());
+                      }
+
+                      if (!isFromAncestor && isFromExpiringSnapshot && 
manifest.hasAddedFiles()) {
+                        // Because the manifest was written by a snapshot that 
is not an ancestor of
+                        // the
+                        // current table state, the files added in this 
manifest can be removed. The
+                        // extra
+                        // check whether the manifest was written by a known 
snapshot that was
+                        // expired in
+                        // this commit ensures that the full ancestor list 
between when the snapshot
+                        // was
+                        // written and this expiration is known and there is 
no missing history. If
+                        // history
+                        // were missing, then the snapshot could be an 
ancestor of the table state
+                        // but the
+                        // ancestor ID set would not contain it and this would 
be unsafe.
+                        manifestsToRevert.add(manifest.copy());
+                      }
+                    }
+                  }
+                } catch (IOException e) {
+                  throw new RuntimeIOException(
+                      e, "Failed to close manifest list: %s", 
snapshot.manifestListLocation());
+                }
+
+                // add the manifest list to the delete set, if present
+                if (snapshot.manifestListLocation() != null) {
+                  manifestListsToDelete.add(snapshot.manifestListLocation());
+                }
+              }
+            });
+    deleteDataFiles(manifestsToScan, manifestsToRevert, validIds);
+    deleteMetadataFiles(manifestsToDelete, manifestListsToDelete);
+  }
+
+  private void deleteMetadataFiles(
+      Set<String> manifestsToDelete, Set<String> manifestListsToDelete) {
+    LOG.warn("Manifests to delete: {}", Joiner.on(", 
").join(manifestsToDelete));
+    LOG.warn("Manifests Lists to delete: {}", Joiner.on(", 
").join(manifestListsToDelete));
+
+    Tasks.foreach(manifestsToDelete)
+        .executeWith(deleteExecutorService)
+        .retry(3)
+        .stopRetryOn(NotFoundException.class)
+        .suppressFailureWhenFinished()
+        .onFailure((manifest, exc) -> LOG.warn("Delete failed for manifest: 
{}", manifest, exc))
+        .run(deleteFunc::accept);
+
+    Tasks.foreach(manifestListsToDelete)
+        .executeWith(deleteExecutorService)
+        .retry(3)
+        .stopRetryOn(NotFoundException.class)
+        .suppressFailureWhenFinished()
+        .onFailure((list, exc) -> LOG.warn("Delete failed for manifest list: 
{}", list, exc))
+        .run(deleteFunc::accept);
+  }

Review Comment:
   can be moved to FileCleanupStrategy



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to