This is an automated email from the ASF dual-hosted git repository.
etudenhoefner pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/iceberg.git
The following commit(s) were added to refs/heads/main by this push:
new 6b2e804539 Core: Don't fail when bulk deleting metadata in CatalogUtil (#15464)
6b2e804539 is described below
commit 6b2e8045398918d3392518d005a04d0e9c9219ab
Author: Ayson Chang <[email protected]>
AuthorDate: Tue Mar 3 02:30:33 2026 +0800
Core: Don't fail when bulk deleting metadata in CatalogUtil (#15464)
---
.../main/java/org/apache/iceberg/CatalogUtil.java | 23 +++++--------
.../java/org/apache/iceberg/TestCatalogUtil.java | 38 ++++++++++++++++++++++
2 files changed, 46 insertions(+), 15 deletions(-)
diff --git a/core/src/main/java/org/apache/iceberg/CatalogUtil.java b/core/src/main/java/org/apache/iceberg/CatalogUtil.java
index 5390dfdd39..989d6b3ddf 100644
--- a/core/src/main/java/org/apache/iceberg/CatalogUtil.java
+++ b/core/src/main/java/org/apache/iceberg/CatalogUtil.java
@@ -26,6 +26,7 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
+import java.util.stream.Collectors;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.common.DynClasses;
@@ -586,21 +587,13 @@ public class CatalogUtil {
// the log, thus we don't include metadata.previousFiles() for deletion - everything else can
// be removed
removedPreviousMetadataFiles.removeAll(metadata.previousFiles());
- if (io instanceof SupportsBulkOperations) {
- ((SupportsBulkOperations) io)
- .deleteFiles(
- Iterables.transform(
- removedPreviousMetadataFiles, TableMetadata.MetadataLogEntry::file));
- } else {
- Tasks.foreach(removedPreviousMetadataFiles)
- .noRetry()
- .suppressFailureWhenFinished()
- .onFailure(
- (previousMetadataFile, exc) ->
- LOG.warn(
- "Delete failed for previous metadata file: {}", previousMetadataFile, exc))
- .run(previousMetadataFile -> io.deleteFile(previousMetadataFile.file()));
- }
+ deleteFiles(
+ io,
+ removedPreviousMetadataFiles.stream()
+ .map(TableMetadata.MetadataLogEntry::file)
+ .collect(Collectors.toSet()),
+ "metadata",
+ true);
}
}
}
diff --git a/core/src/test/java/org/apache/iceberg/TestCatalogUtil.java b/core/src/test/java/org/apache/iceberg/TestCatalogUtil.java
index 4b755079ec..84e79e35c9 100644
--- a/core/src/test/java/org/apache/iceberg/TestCatalogUtil.java
+++ b/core/src/test/java/org/apache/iceberg/TestCatalogUtil.java
@@ -19,7 +19,15 @@
package org.apache.iceberg;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatCode;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.withSettings;
import java.util.List;
import java.util.Map;
@@ -33,9 +41,11 @@ import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.io.StorageCredential;
+import org.apache.iceberg.io.SupportsBulkOperations;
import org.apache.iceberg.io.SupportsStorageCredentials;
import org.apache.iceberg.metrics.MetricsReport;
import org.apache.iceberg.metrics.MetricsReporter;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.junit.jupiter.api.Test;
@@ -282,6 +292,34 @@ public class TestCatalogUtil {
.isEqualTo(pathStyleCatalogName + "/" + nameSpaceWithTwoLevels + "." + tableName);
}
+ @Test
+ public void noFailureWhenBulkDeletingMetadataFiles() {
+ FileIO io = mock(FileIO.class, withSettings().extraInterfaces(SupportsBulkOperations.class));
+
+ doThrow(new RuntimeException("Simulated bulk delete failure"))
+ .when((SupportsBulkOperations) io)
+ .deleteFiles(any());
+
+ TableMetadata.MetadataLogEntry entry1 =
+ new TableMetadata.MetadataLogEntry(
+ System.currentTimeMillis(), "s3://bucket/metadata/v1.json");
+ TableMetadata.MetadataLogEntry entry2 =
+ new TableMetadata.MetadataLogEntry(
+ System.currentTimeMillis(), "s3://bucket/metadata/v2.json");
+
+ TableMetadata base = mock(TableMetadata.class);
+ TableMetadata metadata = mock(TableMetadata.class);
+
+ when(metadata.propertyAsBoolean(
+ eq(TableProperties.METADATA_DELETE_AFTER_COMMIT_ENABLED), anyBoolean()))
+ .thenReturn(true);
+ when(base.previousFiles()).thenReturn(ImmutableList.of(entry1, entry2));
+ when(metadata.previousFiles()).thenReturn(ImmutableList.of());
+
+ assertThatCode(() -> CatalogUtil.deleteRemovedMetadataFiles(io, base, metadata))
+ .doesNotThrowAnyException();
+ }
+
public static class TestCatalog extends BaseMetastoreCatalog {
private String catalogName;