RussellSpitzer commented on code in PR #3256:
URL: https://github.com/apache/polaris/pull/3256#discussion_r2673285544
##########
storage/files/impl/src/main/java/org/apache/polaris/storage/files/impl/FileOperationsImpl.java:
##########
@@ -0,0 +1,472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.polaris.storage.files.impl;
+
+import static java.lang.String.format;
+
+import com.google.common.collect.Streams;
+import com.google.common.util.concurrent.RateLimiter;
+import jakarta.annotation.Nonnull;
+import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.OptionalDouble;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Stream;
+import org.apache.iceberg.ContentFile;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ManifestFiles;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.TableMetadata;
+import org.apache.iceberg.TableMetadataParser;
+import org.apache.iceberg.io.BulkDeletionFailureException;
+import org.apache.iceberg.io.CloseableIterator;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.io.SupportsBulkOperations;
+import org.apache.iceberg.io.SupportsPrefixOperations;
+import org.apache.iceberg.view.ViewMetadata;
+import org.apache.iceberg.view.ViewMetadataParser;
+import org.apache.polaris.storage.files.api.FileFilter;
+import org.apache.polaris.storage.files.api.FileOperations;
+import org.apache.polaris.storage.files.api.FileSpec;
+import org.apache.polaris.storage.files.api.FileType;
+import org.apache.polaris.storage.files.api.ImmutablePurgeStats;
+import org.apache.polaris.storage.files.api.PurgeSpec;
+import org.apache.polaris.storage.files.api.PurgeStats;
+import org.projectnessie.storage.uri.StorageUri;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * @param fileIO the {@link FileIO} instance to use. The given instance must implement both {@link
+ *     org.apache.iceberg.io.SupportsBulkOperations} and {@link
+ *     org.apache.iceberg.io.SupportsPrefixOperations}.
+ */
+record FileOperationsImpl(@Nonnull FileIO fileIO) implements FileOperations {
+  private static final Logger LOGGER = LoggerFactory.getLogger(FileOperationsImpl.class);
+
+  @Override
+  public Stream<FileSpec> findFiles(@Nonnull String prefix, @Nonnull FileFilter filter) {
+    var prefixUri = StorageUri.of(prefix).resolve("/");
+    if (fileIO instanceof SupportsPrefixOperations prefixOps) {
+      return Streams.stream(prefixOps.listPrefix(prefix).iterator())
+          .filter(Objects::nonNull)
+          .map(
+              fileInfo -> {
+                var location = StorageUri.of(fileInfo.location());
+                if (!location.isAbsolute()) {
+                  // ADLSFileIO does _not_ include the prefix, but GCSFileIO and S3FileIO do.
+                  location = prefixUri.resolve(location);
+                }
+                return FileSpec.builder()
+                    .location(location.toString())
+                    .size(fileInfo.size())
+                    .createdAtMillis(fileInfo.createdAtMillis())
+                    .build();
+              })
+          .filter(filter);
+    }
+
+    throw new IllegalStateException(
+        format(
+            "An Iceberg FileIO supporting prefix operations is required, but the given %s does not",
+            fileIO.getClass().getName()));
+  }
+
+  @Override
+  public Stream<FileSpec> identifyIcebergTableFiles(
+      @Nonnull String tableMetadataLocation, boolean deduplicate) {
+    var metadataOpt = readTableMetadataFailsafe(tableMetadataLocation);
+    if (metadataOpt.isEmpty()) {
+      return Stream.empty();
+    }
+    var metadata = metadataOpt.get();
+
+    var metadataFileSpec =
+        FileSpec.fromLocation(tableMetadataLocation).fileType(FileType.ICEBERG_METADATA).build();
+
+    var fileSources = new ArrayList<Stream<FileSpec>>();
+
+    fileSources.add(Stream.of(metadataFileSpec));
+
+    var statisticsFiles = metadata.statisticsFiles();
+    if (statisticsFiles != null) {
+      fileSources.addFirst(
+          statisticsFiles.stream()
+              .map(
+                  statisticsFile ->
+                      FileSpec.fromLocationAndSize(
+                              statisticsFile.path(), statisticsFile.fileSizeInBytes())
+                          .fileType(FileType.ICEBERG_STATISTICS)
+                          .build()));
+    }
+
+    var previousFiles = metadata.previousFiles();
+    if (previousFiles != null) {
+      fileSources.add(
+          previousFiles.stream()
+              .filter(
+                  metadataLogEntry ->
+                      metadataLogEntry.file() != null && !metadataLogEntry.file().isEmpty())
+              .map(
+                  metadataLogEntry ->
+                      FileSpec.fromLocation(metadataLogEntry.file())
+                          .fileType(FileType.ICEBERG_METADATA)
+                          .build()));
+    }
+
+    var specsById = metadata.specsById();
+
+    var addPredicate = deduplicator(deduplicate);
+
+    fileSources.addFirst(
+        metadata.snapshots().stream()
+            // Newest snapshots first
+            .sorted((s1, s2) -> Long.compare(s2.timestampMillis(), s1.timestampMillis()))
+            .flatMap(
+                snapshot -> identifyIcebergTableSnapshotFiles(snapshot, specsById, addPredicate)));
+
+    // Return "dependencies" before the "metadata" itself, so the probability of being able to
+    // resume a failed/aborted purge is higher.
+
+    return fileSources.stream().flatMap(Function.identity());
+  }
+
+  static Predicate<String> deduplicator(boolean deduplicate) {

Review Comment:
I mentioned this above, but I think we can skip the "deduplicator" logic altogether here by using a bit more of the information in the Iceberg table. Starting with the oldest snapshot, add all manifests and entries; then, for all newer snapshots, add only the manifests that are marked as added and, within those, only the entries that were added.

This only gets messed up if there are other snapshots without parents (branches without a common snapshot, or orphaned WAP - that one is probably not possible?), so we could handle duplicates for that case, but it should only require deduping at the manifest level, I think.
We only need to check the added manifests in each snapshot and make sure those are deduped; then we can use the added/existing status within those manifests.
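To make the suggestion concrete, here is a minimal sketch of that oldest-first traversal against Iceberg's public API. Everything in it is illustrative, not the PR's code: `SnapshotOrderedFileCollector` and `collectDataFilePaths` are invented names, delete manifests and metadata/manifest-list files are left out, and `Snapshot#addedDataFiles` stands in for the entry-level ADDED-status filtering because `ManifestEntry` is not public API.

```java
import java.io.IOException;
import java.util.Comparator;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.ManifestContent;
import org.apache.iceberg.ManifestFile;
import org.apache.iceberg.ManifestFiles;
import org.apache.iceberg.ManifestReader;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.io.FileIO;

class SnapshotOrderedFileCollector { // hypothetical helper, not part of this PR

  static Set<String> collectDataFilePaths(TableMetadata metadata, FileIO io) throws IOException {
    // Dedup is only needed at the manifest level; in a real implementation this set
    // would also feed the purge (manifests are deletable files, too).
    Set<String> seenManifests = new HashSet<>();
    Set<String> dataFiles = new LinkedHashSet<>();

    boolean oldest = true;
    for (Snapshot snapshot :
        metadata.snapshots().stream()
            .sorted(Comparator.comparingLong(Snapshot::timestampMillis))
            .toList()) {
      if (oldest) {
        // Oldest snapshot: everything it references is reachable, so read all manifests.
        for (ManifestFile manifest : snapshot.allManifests(io)) {
          if (!seenManifests.add(manifest.path()) || manifest.content() != ManifestContent.DATA) {
            continue; // already seen, or a delete manifest (omitted in this sketch)
          }
          try (ManifestReader<DataFile> reader = ManifestFiles.read(manifest, io)) {
            for (DataFile file : reader) {
              dataFiles.add(file.location()); // Iceberg 1.7+; older releases use path()
            }
          }
        }
        oldest = false;
      } else {
        // Newer snapshots: record only the manifests this snapshot committed itself
        // (ManifestFile#snapshotId is the id of the committing snapshot) ...
        for (ManifestFile manifest : snapshot.allManifests(io)) {
          if (manifest.snapshotId() != null && manifest.snapshotId() == snapshot.snapshotId()) {
            seenManifests.add(manifest.path());
          }
        }
        // ... and only the entries those manifests added. Snapshot#addedDataFiles
        // exposes exactly the ADDED entries without touching ManifestEntry.
        for (DataFile file : snapshot.addedDataFiles(io)) {
          dataFiles.add(file.location());
        }
      }
    }
    return dataFiles;
  }
}
```

Because every newer snapshot only contributes what it committed itself, file-level dedup drops out of the common path; the `seenManifests` check only matters for the parentless-snapshot case mentioned above, where two histories can re-reference the same manifest.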
