This is an automated email from the ASF dual-hosted git repository.

singhpk234 pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/polaris.git


The following commit(s) were added to refs/heads/main by this push:
     new 0be2de8d0 fix(Catalog): Add List PolarisStorageAction for all metadata read operations (#1391)
0be2de8d0 is described below

commit 0be2de8d05fdbc3cbd886d5fca7b22e9c4d61660
Author: fivetran-ashokborra <[email protected]>
AuthorDate: Mon May 12 21:29:39 2025 +0530

    fix(Catalog): Add List PolarisStorageAction for all metadata read operations (#1391)
    
    * fix(Catalog): Add List PolarisStorageAction for all metadata read operations
---
 .../src/test_spark_sql_s3_with_privileges.py       | 77 +++++++++++++++++++++-
 .../service/catalog/iceberg/IcebergCatalog.java    | 16 +++--
 2 files changed, 87 insertions(+), 6 deletions(-)
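
A note on why LIST is likely needed alongside READ for metadata reads: S3 only reports a missing object as 404 (NoSuchKey) when the caller also holds s3:ListBucket on the bucket; without list permission the same GetObject comes back as 403 (AccessDenied). So a deleted metadata file surfaced as an access error rather than the clean 404 the new regression test below expects. A minimal boto3 sketch of that S3 behavior (the bucket, key, and credentials are hypothetical placeholders, not values from this commit):

    import boto3
    from botocore.exceptions import ClientError

    # Client built from vended (subscoped) credentials -- placeholders here.
    s3 = boto3.client('s3',
                      aws_access_key_id='<vended-access-key-id>',
                      aws_secret_access_key='<vended-secret-access-key>',
                      aws_session_token='<vended-session-token>')

    try:
        # GetObject on a metadata file that has been deleted:
        # with s3:ListBucket in the scoped policy, S3 answers 404 (NoSuchKey);
        # without it, S3 answers 403 to avoid leaking whether the key exists.
        s3.get_object(Bucket='my-test-bucket',
                      Key='polaris/db1/schema/iceberg_table/metadata/v1.metadata.json')
    except ClientError as e:
        print(e.response['ResponseMetadata']['HTTPStatusCode'])  # 404 vs. 403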

diff --git a/regtests/t_pyspark/src/test_spark_sql_s3_with_privileges.py b/regtests/t_pyspark/src/test_spark_sql_s3_with_privileges.py
index d4c1289bf..833135940 100644
--- a/regtests/t_pyspark/src/test_spark_sql_s3_with_privileges.py
+++ b/regtests/t_pyspark/src/test_spark_sql_s3_with_privileges.py
@@ -520,7 +520,7 @@ def test_spark_credentials_can_delete_after_purge(root_client, snowflake_catalog
     attempts = 0
 
     # watch the data directory. metadata will be deleted first, so if data directory is clear, we can expect
-    # metadatat diretory to be clear also
+    # metadata directory to be clear also
     while 'Contents' in objects and len(objects['Contents']) > 0 and attempts < 60:
       time.sleep(1)  # seconds, not milliseconds ;)
       objects = s3.list_objects(Bucket=test_bucket, Delimiter='/',
@@ -1149,6 +1149,81 @@ def test_spark_ctas(snowflake_catalog, polaris_catalog_url, snowman):
     spark.sql(f"drop table {table_name}_t2 PURGE")
 
 
[email protected](os.environ.get('AWS_TEST_ENABLED', 'False').lower() != 'true',
+                    reason='AWS_TEST_ENABLED is not set or is false')
+def test_spark_credentials_s3_exception_on_metadata_file_deletion(root_client, snowflake_catalog, polaris_catalog_url,
+                                                snowman, snowman_catalog_client, test_bucket, aws_bucket_base_location_prefix):
+    """
+    Create a table using Spark, then call the loadTable API directly with the snowman token to fetch vended
+    credentials for that table.
+    Delete the metadata directory and call loadTable again; it should throw a 404 exception.
+    :param root_client:
+    :param snowflake_catalog:
+    :param polaris_catalog_url:
+    :param snowman:
+    :param snowman_catalog_client:
+    :param test_bucket:
+    :param aws_bucket_base_location_prefix:
+    :return:
+    """
+    with IcebergSparkSession(credentials=f'{snowman.principal.client_id}:{snowman.credentials.client_secret}',
+                             catalog_name=snowflake_catalog.name,
+                             polaris_url=polaris_catalog_url) as spark:
+        spark.sql(f'USE {snowflake_catalog.name}')
+        spark.sql('CREATE NAMESPACE db1')
+        spark.sql('CREATE NAMESPACE db1.schema')
+        spark.sql('USE db1.schema')
+        spark.sql('CREATE TABLE iceberg_table (col1 int, col2 string)')
+
+    response = snowman_catalog_client.load_table(snowflake_catalog.name, unquote('db1%1Fschema'),
+                                                        "iceberg_table",
+                                                        "vended-credentials")
+    assert response.config is not None
+    assert 's3.access-key-id' in response.config
+    assert 's3.secret-access-key' in response.config
+    assert 's3.session-token' in response.config
+
+    s3 = boto3.client('s3',
+                      aws_access_key_id=response.config['s3.access-key-id'],
+                      aws_secret_access_key=response.config['s3.secret-access-key'],
+                      aws_session_token=response.config['s3.session-token'])
+
+    # Get metadata files
+    objects = s3.list_objects(Bucket=test_bucket, Delimiter='/',
+                              Prefix=f'{aws_bucket_base_location_prefix}/snowflake_catalog/db1/schema/iceberg_table/metadata/')
+    assert objects is not None
+    assert 'Contents' in objects
+    assert len(objects['Contents']) > 0
+
+    # Verify metadata content
+    metadata_file = next(f for f in objects['Contents'] if f['Key'].endswith('metadata.json'))
+    assert metadata_file is not None
+
+    metadata_contents = s3.get_object(Bucket=test_bucket, Key=metadata_file['Key'])
+    assert metadata_contents is not None
+    assert metadata_contents['ContentLength'] > 0
+
+    # Delete metadata files
+    s3.delete_objects(Bucket=test_bucket,
+                      Delete={'Objects': [{'Key': obj['Key']} for obj in objects['Contents']]})
+
+    try:
+        response = snowman_catalog_client.load_table(snowflake_catalog.name, unquote('db1%1Fschema'),
+                                                     "iceberg_table",
+                                                     "vended-credentials")
+    except Exception as e:
+        assert '404' in str(e)
+
+
+    with IcebergSparkSession(credentials=f'{snowman.principal.client_id}:{snowman.credentials.client_secret}',
+                             catalog_name=snowflake_catalog.name,
+                             polaris_url=polaris_catalog_url) as spark:
+        spark.sql(f'USE {snowflake_catalog.name}')
+        spark.sql('USE db1.schema')
+        spark.sql('DROP TABLE iceberg_table PURGE')
+        spark.sql(f'USE {snowflake_catalog.name}')
+        spark.sql('DROP NAMESPACE db1.schema')
+        spark.sql('DROP NAMESPACE db1')
+
 def create_catalog_role(api, catalog, role_name):
   catalog_role = CatalogRole(name=role_name)
   try:
diff --git a/service/common/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java b/service/common/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java
index 7c02a6154..2647c5bc6 100644
--- a/service/common/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java
+++ b/service/common/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java
@@ -357,7 +357,7 @@ public class IcebergCatalog extends BaseMetastoreViewCatalog
             Set.of(locationDir),
             resolvedParent,
             new HashMap<>(tableDefaultProperties),
-            Set.of(PolarisStorageActions.READ));
+            Set.of(PolarisStorageActions.READ, PolarisStorageActions.LIST));
 
     InputFile metadataFile = fileIO.newInputFile(metadataFileLocation);
     TableMetadata metadata = TableMetadataParser.read(fileIO, metadataFile);
@@ -1383,7 +1383,7 @@ public class IcebergCatalog extends BaseMetastoreViewCatalog
                       Set.of(latestLocationDir),
                       resolvedEntities,
                       new HashMap<>(tableDefaultProperties),
-                      Set.of(PolarisStorageActions.READ));
+                      Set.of(PolarisStorageActions.READ, PolarisStorageActions.LIST));
               return TableMetadataParser.read(fileIO, metadataLocation);
             });
         polarisEventListener.onAfterTableRefreshed(new AfterTableRefreshedEvent(tableIdentifier));
@@ -1422,7 +1422,10 @@ public class IcebergCatalog extends BaseMetastoreViewCatalog
               getLocationsAllowedToBeAccessed(metadata),
               resolvedStorageEntity,
               new HashMap<>(metadata.properties()),
-              Set.of(PolarisStorageActions.READ, PolarisStorageActions.WRITE));
+              Set.of(
+                  PolarisStorageActions.READ,
+                  PolarisStorageActions.WRITE,
+                  PolarisStorageActions.LIST));
 
       List<PolarisEntity> resolvedNamespace =
           resolvedTableEntities == null
@@ -1757,7 +1760,7 @@ public class IcebergCatalog extends BaseMetastoreViewCatalog
                       Set.of(latestLocationDir),
                       resolvedEntities,
                       new HashMap<>(tableDefaultProperties),
-                      Set.of(PolarisStorageActions.READ));
+                      Set.of(PolarisStorageActions.READ, PolarisStorageActions.LIST));
 
                return ViewMetadataParser.read(fileIO.newInputFile(metadataLocation));
             });
@@ -2447,7 +2450,10 @@ public class IcebergCatalog extends BaseMetastoreViewCatalog
               Set.of(locationDir),
               resolvedParent,
               new HashMap<>(tableDefaultProperties),
-              Set.of(PolarisStorageActions.READ, PolarisStorageActions.WRITE));
+              Set.of(
+                  PolarisStorageActions.READ,
+                  PolarisStorageActions.WRITE,
+                  PolarisStorageActions.LIST));
       TableMetadata tableMetadata = TableMetadataParser.read(fileIO, newLocation);
 
       // then validate that it points to a valid location for this table

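For readers wondering how the READ/WRITE/LIST actions above could map onto the credentials the test receives, below is a rough sketch of a scoped STS session policy. The role ARN, bucket, and prefix are hypothetical and the policy shape Polaris actually generates may differ; this only illustrates that LIST would correspond to s3:ListBucket scoped to the table prefix, while READ corresponds to s3:GetObject on the objects themselves.

    import json
    import boto3

    def scoped_credentials(role_arn, bucket, prefix, include_list):
        # s3:GetObject on the table's objects covers the READ action.
        statements = [{
            'Effect': 'Allow',
            'Action': ['s3:GetObject'],
            'Resource': [f'arn:aws:s3:::{bucket}/{prefix}*'],
        }]
        if include_list:
            # s3:ListBucket limited to the table prefix covers the LIST action.
            statements.append({
                'Effect': 'Allow',
                'Action': ['s3:ListBucket'],
                'Resource': [f'arn:aws:s3:::{bucket}'],
                'Condition': {'StringLike': {'s3:prefix': [f'{prefix}*']}},
            })
        sts = boto3.client('sts')
        resp = sts.assume_role(
            RoleArn=role_arn,
            RoleSessionName='metadata-read',
            Policy=json.dumps({'Version': '2012-10-17', 'Statement': statements}),
        )
        return resp['Credentials']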