This is an automated email from the ASF dual-hosted git repository.

kevinjqliu pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/iceberg-python.git


The following commit(s) were added to refs/heads/main by this push:
     new ff0fa555e Re-enable rule B208 (#2738)
ff0fa555e is described below

commit ff0fa555e2579ec15f6db79a6c6012e08d4d9232
Author: Alex Stephen <[email protected]>
AuthorDate: Wed Nov 12 04:49:53 2025 +0530

    Re-enable rule B208 (#2738)
    
    Part of #2700
    
    # Rationale for this change
    This re-enables rule B028 (flake8-bugbear's `no-explicit-stacklevel`
    check) in the ruff linter by adding an explicit `stacklevel` argument
    to each `warnings.warn` call it flags.
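
    For context: B028 flags `warnings.warn` calls that rely on the
    default `stacklevel=1`, and the `ruff.toml` change below stops
    ignoring it. A minimal sketch of the before/after pattern this diff
    applies (function names here are illustrative, not from the
    codebase):

        import warnings

        def load_decoder() -> None:
            # B028: the default stacklevel=1 attributes the warning to
            # this line inside the library, not to the caller.
            warnings.warn("Falling back to pure Python decoder")

        def load_decoder_fixed() -> None:
            # stacklevel=2 attributes the warning one frame up, i.e. to
            # whatever called load_decoder_fixed(), which is usually the
            # line the user can actually act on.
            warnings.warn("Falling back to pure Python decoder",
                          stacklevel=2)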
    
    ## Are these changes tested?
    `make lint` and `make test` should pass.
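
    For a quick standalone check of the behavior `stacklevel=2` buys us
    (a hypothetical snippet, not part of the test suite):

        import warnings

        def library_helper() -> None:
            warnings.warn("deprecated path", stacklevel=2)

        def user_code() -> None:
            library_helper()  # the warning should point at this line

        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            user_code()
            # The recorded warning's filename/lineno refer to the call
            # site inside user_code(), not to warnings.warn itself.
            print(caught[0].filename, caught[0].lineno)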
    
    ## Are there any user-facing changes?
    
    
    Co-authored-by: Kevin Liu <[email protected]>
---
 pyiceberg/avro/decoder.py    | 2 +-
 pyiceberg/io/__init__.py     | 2 +-
 pyiceberg/io/pyarrow.py      | 4 ++--
 pyiceberg/table/__init__.py  | 8 ++++----
 pyiceberg/table/snapshots.py | 2 +-
 ruff.toml                    | 1 -
 tests/io/test_pyarrow.py     | 2 +-
 7 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/pyiceberg/avro/decoder.py b/pyiceberg/avro/decoder.py
index 75b320902..d30475acf 100644
--- a/pyiceberg/avro/decoder.py
+++ b/pyiceberg/avro/decoder.py
@@ -181,6 +181,6 @@ def new_decoder(b: bytes) -> BinaryDecoder:
     except ModuleNotFoundError:
         import warnings
 
-        warnings.warn("Falling back to pure Python Avro decoder, missing 
Cython implementation")
+        warnings.warn("Falling back to pure Python Avro decoder, missing 
Cython implementation", stacklevel=2)
 
         return StreamingBinaryDecoder(b)
diff --git a/pyiceberg/io/__init__.py b/pyiceberg/io/__init__.py
index 1915afcd0..981e03394 100644
--- a/pyiceberg/io/__init__.py
+++ b/pyiceberg/io/__init__.py
@@ -340,7 +340,7 @@ def _infer_file_io_from_scheme(path: str, properties: Properties) -> FileIO | No
                 if file_io := _import_file_io(file_io_path, properties):
                     return file_io
         else:
-            warnings.warn(f"No preferred file implementation for scheme: 
{parsed_url.scheme}")
+            warnings.warn(f"No preferred file implementation for scheme: 
{parsed_url.scheme}", stacklevel=2)
     return None
 
 
diff --git a/pyiceberg/io/pyarrow.py b/pyiceberg/io/pyarrow.py
index 5be4c5d24..179e9e892 100644
--- a/pyiceberg/io/pyarrow.py
+++ b/pyiceberg/io/pyarrow.py
@@ -232,7 +232,7 @@ def _import_retry_strategy(impl: str) -> S3RetryStrategy | None:
         class_ = getattr(module, class_name)
         return class_()
     except (ModuleNotFoundError, AttributeError):
-        warnings.warn(f"Could not initialize S3 retry strategy: {impl}")
+        warnings.warn(f"Could not initialize S3 retry strategy: {impl}", 
stacklevel=2)
         return None
 
 
@@ -2768,7 +2768,7 @@ def _get_parquet_writer_kwargs(table_properties: Properties) -> Dict[str, Any]:
         f"{TableProperties.PARQUET_BLOOM_FILTER_COLUMN_ENABLED_PREFIX}.*",
     ]:
         if unsupported_keys := fnmatch.filter(table_properties, key_pattern):
-            warnings.warn(f"Parquet writer option(s) {unsupported_keys} not 
implemented")
+            warnings.warn(f"Parquet writer option(s) {unsupported_keys} not 
implemented", stacklevel=2)
 
     compression_codec = table_properties.get(TableProperties.PARQUET_COMPRESSION, TableProperties.PARQUET_COMPRESSION_DEFAULT)
     compression_level = property_as_int(
diff --git a/pyiceberg/table/__init__.py b/pyiceberg/table/__init__.py
index 942e99db8..abc225d4c 100644
--- a/pyiceberg/table/__init__.py
+++ b/pyiceberg/table/__init__.py
@@ -663,7 +663,7 @@ class Transaction:
             self.table_metadata.properties.get(TableProperties.DELETE_MODE, TableProperties.DELETE_MODE_DEFAULT)
             == TableProperties.DELETE_MODE_MERGE_ON_READ
         ):
-            warnings.warn("Merge on read is not yet supported, falling back to 
copy-on-write")
+            warnings.warn("Merge on read is not yet supported, falling back to 
copy-on-write", stacklevel=2)
 
         if isinstance(delete_filter, str):
             delete_filter = _parse_row_filter(delete_filter)
@@ -731,7 +731,7 @@ class Transaction:
                             overwrite_snapshot.append_data_file(replaced_data_file)
 
         if not delete_snapshot.files_affected and not delete_snapshot.rewrites_needed:
-            warnings.warn("Delete operation did not match any records")
+            warnings.warn("Delete operation did not match any records", stacklevel=2)
 
     def upsert(
         self,
@@ -1502,7 +1502,7 @@ class Table:
         try:
             self.catalog._delete_old_metadata(self.io, self.metadata, response.metadata)
         except Exception as e:
-            warnings.warn(f"Failed to delete old metadata after commit: {e}")
+            warnings.warn(f"Failed to delete old metadata after commit: {e}", 
stacklevel=2)
 
         self.metadata = response.metadata
         self.metadata_location = response.metadata_location
@@ -1728,7 +1728,7 @@ class TableScan(ABC):
                             schema for schema in self.table_metadata.schemas if schema.schema_id == snapshot.schema_id
                         )
                     except StopIteration:
-                        warnings.warn(f"Metadata does not contain schema with id: {snapshot.schema_id}")
+                        warnings.warn(f"Metadata does not contain schema with id: {snapshot.schema_id}", stacklevel=2)
             else:
                 raise ValueError(f"Snapshot not found: {self.snapshot_id}")
 
diff --git a/pyiceberg/table/snapshots.py b/pyiceberg/table/snapshots.py
index 14b5fa833..bc7656921 100644
--- a/pyiceberg/table/snapshots.py
+++ b/pyiceberg/table/snapshots.py
@@ -187,7 +187,7 @@ class Summary(IcebergBaseModel, Mapping[str, str]):
 
     def __init__(self, operation: Operation | None = None, **data: Any) -> None:
         if operation is None:
-            warnings.warn("Encountered invalid snapshot summary: operation is 
missing, defaulting to overwrite")
+            warnings.warn("Encountered invalid snapshot summary: operation is 
missing, defaulting to overwrite", stacklevel=2)
             operation = Operation.OVERWRITE
         super().__init__(operation=operation, **data)
         self._additional_properties = data
diff --git a/ruff.toml b/ruff.toml
index df6d402e6..efd14fea7 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -59,7 +59,6 @@ select = [
 ]
 ignore = [
     "E501",
-    "B028",
     "UP037",
     "UP035",
     "UP006"
diff --git a/tests/io/test_pyarrow.py b/tests/io/test_pyarrow.py
index 3765ea6de..3bec6fd15 100644
--- a/tests/io/test_pyarrow.py
+++ b/tests/io/test_pyarrow.py
@@ -2804,7 +2804,7 @@ def test_pyarrow_io_multi_fs() -> None:
 class SomeRetryStrategy(AwsDefaultS3RetryStrategy):
     def __init__(self) -> None:
         super().__init__()
-        warnings.warn("Initialized SomeRetryStrategy 👍")
+        warnings.warn("Initialized SomeRetryStrategy 👍", stacklevel=2)
 
 
 def test_retry_strategy() -> None:
