This is an automated email from the ASF dual-hosted git repository.

junrushao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm-ffi.git


The following commit(s) were added to refs/heads/main by this push:
     new 5e2a0e5  Enable py38 build (#145)
5e2a0e5 is described below

commit 5e2a0e53a2a5445082a06796bf86fc28503dc8b6
Author: Yichen Yan <[email protected]>
AuthorDate: Thu Oct 16 23:41:04 2025 +0800

    Enable py38 build (#145)
    
    Add support for cpython 3.8.
    
    ---------
    
    Co-authored-by: Junru Shao <[email protected]>
---
 .gitignore                                 |  1 +
 pyproject.toml                             | 11 ++++++-----
 python/tvm_ffi/_dtype.py                   |  6 ++++--
 python/tvm_ffi/_optional_torch_c_dlpack.py |  4 ++++
 python/tvm_ffi/base.py                     |  4 ++--
 python/tvm_ffi/container.py                | 26 +++++++++++++++++++++++++-
 python/tvm_ffi/dataclasses/c_class.py      |  2 ++
 python/tvm_ffi/libinfo.py                  |  2 ++
 python/tvm_ffi/module.py                   |  6 ++++--
 python/tvm_ffi/stream.py                   | 16 +++++++++-------
 tests/lint/check_asf_header.py             |  7 ++++---
 tests/lint/check_version.py                |  7 ++++---
 tests/lint/clang_tidy_precommit.py         |  1 +
 tests/scripts/benchmark_dlpack.py          |  2 ++
 14 files changed, 70 insertions(+), 25 deletions(-)
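
Most of the per-file changes below follow one pattern: add "from __future__
import annotations" so that PEP 604 ("X | Y") and PEP 585 ("list[T]") syntax
can stay in the annotations while still importing on CPython 3.8. A minimal
illustrative sketch (not code from the repo; the helper name is hypothetical):

    # With the future import, annotations are stored as strings (PEP 563) and
    # never evaluated at import time, so 3.9+/3.10+ syntax is safe on 3.8.
    from __future__ import annotations

    def load_names(path: str | None = None) -> list[str]:  # hypothetical helper
        return []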

diff --git a/.gitignore b/.gitignore
index 1507523..ae2591b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,6 +31,7 @@ parts/
 sdist/
 var/
 wheels/
+wheelhouse/
 pip-wheel-metadata/
 share/python-wheels/
 *.egg-info/
diff --git a/pyproject.toml b/pyproject.toml
index 4e0e2b8..83a87a4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,7 +31,7 @@ classifiers = [
   "Intended Audience :: Science/Research",
 ]
 keywords = ["machine learning", "inference"]
-requires-python = ">=3.9"
+requires-python = ">=3.8"
 dependencies = ["typing-extensions>=4.5"]
 
 [project.urls]
@@ -173,7 +173,7 @@ testpaths = ["tests"]
 include = ["python/**/*.py", "tests/**/*.py"]
 line-length = 100
 indent-width = 4
-target-version = "py39"
+target-version = "py38"
 
 [tool.ruff.lint]
 select = [
@@ -183,6 +183,7 @@ select = [
   "RUF", # ruff, https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf
   "NPY", # numpy, https://docs.astral.sh/ruff/rules/#numpy-specific-rules-npy
   "F",   # pyflakes, https://docs.astral.sh/ruff/rules/#pyflakes-f
+  "FA",  # flake8-future-annotations, 
https://docs.astral.sh/ruff/rules/#flake8-future-annotations-fa
   "ANN", # flake8-annotations, 
https://docs.astral.sh/ruff/rules/#flake8-annotations-ann
   "PTH", # flake8-use-pathlib, 
https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth
   "D",   # pydocstyle, https://docs.astral.sh/ruff/rules/#pydocstyle-d
@@ -224,10 +225,10 @@ build-verbosity = 1
 # only build up to cp312, cp312
 # will be abi3 and can be used in future versions
 # ship 314t threaded nogil version
-build = ["cp39-*", "cp310-*", "cp311-*", "cp312-*", "cp314t-*"]
+build = ["cp38-*", "cp39-*", "cp310-*", "cp311-*", "cp312-*", "cp314t-*"]
 skip = ["*musllinux*"]
 # we only need to test on cp312
-test-skip = ["cp39-*", "cp310-*", "cp311-*"]
+test-skip = ["cp38-*", "cp39-*", "cp310-*", "cp311-*"]
 # focus on testing abi3 wheel
 build-frontend = "build[uv]"
 test-command = "pytest {package}/tests/python -vvs"
@@ -244,7 +245,7 @@ environment = { MACOSX_DEPLOYMENT_TARGET = "10.14" }
 archs = ["AMD64"]
 
 [tool.mypy]
-python_version = "3.9"
+python_version = "3.8"
 show_error_codes = true
 mypy_path = ["python", "examples", "tests/python"]
 files = ["python/tvm_ffi", "examples", "tests/python"]
diff --git a/python/tvm_ffi/_dtype.py b/python/tvm_ffi/_dtype.py
index 3aae074..b712c65 100644
--- a/python/tvm_ffi/_dtype.py
+++ b/python/tvm_ffi/_dtype.py
@@ -17,6 +17,8 @@
 """dtype class."""
 
 # pylint: disable=invalid-name
+from __future__ import annotations
+
 from enum import IntEnum
 from typing import Any, ClassVar
 
@@ -63,7 +65,7 @@ class dtype(str):
 
     _NUMPY_DTYPE_TO_STR: ClassVar[dict[Any, str]] = {}
 
-    def __new__(cls, content: Any) -> "dtype":
+    def __new__(cls, content: Any) -> dtype:
         content = str(content)
         val = str.__new__(cls, content)
         val.__tvm_ffi_dtype__ = core.DataType(content)
@@ -72,7 +74,7 @@ class dtype(str):
     def __repr__(self) -> str:
         return f"dtype('{self}')"
 
-    def with_lanes(self, lanes: int) -> "dtype":
+    def with_lanes(self, lanes: int) -> dtype:
         """Create a new dtype with the given number of lanes.
 
         Parameters
diff --git a/python/tvm_ffi/_optional_torch_c_dlpack.py b/python/tvm_ffi/_optional_torch_c_dlpack.py
index ee50e5f..3cc1529 100644
--- a/python/tvm_ffi/_optional_torch_c_dlpack.py
+++ b/python/tvm_ffi/_optional_torch_c_dlpack.py
@@ -30,6 +30,8 @@ This module will load slowly at first time due to JITing,
 subsequent calls will be much faster.
 """
 
+from __future__ import annotations
+
 import warnings
 from typing import Any
 
@@ -69,6 +71,7 @@ DLDataType getDLDataTypeForDLPackv1(const Tensor& t) {
     case ScalarType::UInt64:
       dtype.code = DLDataTypeCode::kDLUInt;
       break;
+#if TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR >= 6
     case ScalarType::Int1:
     case ScalarType::Int2:
     case ScalarType::Int3:
@@ -79,6 +82,7 @@ DLDataType getDLDataTypeForDLPackv1(const Tensor& t) {
     case ScalarType::Char:
       dtype.code = DLDataTypeCode::kDLInt;
       break;
+#endif
     case ScalarType::Double:
       dtype.code = DLDataTypeCode::kDLFloat;
       break;
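
The new #if guard above keeps the JIT-compiled helper building against older
PyTorch, where the sub-byte ScalarType values (Int1..Int7) are presumably not
yet defined. A rough Python-side counterpart of the same version gate (an
assumption-level sketch, not part of the patch):

    import torch  # assumes torch is importable

    # Roughly mirror the TORCH_VERSION_MAJOR/MINOR >= 2.6 threshold above.
    _major, _minor = (int(p) for p in torch.__version__.split(".")[:2])
    HAS_SUBBYTE_INT_SCALARS = (_major, _minor) >= (2, 6)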
diff --git a/python/tvm_ffi/base.py b/python/tvm_ffi/base.py
index 3ec2feb..3a3eede 100644
--- a/python/tvm_ffi/base.py
+++ b/python/tvm_ffi/base.py
@@ -29,8 +29,8 @@ logger = logging.getLogger(__name__)
 # ----------------------------
 # Python3 version.
 # ----------------------------
-if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 9):
-    PY3STATEMENT = "The minimal Python requirement is Python 3.9"
+if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 8):
+    PY3STATEMENT = "The minimal Python requirement is Python 3.8"
     raise Exception(PY3STATEMENT)
 
 # ----------------------------
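
For reference, the base.py gate above can also be written as a tuple
comparison; a sketch of an equivalent check for 3.x releases (not what the
patch uses):

    import sys

    # Equivalent minimum-version gate via tuple comparison.
    if sys.version_info < (3, 8):
        raise Exception("The minimal Python requirement is Python 3.8")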
diff --git a/python/tvm_ffi/container.py b/python/tvm_ffi/container.py
index a1b51a4..046f8c9 100644
--- a/python/tvm_ffi/container.py
+++ b/python/tvm_ffi/container.py
@@ -20,15 +20,39 @@ from __future__ import annotations
 
 import itertools
 import operator
+import sys
 from collections.abc import ItemsView as ItemsViewBase
 from collections.abc import Iterable, Iterator, Mapping, Sequence
 from collections.abc import KeysView as KeysViewBase
 from collections.abc import ValuesView as ValuesViewBase
-from typing import Any, Callable, SupportsIndex, TypeVar, cast, overload
+from typing import (
+    Any,
+    Callable,
+    SupportsIndex,
+    TypeVar,
+    cast,
+    overload,
+)
 
 from . import _ffi_api, core
 from .registry import register_object
 
+# workarounds for python 3.8
+if not (sys.version_info[0] == 3 and sys.version_info[1] <= 8):
+    from typing import (
+        ItemsView as ItemsViewBase,
+    )
+    from typing import (
+        KeysView as KeysViewBase,
+    )
+    from typing import (
+        Mapping,
+        Sequence,
+    )
+    from typing import (
+        ValuesView as ValuesViewBase,
+    )
+
 __all__ = ["Array", "Map"]
 
 
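
The typing-module fallback in container.py exists because PEP 585 generics
only landed in 3.9: on 3.8 the collections.abc ABCs cannot be parameterised at
runtime, while the typing aliases still can. A minimal illustration (not repo
code):

    import sys
    import typing
    import collections.abc

    if sys.version_info < (3, 9):
        # collections.abc.Sequence[int] would raise TypeError on 3.8
        IntSeq = typing.Sequence[int]
    else:
        IntSeq = collections.abc.Sequence[int]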
diff --git a/python/tvm_ffi/dataclasses/c_class.py b/python/tvm_ffi/dataclasses/c_class.py
index 35c22d7..0a09740 100644
--- a/python/tvm_ffi/dataclasses/c_class.py
+++ b/python/tvm_ffi/dataclasses/c_class.py
@@ -23,6 +23,8 @@ properties that forward to the underlying C++ object, while an ``__init__``
 method is synthesized to call the FFI constructor when requested.
 """
 
+from __future__ import annotations
+
 from collections.abc import Callable
 from dataclasses import InitVar
 from typing import ClassVar, TypeVar, get_origin, get_type_hints
diff --git a/python/tvm_ffi/libinfo.py b/python/tvm_ffi/libinfo.py
index c59bd60..a16d5fc 100644
--- a/python/tvm_ffi/libinfo.py
+++ b/python/tvm_ffi/libinfo.py
@@ -20,6 +20,8 @@ This module also provides helpers to locate and load platform-specific shared
 libraries by a base name (e.g., ``tvm_ffi`` -> ``libtvm_ffi.so`` on Linux).
 """
 
+from __future__ import annotations
+
 import os
 import sys
 from pathlib import Path
diff --git a/python/tvm_ffi/module.py b/python/tvm_ffi/module.py
index 659a641..1b53d89 100644
--- a/python/tvm_ffi/module.py
+++ b/python/tvm_ffi/module.py
@@ -15,7 +15,9 @@
 # specific language governing permissions and limitations
 # under the License.
 """Module related objects and functions."""
+
 # pylint: disable=invalid-name
+from __future__ import annotations
 
 from collections.abc import Sequence
 from enum import IntEnum
@@ -98,7 +100,7 @@ class Module(core.Object):
         return _ffi_api.ModuleGetKind(self)
 
     @property
-    def imports(self) -> list["Module"]:
+    def imports(self) -> list[Module]:
         """Get imported modules.
 
         Returns
@@ -168,7 +170,7 @@ class Module(core.Object):
             raise AttributeError(f"Module has no function '{name}'")
         return func
 
-    def import_module(self, module: "Module") -> None:
+    def import_module(self, module: Module) -> None:
         """Add module to the import list of current one.
 
         Parameters
diff --git a/python/tvm_ffi/stream.py b/python/tvm_ffi/stream.py
index b2083f3..ee71e5d 100644
--- a/python/tvm_ffi/stream.py
+++ b/python/tvm_ffi/stream.py
@@ -17,8 +17,10 @@
 # pylint: disable=invalid-name
 """Stream context."""
 
+from __future__ import annotations
+
 from ctypes import c_void_p
-from typing import Any, Union
+from typing import Any
 
 from . import core
 from ._tensor import device
@@ -46,13 +48,13 @@ class StreamContext:
 
     """
 
-    def __init__(self, device: core.Device, stream: Union[int, c_void_p]) -> None:
+    def __init__(self, device: core.Device, stream: int | c_void_p) -> None:
         """Initialize a stream context with a device and stream handle."""
         self.device_type = device.dlpack_device_type()
         self.device_id = device.index
         self.stream = stream
 
-    def __enter__(self) -> "StreamContext":
+    def __enter__(self) -> StreamContext:
         """Enter the context and set the current stream."""
         self.prev_stream = core._env_set_current_stream(
             self.device_type, self.device_id, self.stream
@@ -76,7 +78,7 @@ try:
             """Initialize with an optional Torch stream/graph context 
wrapper."""
             self.torch_context = context
 
-        def __enter__(self) -> "TorchStreamContext":
+        def __enter__(self) -> TorchStreamContext:
             """Enter both Torch and FFI stream contexts."""
             if self.torch_context:
                 self.torch_context.__enter__()
@@ -93,7 +95,7 @@ try:
                 self.torch_context.__exit__(*args)
             self.ffi_context.__exit__(*args)
 
-    def use_torch_stream(context: Any = None) -> "TorchStreamContext":
+    def use_torch_stream(context: Any = None) -> TorchStreamContext:
         """Create an FFI stream context with a Torch stream or graph.
 
         cuda graph or current stream if `None` provided.
@@ -129,12 +131,12 @@ try:
 
 except ImportError:
 
-    def use_torch_stream(context: Any = None) -> "TorchStreamContext":
+    def use_torch_stream(context: Any = None) -> TorchStreamContext:
         """Raise an informative error when Torch is unavailable."""
         raise ImportError("Cannot import torch")
 
 
-def use_raw_stream(device: core.Device, stream: Union[int, c_void_p]) -> StreamContext:
+def use_raw_stream(device: core.Device, stream: int | c_void_p) -> StreamContext:
     """Create a ffi stream context with given device and stream handle.
 
     Parameters
diff --git a/tests/lint/check_asf_header.py b/tests/lint/check_asf_header.py
index 713520f..efc3546 100644
--- a/tests/lint/check_asf_header.py
+++ b/tests/lint/check_asf_header.py
@@ -16,12 +16,13 @@
 # under the License.
 """Helper tool to add ASF header to files that cannot be handled by Rat."""
 
+from __future__ import annotations
+
 import argparse
 import fnmatch
 import subprocess
 import sys
 from pathlib import Path
-from typing import Optional
 
 header_cstyle = """
 /*
@@ -181,7 +182,7 @@ def should_skip_file(filepath: str) -> bool:
     return False
 
 
-def get_git_files() -> Optional[list[str]]:
+def get_git_files() -> list[str] | None:
     """Get list of files tracked by git."""
     try:
         result = subprocess.run(
@@ -242,7 +243,7 @@ def check_header(fname: str, header: str) -> bool:
     return True
 
 
-def collect_files() -> Optional[list[str]]:
+def collect_files() -> list[str] | None:
     """Collect all files that need header checking from git."""
     files = []
 
diff --git a/tests/lint/check_version.py b/tests/lint/check_version.py
index f853d9e..10aca3b 100644
--- a/tests/lint/check_version.py
+++ b/tests/lint/check_version.py
@@ -16,15 +16,16 @@
 # under the License.
 """Helper tool to check version consistency between pyproject.toml and 
__init__.py."""
 
+from __future__ import annotations
+
 import re
 import sys
 from pathlib import Path
-from typing import Optional
 
 import tomli
 
 
-def read_pyproject_version(pyproject_path: Path) -> Optional[str]:
+def read_pyproject_version(pyproject_path: Path) -> str | None:
     """Read version from pyproject.toml."""
     with pyproject_path.open("rb") as f:
         data = tomli.load(f)
@@ -32,7 +33,7 @@ def read_pyproject_version(pyproject_path: Path) -> Optional[str]:
     return data.get("project", {}).get("version")
 
 
-def read_init_version(init_path: Path) -> Optional[str]:
+def read_init_version(init_path: Path) -> str | None:
     """Read __version__ from __init__.py."""
     with init_path.open(encoding="utf-8") as f:
         content = f.read()
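
check_version.py keeps using tomli (the third-party TOML reader; the stdlib
tomllib only exists on 3.11+), so the same pattern works on 3.8. A condensed
sketch of the read shown above:

    from pathlib import Path

    import tomli  # stdlib tomllib requires Python 3.11+

    with Path("pyproject.toml").open("rb") as f:
        version = tomli.load(f).get("project", {}).get("version")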
diff --git a/tests/lint/clang_tidy_precommit.py b/tests/lint/clang_tidy_precommit.py
index 3b0ec3c..9b20d7f 100644
--- a/tests/lint/clang_tidy_precommit.py
+++ b/tests/lint/clang_tidy_precommit.py
@@ -14,6 +14,7 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+from __future__ import annotations
 
 import os
 import subprocess
diff --git a/tests/scripts/benchmark_dlpack.py b/tests/scripts/benchmark_dlpack.py
index 0196ee7..a4833f2 100644
--- a/tests/scripts/benchmark_dlpack.py
+++ b/tests/scripts/benchmark_dlpack.py
@@ -30,6 +30,8 @@ Summary of some takeaways:
 - torch.add on gpu takes about 3.7us per call, giving us an idea of what roughly we need to get to in eager mode.
 """
 
+from __future__ import annotations
+
 import time
 from typing import Any, Callable
 
