This is an automated email from the ASF dual-hosted git repository.
junrushao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm-ffi.git
The following commit(s) were added to refs/heads/main by this push:
new a754afe chore: Run examples on `main` commits (#353)
a754afe is described below
commit a754afe248c0a29509236b5e0c8d2e30a26f7ffa
Author: Junru Shao <[email protected]>
AuthorDate: Sun Dec 21 16:55:00 2025 -0800
chore: Run examples on `main` commits (#353)
---
.github/workflows/ci_mainline_only.yml  | 95 +++++++++++++++++++++++++++++++++
examples/packaging/README.md            | 13 ++---
examples/packaging/pyproject.toml       |  2 +-
examples/packaging/run_example.py       | 22 ++++----
examples/quickstart/CMakeLists.txt      | 46 +++++++++-------
examples/quickstart/README.md           | 31 +++++++++--
examples/quickstart/run_all_cpu.bat     | 38 +++++++++++++
examples/stable_c_abi/CMakeLists.txt    | 25 ++++++++-
examples/stable_c_abi/README.md         | 22 +++++++-
examples/stable_c_abi/run_all.bat       | 35 ++++++++++++
examples/stable_c_abi/src/add_one_cpu.c |  4 +-
11 files changed, 286 insertions(+), 47 deletions(-)
diff --git a/.github/workflows/ci_mainline_only.yml b/.github/workflows/ci_mainline_only.yml
index c45a973..1912acb 100644
--- a/.github/workflows/ci_mainline_only.yml
+++ b/.github/workflows/ci_mainline_only.yml
@@ -90,3 +90,98 @@ jobs:
linux_image: ${{ matrix.linux_image }}
checkout_ref: ${{ github.sha }}
build_sdist: ${{ matrix.build_sdist }}
+
+ examples:
+ name: Run examples
+ needs: [prepare]
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - {os: ubuntu-latest, python_version: "3.14"}
+ - {os: macos-14, python_version: "3.13"}
+ - {os: windows-latest, python_version: "3.12"}
+ steps:
+ - uses: actions/checkout@v5
+ with:
+ submodules: recursive
+ fetch-depth: 0
+ fetch-tags: true
+
+ - name: Set up uv
+        uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v6.7.0
+ with:
+ python-version: ${{ matrix.python_version }}
+ activate-environment: true
+
+ - uses: ./.github/actions/detect-env-vars
+ id: env_vars
+
+ - name: Install dependencies for examples
+ env:
+ CMAKE_BUILD_PARALLEL_LEVEL: ${{ steps.env_vars.outputs.cpu_count }}
+ run: |
+ uv pip install --reinstall --verbose -e ".[torch]"
+          uv pip install --reinstall --verbose numpy scikit-build-core torch-c-dlpack-ext
+
+ - name: Run example/quickstart (CPU) [posix]
+ if: ${{ runner.os != 'Windows' }}
+ env:
+ CMAKE_BUILD_PARALLEL_LEVEL: ${{ steps.env_vars.outputs.cpu_count }}
+ run: |
+ pushd examples/quickstart
+ rm -rf build
+ bash run_all_cpu.sh
+ popd
+
+ - name: Run example/quickstart (CPU) [windows]
+ if: ${{ runner.os == 'Windows' }}
+ shell: cmd
+ env:
+ CMAKE_BUILD_PARALLEL_LEVEL: ${{ steps.env_vars.outputs.cpu_count }}
+ run: |
+ cd examples\quickstart
+ call run_all_cpu.bat
+
+ - name: Run example/stable_c_abi [posix]
+ if: ${{ runner.os != 'Windows' }}
+ env:
+ CMAKE_BUILD_PARALLEL_LEVEL: ${{ steps.env_vars.outputs.cpu_count }}
+ run: |
+ pushd examples/stable_c_abi
+ rm -rf build
+ bash run_all.sh
+ popd
+
+ - name: Run example/stable_c_abi [windows]
+ if: ${{ runner.os == 'Windows' }}
+ shell: cmd
+ env:
+ CMAKE_BUILD_PARALLEL_LEVEL: ${{ steps.env_vars.outputs.cpu_count }}
+ run: |
+ cd examples\stable_c_abi
+ call run_all.bat
+
+ - name: Run example/packaging [posix]
+ if: ${{ runner.os != 'Windows' }}
+ env:
+ CMAKE_BUILD_PARALLEL_LEVEL: ${{ steps.env_vars.outputs.cpu_count }}
+ run: |
+ pushd examples/packaging
+          # This directory will be auto-generated in `CMakeLists.txt` by setting `STUB_INIT ON`
+ rm -rf python/my_ffi_extension
+ uv pip install --verbose . --no-build-isolation
+ python run_example.py
+ popd
+
+ - name: Run example/packaging [windows]
+ if: ${{ runner.os == 'Windows' }}
+ shell: pwsh
+ env:
+ CMAKE_BUILD_PARALLEL_LEVEL: ${{ steps.env_vars.outputs.cpu_count }}
+ run: |
+ Set-Location examples/packaging
+ Remove-Item -Recurse -Force python/my_ffi_extension
+ uv pip install --verbose . --no-build-isolation
+ python run_example.py
diff --git a/examples/packaging/README.md b/examples/packaging/README.md
index b161fe7..96d0939 100644
--- a/examples/packaging/README.md
+++ b/examples/packaging/README.md
@@ -28,9 +28,11 @@ packaging as well.
## Install the wheel
+Use `uv pip` (the same tooling used in CI) to build and install the example wheel:
+
```bash
cd examples/packaging
-pip install .
+uv pip install --reinstall --verbose .
```
### Note on build and auditwheel
@@ -44,15 +46,10 @@ After installing the `my_ffi_extension` example package, you can run the followi
that invokes the `add_one` function exposed.
```bash
-python run_example.py add_one
+python run_example.py
```
-You can also run the following command to see how error is raised and propagated
-across the language boundaries.
-
-```bash
-python run_example.py raise_error
-```
+This runs three flows: calling `add_one`, demonstrating `raise_error` with a propagated traceback, and constructing/using the `IntPair` object.
When possible, tvm_ffi will try to preserve backtrace across language
boundary. You will see output like
diff --git a/examples/packaging/pyproject.toml b/examples/packaging/pyproject.toml
index 7825ca8..7385120 100644
--- a/examples/packaging/pyproject.toml
+++ b/examples/packaging/pyproject.toml
@@ -48,7 +48,7 @@ build.verbose = true
# CMake configuration
cmake.version = "CMakeLists.txt"
-cmake.build-type = "RelWithDebugInfo"
+cmake.build-type = "RelWithDebInfo"
# Logging
logging.level = "INFO"
diff --git a/examples/packaging/run_example.py b/examples/packaging/run_example.py
index 3f3e636..94c90b0 100644
--- a/examples/packaging/run_example.py
+++ b/examples/packaging/run_example.py
@@ -16,7 +16,7 @@
# Base logic to load library for extension package
"""Run functions from the example packaged tvm-ffi extension."""
-import sys
+import traceback
import my_ffi_extension
import torch
@@ -24,6 +24,7 @@ import torch
def run_add_one() -> None:
"""Invoke add_one from the extension and print the result."""
+ print("=========== Example 1: add_one ===========")
x = torch.tensor([1, 2, 3, 4, 5], dtype=torch.float32)
y = torch.empty_like(x)
my_ffi_extension.LIB.add_one(x, y)
@@ -32,23 +33,22 @@ def run_add_one() -> None:
def run_raise_error() -> None:
"""Invoke raise_error from the extension to demonstrate error handling."""
- my_ffi_extension.raise_error("This is an error")
+ print("=========== Example 2: raise_error ===========")
+ try:
+ my_ffi_extension.raise_error("This is an error")
+ except RuntimeError:
+ traceback.print_exc()
def run_int_pair() -> None:
"""Invoke IntPair from the extension to demonstrate object handling."""
+ print("=========== Example 3: IntPair ===========")
pair = my_ffi_extension.IntPair(1, 2)
print(f"first={pair.get_first()}")
print(f"second={my_ffi_extension.IntPair.static_get_second(pair)}")
if __name__ == "__main__":
- if len(sys.argv) > 1:
- if sys.argv[1] == "add_one":
- run_add_one()
- elif sys.argv[1] == "int_pair":
- run_int_pair()
- elif sys.argv[1] == "raise_error":
- run_raise_error()
- else:
- print("Usage: python run_example.py <add_one|int_pair|raise_error>")
+ run_add_one()
+ run_raise_error()
+ run_int_pair()
diff --git a/examples/quickstart/CMakeLists.txt b/examples/quickstart/CMakeLists.txt
index ba2d7a4..eaf7659 100644
--- a/examples/quickstart/CMakeLists.txt
+++ b/examples/quickstart/CMakeLists.txt
@@ -22,6 +22,29 @@ option(EXAMPLE_NAME "Which example to build ('compile_cpu'/'compile_cuda'/'load_
)
message(STATUS "Building example: ${EXAMPLE_NAME}")
+# cmake-lint: disable=C0111
+function (set_flat_output_dirs target_name)
+ set(output_dir "${CMAKE_BINARY_DIR}")
+  # Ensure multi-config generators (e.g., MSVC) output to the same flat directory.
+ foreach (config IN ITEMS "" DEBUG RELEASE RELWITHDEBINFO MINSIZEREL)
+ if (config STREQUAL "")
+      set_property(TARGET ${target_name} PROPERTY RUNTIME_OUTPUT_DIRECTORY "${output_dir}")
+      set_property(TARGET ${target_name} PROPERTY LIBRARY_OUTPUT_DIRECTORY "${output_dir}")
+      set_property(TARGET ${target_name} PROPERTY ARCHIVE_OUTPUT_DIRECTORY "${output_dir}")
+ else ()
+ set_property(
+        TARGET ${target_name} PROPERTY RUNTIME_OUTPUT_DIRECTORY_${config} "${output_dir}"
+ )
+ set_property(
+        TARGET ${target_name} PROPERTY LIBRARY_OUTPUT_DIRECTORY_${config} "${output_dir}"
+ )
+ set_property(
+        TARGET ${target_name} PROPERTY ARCHIVE_OUTPUT_DIRECTORY_${config} "${output_dir}"
+ )
+ endif ()
+ endforeach ()
+endfunction ()
+
# Run `tvm_ffi.config --cmakedir` to find tvm-ffi package
find_package(
Python
@@ -40,34 +63,21 @@ if (EXAMPLE_NAME STREQUAL "compile_cpu")
add_library(add_one_cpu SHARED compile/add_one_cpu.cc)
target_link_libraries(add_one_cpu PRIVATE tvm_ffi_header)
target_link_libraries(add_one_cpu PRIVATE tvm_ffi_shared)
- set_target_properties(
- add_one_cpu
- PROPERTIES LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/"
- PREFIX ""
- SUFFIX ".so"
- )
+ set_target_properties(add_one_cpu PROPERTIES PREFIX "" SUFFIX ".so")
+ set_flat_output_dirs(add_one_cpu)
elseif (EXAMPLE_NAME STREQUAL "compile_cuda")
# Example 2. CUDA `add_one`
enable_language(CUDA)
add_library(add_one_cuda SHARED compile/add_one_cuda.cu)
target_link_libraries(add_one_cuda PRIVATE tvm_ffi_shared)
- set_target_properties(
- add_one_cuda
- PROPERTIES LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/"
- PREFIX ""
- SUFFIX ".so"
- )
+ set_target_properties(add_one_cuda PROPERTIES PREFIX "" SUFFIX ".so")
+ set_flat_output_dirs(add_one_cuda)
elseif (EXAMPLE_NAME STREQUAL "load_cpp")
# Example 3. Load C++ shared library
add_executable(load_cpp load/load_cpp.cc)
target_link_libraries(load_cpp PRIVATE tvm_ffi_header)
target_link_libraries(load_cpp PRIVATE tvm_ffi_shared)
- set_target_properties(
- load_cpp
- PROPERTIES LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/"
- PREFIX ""
- SUFFIX ""
- )
+ set_flat_output_dirs(load_cpp)
else ()
message(FATAL_ERROR "Unknown EXAMPLE_NAME option: ${EXAMPLE_NAME}. "
"Expected: 'compile_cpu', 'compile_cuda', 'load_cpp'."
diff --git a/examples/quickstart/README.md b/examples/quickstart/README.md
index 4e515cc..1c23eca 100644
--- a/examples/quickstart/README.md
+++ b/examples/quickstart/README.md
@@ -19,7 +19,21 @@
This directory contains all the source code for
[tutorial](https://tvm.apache.org/ffi/get_started/quickstart.html).
-## Compile and Distribute `add_one_*`
+## Run everything (CPU path)
+
+On Linux/macOS:
+
+```bash
+bash run_all_cpu.sh
+```
+
+On Windows:
+
+```batch
+run_all_cpu.bat
+```
+
+## Compile and Distribute `add_one_*` manually
To compile the C++ Example:
@@ -28,7 +42,9 @@ cmake . -B build -DEXAMPLE_NAME="compile_cpu" -DCMAKE_BUILD_TYPE=RelWithDebInfo
cmake --build build --config RelWithDebInfo
```
-To compile CUDA Example:
+This produces `build/add_one_cpu.so`.
+
+To compile CUDA Example (Linux with CUDA toolchain available):
```bash
cmake . -B build -DEXAMPLE_NAME="compile_cuda" -DCMAKE_BUILD_TYPE=RelWithDebInfo
@@ -37,11 +53,10 @@ cmake --build build --config RelWithDebInfo
## Load the Distributed `add_one_*`
-To run library loading examples across ML frameworks:
+To run library loading examples across ML frameworks (requires CUDA for the CUDA example):
```bash
python load/load_pytorch.py
-python load/load_jax.py
python load/load_numpy.py
python load/load_cupy.py
```
@@ -53,3 +68,11 @@ cmake . -B build -DEXAMPLE_NAME="load_cpp" -DCMAKE_BUILD_TYPE=RelWithDebInfo
cmake --build build --config RelWithDebInfo
build/load_cpp
```
+
+The executable is emitted as `build/load_cpp` (`build/load_cpp.exe` on Windows).
+
+For a CUDA end-to-end run, use:
+
+```bash
+bash run_all_cuda.sh
+```
diff --git a/examples/quickstart/run_all_cpu.bat b/examples/quickstart/run_all_cpu.bat
new file mode 100644
index 0000000..5ce731c
--- /dev/null
+++ b/examples/quickstart/run_all_cpu.bat
@@ -0,0 +1,38 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one
+rem or more contributor license agreements. See the NOTICE file
+rem distributed with this work for additional information
+rem regarding copyright ownership. The ASF licenses this file
+rem to you under the Apache License, Version 2.0 (the
+rem "License"); you may not use this file except in compliance
+rem with the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing,
+rem software distributed under the License is distributed on an
+rem "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+rem KIND, either express or implied. See the License for the
+rem specific language governing permissions and limitations
+rem under the License.
+
+setlocal enabledelayedexpansion
+cd /d "%~dp0"
+
+if exist build rmdir /s /q build
+
+rem To compile `compile/add_one_cpu.cc` to shared library `build/add_one_cpu.so`
+cmake . -B build -DEXAMPLE_NAME="compile_cpu" -DCMAKE_BUILD_TYPE=RelWithDebInfo
+cmake --build build --config RelWithDebInfo
+
+rem To load and run `add_one_cpu.so` in NumPy
+python load\load_numpy.py
+
+rem To load and run `add_one_cpu.so` in C++
+for /f "delims=" %%i in ('python -c "from tvm_ffi import libinfo; import pathlib; print(pathlib.Path(libinfo.find_libtvm_ffi()).parent)"') do set "TVM_FFI_LIBDIR=%%i"
+set "PATH=%TVM_FFI_LIBDIR%;%PATH%"
+cmake . -B build -DEXAMPLE_NAME="load_cpp" -DCMAKE_BUILD_TYPE=RelWithDebInfo
+cmake --build build --config RelWithDebInfo
+build\load_cpp.exe
+
+endlocal
diff --git a/examples/stable_c_abi/CMakeLists.txt b/examples/stable_c_abi/CMakeLists.txt
index 81bbd1d..a313034 100644
--- a/examples/stable_c_abi/CMakeLists.txt
+++ b/examples/stable_c_abi/CMakeLists.txt
@@ -20,6 +20,28 @@ project(tvm_ffi_example LANGUAGES C)
option(EXAMPLE_NAME "Which example to build ('kernel'/'load')" "kernel")
message(STATUS "Building example: ${EXAMPLE_NAME}")
+# cmake-lint: disable=C0111
+function (set_flat_output_dirs target_name)
+ set(output_dir "${CMAKE_BINARY_DIR}")
+ foreach (config IN ITEMS "" DEBUG RELEASE RELWITHDEBINFO MINSIZEREL)
+ if (config STREQUAL "")
+      set_property(TARGET ${target_name} PROPERTY RUNTIME_OUTPUT_DIRECTORY "${output_dir}")
+      set_property(TARGET ${target_name} PROPERTY LIBRARY_OUTPUT_DIRECTORY "${output_dir}")
+      set_property(TARGET ${target_name} PROPERTY ARCHIVE_OUTPUT_DIRECTORY "${output_dir}")
+ else ()
+ set_property(
+        TARGET ${target_name} PROPERTY RUNTIME_OUTPUT_DIRECTORY_${config} "${output_dir}"
+ )
+ set_property(
+        TARGET ${target_name} PROPERTY LIBRARY_OUTPUT_DIRECTORY_${config} "${output_dir}"
+ )
+ set_property(
+        TARGET ${target_name} PROPERTY ARCHIVE_OUTPUT_DIRECTORY_${config} "${output_dir}"
+ )
+ endif ()
+ endforeach ()
+endfunction ()
+
# Run `tvm_ffi.config --cmakedir` to find tvm-ffi package
find_package(
Python
@@ -47,6 +69,7 @@ if (EXAMPLE_NAME STREQUAL "kernel")
C_STANDARD_REQUIRED YES
C_EXTENSIONS NO
)
+ set_flat_output_dirs(add_one_cpu)
elseif (EXAMPLE_NAME STREQUAL "load")
# Example 2. Load `add_one_cpu` shared library in C
add_executable(load src/load.c)
@@ -56,11 +79,11 @@ elseif (EXAMPLE_NAME STREQUAL "load")
load
PROPERTIES LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/"
PREFIX ""
- SUFFIX ""
C_STANDARD 11
C_STANDARD_REQUIRED YES
C_EXTENSIONS NO
)
+ set_flat_output_dirs(load)
else ()
message(FATAL_ERROR "Unknown EXAMPLE_NAME option: ${EXAMPLE_NAME}. "
"Expected: 'kernel' or 'load'."
diff --git a/examples/stable_c_abi/README.md b/examples/stable_c_abi/README.md
index 3a93a7d..494c7f1 100644
--- a/examples/stable_c_abi/README.md
+++ b/examples/stable_c_abi/README.md
@@ -19,7 +19,21 @@
This directory contains all the source code for
[tutorial](https://tvm.apache.org/ffi/get_started/stable_c_abi.html).
-## Compile and Distribute `add_one_cpu`
+## Run everything
+
+On Linux/macOS:
+
+```bash
+bash run_all.sh
+```
+
+On Windows:
+
+```batch
+run_all.bat
+```
+
+## Compile and Distribute `add_one_cpu` manually
To compile the C Example:
@@ -28,7 +42,9 @@ cmake . -B build -DEXAMPLE_NAME="kernel" -DCMAKE_BUILD_TYPE=RelWithDebInfo
cmake --build build --config RelWithDebInfo
```
-## Load the Distributed `add_one_cpu`
+This produces `build/add_one_cpu.so`.
+
+## Load the Distributed `add_one_cpu` manually
To run library loading example in C:
@@ -37,3 +53,5 @@ cmake . -B build -DEXAMPLE_NAME="load" -DCMAKE_BUILD_TYPE=RelWithDebInfo
cmake --build build --config RelWithDebInfo
build/load
```
+
+The executable is emitted as `build/load` (`build/load.exe` on Windows).
diff --git a/examples/stable_c_abi/run_all.bat b/examples/stable_c_abi/run_all.bat
new file mode 100644
index 0000000..4a15f54
--- /dev/null
+++ b/examples/stable_c_abi/run_all.bat
@@ -0,0 +1,35 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one
+rem or more contributor license agreements. See the NOTICE file
+rem distributed with this work for additional information
+rem regarding copyright ownership. The ASF licenses this file
+rem to you under the Apache License, Version 2.0 (the
+rem "License"); you may not use this file except in compliance
+rem with the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing,
+rem software distributed under the License is distributed on an
+rem "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+rem KIND, either express or implied. See the License for the
+rem specific language governing permissions and limitations
+rem under the License.
+
+setlocal enabledelayedexpansion
+cd /d "%~dp0"
+
+if exist build rmdir /s /q build
+
+rem To compile `src/add_one_cpu.c` to shared library `build/add_one_cpu.so`
+cmake . -B build -DEXAMPLE_NAME="kernel" -DCMAKE_BUILD_TYPE=RelWithDebInfo
+cmake --build build --config RelWithDebInfo
+
+rem To compile `src/load.c` to executable `build/load`
+for /f "delims=" %%i in ('python -c "from tvm_ffi import libinfo; import pathlib; print(pathlib.Path(libinfo.find_libtvm_ffi()).parent)"') do set "TVM_FFI_LIBDIR=%%i"
+set "PATH=%TVM_FFI_LIBDIR%;%PATH%"
+cmake . -B build -DEXAMPLE_NAME="load" -DCMAKE_BUILD_TYPE=RelWithDebInfo
+cmake --build build --config RelWithDebInfo
+build\load.exe
+
+endlocal
diff --git a/examples/stable_c_abi/src/add_one_cpu.c b/examples/stable_c_abi/src/add_one_cpu.c
index f273014..9a6cc27 100644
--- a/examples/stable_c_abi/src/add_one_cpu.c
+++ b/examples/stable_c_abi/src/add_one_cpu.c
@@ -24,8 +24,8 @@
// clang-format off
// [example.begin]
// File: src/add_one_cpu.cc
-TVM_FFI_DLL int __tvm_ffi_add_one_cpu(void* handle, const TVMFFIAny* args, int32_t num_args,
- TVMFFIAny* result) {
+TVM_FFI_DLL_EXPORT int __tvm_ffi_add_one_cpu(void* handle, const TVMFFIAny* args,
+                                             int32_t num_args, TVMFFIAny* result) {
// Step 1. Extract inputs from `Any`
// Step 1.1. Extract `x := args[0]`
DLTensor* x;