This is an automated email from the ASF dual-hosted git repository.

ruihangl pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 00ae64744e [FFI][ABI] Refactor the naming of DLPack speed converter 
(#18308)
00ae64744e is described below

commit 00ae64744ef4657766c5bd1f5763a7e5830e08e2
Author: Tianqi Chen <[email protected]>
AuthorDate: Fri Sep 12 19:55:05 2025 -0400

    [FFI][ABI] Refactor the naming of DLPack speed converter (#18308)
    
    Update the name to avoid potential confusion
---
 ffi/pyproject.toml                                 |  2 +-
 ffi/python/tvm_ffi/_optional_torch_c_dlpack.py     | 22 +++++++------
 ffi/python/tvm_ffi/cython/base.pxi                 | 12 ++++----
 ffi/python/tvm_ffi/cython/function.pxi             | 36 +++++++++++-----------
 ffi/python/tvm_ffi/cython/tensor.pxi               | 18 +++++------
 ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h | 19 ++++++------
 6 files changed, 55 insertions(+), 54 deletions(-)

diff --git a/ffi/pyproject.toml b/ffi/pyproject.toml
index 11e65a9065..8c146f41c4 100644
--- a/ffi/pyproject.toml
+++ b/ffi/pyproject.toml
@@ -17,7 +17,7 @@
 
 [project]
 name = "apache-tvm-ffi"
-version = "0.1.0a11"
+version = "0.1.0a12"
 description = "tvm ffi"
 
 authors = [{ name = "TVM FFI team" }]
diff --git a/ffi/python/tvm_ffi/_optional_torch_c_dlpack.py 
b/ffi/python/tvm_ffi/_optional_torch_c_dlpack.py
index f4af393025..fc5851af17 100644
--- a/ffi/python/tvm_ffi/_optional_torch_c_dlpack.py
+++ b/ffi/python/tvm_ffi/_optional_torch_c_dlpack.py
@@ -117,9 +117,11 @@ DLDataType getDLDataTypeForDLPackv1(const Tensor& t) {
     case ScalarType::Float8_e8m0fnu:
       dtype.code = DLDataTypeCode::kDLFloat8_e8m0fnu;
       break;
+#if TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR >= 8
     case ScalarType::Float4_e2m1fn_x2:
       dtype.code = DLDataTypeCode::kDLFloat4_e2m1fn;
       break;
+#endif
    default:
       TORCH_CHECK(false, "Unsupported scalar type: ");
   }
@@ -311,7 +313,7 @@ at::Tensor fromDLPackImpl(T* src, 
std::function<void(void*)> deleter) {
 } // namespace
 } // namespace at
 
-int TorchDLPackPyObjectExporter(void* py_obj, DLManagedTensorVersioned** out, 
void** env_stream) {
+int TorchDLPackFromPyObject(void* py_obj, DLManagedTensorVersioned** out, 
void** env_stream) {
   try {
     py::handle handle(static_cast<PyObject*>(py_obj));
     at::Tensor tensor = handle.cast<at::Tensor>();
@@ -326,7 +328,7 @@ int TorchDLPackPyObjectExporter(void* py_obj, 
DLManagedTensorVersioned** out, vo
   }
 }
 
-int TorchDLPackPyObjectImporter(DLManagedTensorVersioned* src, void** 
py_obj_out) {
+int TorchDLPackToPyObject(DLManagedTensorVersioned* src, void** py_obj_out) {
   try {
     at::Tensor tensor = at::fromDLPackImpl<DLManagedTensorVersioned>(src, 
nullptr);
     *py_obj_out = THPVariable_Wrap(tensor);
@@ -355,12 +357,12 @@ int TorchDLPackTensorAllocator(
   }
 }
 
-int64_t TorchDLPackPyObjectExporterPtr() {
-  return reinterpret_cast<int64_t>(TorchDLPackPyObjectExporter);
+int64_t TorchDLPackFromPyObjectPtr() {
+  return reinterpret_cast<int64_t>(TorchDLPackFromPyObject);
 }
 
-int64_t TorchDLPackPyObjectImporterPtr() {
-  return reinterpret_cast<int64_t>(TorchDLPackPyObjectImporter);
+int64_t TorchDLPackToPyObjectPtr() {
+  return reinterpret_cast<int64_t>(TorchDLPackToPyObject);
 }
 
 int64_t TorchDLPackTensorAllocatorPtr() {
@@ -376,8 +378,8 @@ int64_t TorchDLPackTensorAllocatorPtr() {
             name="to_dlpack",
             cpp_sources=cpp_source,
             functions=[
-                "TorchDLPackPyObjectExporterPtr",
-                "TorchDLPackPyObjectImporterPtr",
+                "TorchDLPackFromPyObjectPtr",
+                "TorchDLPackToPyObjectPtr",
                 "TorchDLPackTensorAllocatorPtr",
             ],
             extra_cflags=["-O3"],
@@ -385,8 +387,8 @@ int64_t TorchDLPackTensorAllocatorPtr() {
             verbose=True,
         )
         # set the dlpack related flags
-        torch.Tensor.__c_dlpack_exporter__ = 
mod.TorchDLPackPyObjectExporterPtr()
-        torch.Tensor.__c_dlpack_importer__ = 
mod.TorchDLPackPyObjectImporterPtr()
+        torch.Tensor.__c_dlpack_from_pyobject__ = 
mod.TorchDLPackFromPyObjectPtr()
+        torch.Tensor.__c_dlpack_to_pyobject__ = mod.TorchDLPackToPyObjectPtr()
         torch.Tensor.__c_dlpack_tensor_allocator__ = 
mod.TorchDLPackTensorAllocatorPtr()
         return mod
     except ImportError:
diff --git a/ffi/python/tvm_ffi/cython/base.pxi 
b/ffi/python/tvm_ffi/cython/base.pxi
index a1de1de1cd..fdb06f5105 100644
--- a/ffi/python/tvm_ffi/cython/base.pxi
+++ b/ffi/python/tvm_ffi/cython/base.pxi
@@ -247,11 +247,11 @@ cdef extern from "tvm/ffi/extra/c_env_api.h":
 cdef extern from "tvm_ffi_python_helpers.h":
     # no need to expose fields of the call context
      # setter data structure
-    ctypedef int (*DLPackPyObjectExporter)(
+    ctypedef int (*DLPackFromPyObject)(
         void* py_obj, DLManagedTensorVersioned** out, TVMFFIStreamHandle* 
env_stream
     ) except -1
 
-    ctypedef int (*DLPackPyObjectImporter)(
+    ctypedef int (*DLPackToPyObject)(
         DLManagedTensorVersioned* tensor, void** py_obj_out
     ) except -1
     ctypedef int (*DLPackTensorAllocator)(
@@ -263,13 +263,13 @@ cdef extern from "tvm_ffi_python_helpers.h":
         int device_type
         int device_id
         TVMFFIStreamHandle stream
-        DLPackPyObjectImporter c_dlpack_importer
+        DLPackToPyObject c_dlpack_to_pyobject
         DLPackTensorAllocator c_dlpack_tensor_allocator
 
     ctypedef struct TVMFFIPyArgSetter:
         int (*func)(TVMFFIPyArgSetter* handle, TVMFFIPyCallContext* ctx,  
PyObject* py_arg, TVMFFIAny* out) except -1
-        DLPackPyObjectExporter c_dlpack_exporter
-        DLPackPyObjectImporter c_dlpack_importer
+        DLPackFromPyObject c_dlpack_from_pyobject
+        DLPackToPyObject c_dlpack_to_pyobject
         DLPackTensorAllocator c_dlpack_tensor_allocator
 
     ctypedef int (*TVMFFIPyArgSetterFactory)(PyObject* value, 
TVMFFIPyArgSetter* out) except -1
@@ -281,7 +281,7 @@ cdef extern from "tvm_ffi_python_helpers.h":
         TVMFFIAny* result,
         int* c_api_ret_code,
         int release_gil,
-        DLPackPyObjectImporter* out_dlpack_importer
+        DLPackToPyObject* out_dlpack_importer
     ) except -1
 
     int TVMFFIPyCallFieldSetter(
diff --git a/ffi/python/tvm_ffi/cython/function.pxi 
b/ffi/python/tvm_ffi/cython/function.pxi
index bd486c5f77..9b86054b71 100644
--- a/ffi/python/tvm_ffi/cython/function.pxi
+++ b/ffi/python/tvm_ffi/cython/function.pxi
@@ -47,13 +47,13 @@ cdef inline object make_ret_small_bytes(TVMFFIAny result):
     return PyBytes_FromStringAndSize(bytes.data, bytes.size)
 
 
-cdef inline object make_ret(TVMFFIAny result, DLPackPyObjectImporter 
c_dlpack_importer = NULL):
+cdef inline object make_ret(TVMFFIAny result, DLPackToPyObject 
c_dlpack_to_pyobject = NULL):
     """convert result to return value."""
     cdef int32_t type_index
     type_index = result.type_index
     if type_index == kTVMFFITensor:
         # specially handle Tensor as it needs a special dltensor field
-        return make_tensor_from_any(result, c_dlpack_importer)
+        return make_tensor_from_any(result, c_dlpack_to_pyobject)
     elif type_index == kTVMFFIOpaquePyObject:
         return make_ret_opaque_object(result)
     elif type_index >= kTVMFFIStaticObjectBegin:
@@ -121,18 +121,18 @@ cdef int TVMFFIPyArgSetterDLPackCExporter_(
     cdef TVMFFIObjectHandle temp_chandle
     cdef TVMFFIStreamHandle env_stream = NULL
 
-    if this.c_dlpack_importer != NULL:
-        ctx.c_dlpack_importer = this.c_dlpack_importer
+    if this.c_dlpack_to_pyobject != NULL:
+        ctx.c_dlpack_to_pyobject = this.c_dlpack_to_pyobject
     if this.c_dlpack_tensor_allocator != NULL:
         ctx.c_dlpack_tensor_allocator = this.c_dlpack_tensor_allocator
 
     if ctx.device_id != -1:
         # already queried device, do not do it again, pass NULL to stream
-        if (this.c_dlpack_exporter)(arg, &temp_managed_tensor, NULL) != 0:
+        if (this.c_dlpack_from_pyobject)(arg, &temp_managed_tensor, NULL) != 0:
             return -1
     else:
        # query stream on the environment stream
-        if (this.c_dlpack_exporter)(arg, &temp_managed_tensor, &env_stream) != 
0:
+        if (this.c_dlpack_from_pyobject)(arg, &temp_managed_tensor, 
&env_stream) != 0:
             return -1
         # If device is not CPU, we should set the device type and id
         if temp_managed_tensor.dl_tensor.device.device_type != kDLCPU:
@@ -148,7 +148,7 @@ cdef int TVMFFIPyArgSetterDLPackCExporter_(
     return 0
 
 
-cdef int TorchDLPackPyObjectImporterFallback_(
+cdef int TorchDLPackToPyObjectFallback_(
     DLManagedTensorVersioned* dltensor, void** py_obj_out
 ) except -1:
     # a bit convoluted but ok as a fallback
@@ -173,7 +173,7 @@ cdef int TVMFFIPyArgSetterTorchFallback_(
     out.type_index = kTVMFFITensor
     out.v_ptr = (<Tensor>arg).chandle
     temp_dltensor = TVMFFITensorGetDLTensorPtr((<Tensor>arg).chandle)
-    ctx.c_dlpack_importer = TorchDLPackPyObjectImporterFallback_
+    ctx.c_dlpack_to_pyobject = TorchDLPackToPyObjectFallback_
     # record the stream and device for torch context
     if is_cuda and ctx.device_type != -1:
         ctx.device_type = temp_dltensor.device.device_type
@@ -370,15 +370,15 @@ cdef int TVMFFIPyArgSetterFactory_(PyObject* value, 
TVMFFIPyArgSetter* out) exce
     if isinstance(arg, ObjectRValueRef):
         out.func = TVMFFIPyArgSetterObjectRValueRef_
         return 0
-    if os.environ.get("TVM_FFI_SKIP_C_DLPACK_EXPORTER", "0") != "1":
+    if os.environ.get("TVM_FFI_SKIP_c_dlpack_from_pyobject", "0") != "1":
         # external tensors
-        if hasattr(arg, "__c_dlpack_exporter__"):
+        if hasattr(arg, "__c_dlpack_from_pyobject__"):
             out.func = TVMFFIPyArgSetterDLPackCExporter_
-            temp_ptr = arg.__c_dlpack_exporter__
-            out.c_dlpack_exporter = <DLPackPyObjectExporter>temp_ptr
-            if hasattr(arg, "__c_dlpack_importer__"):
-                temp_ptr = arg.__c_dlpack_importer__
-                out.c_dlpack_importer = <DLPackPyObjectImporter>temp_ptr
+            temp_ptr = arg.__c_dlpack_from_pyobject__
+            out.c_dlpack_from_pyobject = <DLPackFromPyObject>temp_ptr
+            if hasattr(arg, "__c_dlpack_to_pyobject__"):
+                temp_ptr = arg.__c_dlpack_to_pyobject__
+                out.c_dlpack_to_pyobject = <DLPackToPyObject>temp_ptr
             if hasattr(arg, "__c_dlpack_tensor_allocator__"):
                 temp_ptr = arg.__c_dlpack_tensor_allocator__
                 out.c_dlpack_tensor_allocator = <DLPackTensorAllocator>temp_ptr
@@ -470,7 +470,7 @@ cdef class Function(Object):
     def __call__(self, *args):
         cdef TVMFFIAny result
         cdef int c_api_ret_code
-        cdef DLPackPyObjectImporter c_dlpack_importer = NULL
+        cdef DLPackToPyObject c_dlpack_to_pyobject = NULL
         # IMPORTANT: caller need to initialize result->type_index to 
kTVMFFINone
         result.type_index = kTVMFFINone
         result.v_int64 = 0
@@ -480,12 +480,12 @@ cdef class Function(Object):
             &result,
             &c_api_ret_code,
             self.release_gil,
-            &c_dlpack_importer
+            &c_dlpack_to_pyobject
         )
         # NOTE: logic is same as check_call
         # directly inline here to simplify traceback
         if c_api_ret_code == 0:
-            return make_ret(result, c_dlpack_importer)
+            return make_ret(result, c_dlpack_to_pyobject)
         elif c_api_ret_code == -2:
             raise_existing_error()
         raise move_from_last_error().py_error()
diff --git a/ffi/python/tvm_ffi/cython/tensor.pxi 
b/ffi/python/tvm_ffi/cython/tensor.pxi
index 2fd80bc1a6..1255f0b0c3 100644
--- a/ffi/python/tvm_ffi/cython/tensor.pxi
+++ b/ffi/python/tvm_ffi/cython/tensor.pxi
@@ -275,7 +275,7 @@ _set_class_tensor(Tensor)
 _register_object_by_index(kTVMFFITensor, Tensor)
 
 
-cdef int _dltensor_test_wrapper_c_dlpack_exporter(
+cdef int _dltensor_test_wrapper_c_dlpack_from_pyobject(
     void* obj, DLManagedTensorVersioned** out, TVMFFIStreamHandle* env_stream
 ) except -1:
     cdef PyObject* py_obj = <PyObject*>obj
@@ -291,8 +291,8 @@ cdef int _dltensor_test_wrapper_c_dlpack_exporter(
     return TVMFFITensorToDLPackVersioned(wrapper.tensor.chandle, out)
 
 
-def _dltensor_test_wrapper_c_dlpack_exporter_as_intptr():
-    cdef DLPackPyObjectExporter converter_func = 
_dltensor_test_wrapper_c_dlpack_exporter
+def _dltensor_test_wrapper_c_dlpack_from_pyobject_as_intptr():
+    cdef DLPackFromPyObject converter_func = 
_dltensor_test_wrapper_c_dlpack_from_pyobject
     cdef void* temp_ptr = <void*>converter_func
     cdef long long temp_int_ptr = <long long>temp_ptr
     return temp_int_ptr
@@ -301,7 +301,7 @@ def _dltensor_test_wrapper_c_dlpack_exporter_as_intptr():
 cdef class DLTensorTestWrapper:
     """Wrapper of a Tensor that exposes DLPack protocol, only for testing 
purpose.
     """
-    __c_dlpack_exporter__ = 
_dltensor_test_wrapper_c_dlpack_exporter_as_intptr()
+    __c_dlpack_from_pyobject__ = 
_dltensor_test_wrapper_c_dlpack_from_pyobject_as_intptr()
 
     cdef Tensor tensor
     cdef dict __dict__
@@ -333,19 +333,19 @@ cdef inline object make_ret_dltensor(TVMFFIAny result):
     return tensor
 
 
-cdef inline object make_tensor_from_chandle(TVMFFIObjectHandle chandle, 
DLPackPyObjectImporter c_dlpack_importer = NULL):
+cdef inline object make_tensor_from_chandle(TVMFFIObjectHandle chandle, 
DLPackToPyObject c_dlpack_to_pyobject = NULL):
     # TODO: Implement
     cdef Tensor tensor
     cdef void* py_obj
     cdef DLManagedTensorVersioned* dlpack
 
-    if c_dlpack_importer != NULL:
+    if c_dlpack_to_pyobject != NULL:
         # try convert and import into the environment array if possible
         if TVMFFITensorToDLPackVersioned(chandle, &dlpack) == 0:
             try:
                 # note that py_obj already holds an extra reference to the 
tensor
                 # so we need to decref it after the conversion
-                c_dlpack_importer(dlpack, &py_obj)
+                c_dlpack_to_pyobject(dlpack, &py_obj)
                 tensor = <Tensor>(<PyObject*>py_obj)
                 Py_DECREF(tensor)
                 return tensor
@@ -358,5 +358,5 @@ cdef inline object 
make_tensor_from_chandle(TVMFFIObjectHandle chandle, DLPackPy
     return tensor
 
 
-cdef inline object make_tensor_from_any(TVMFFIAny any, DLPackPyObjectImporter 
c_dlpack_importer):
-    return make_tensor_from_chandle(any.v_ptr, c_dlpack_importer)
+cdef inline object make_tensor_from_any(TVMFFIAny any, DLPackToPyObject 
c_dlpack_to_pyobject):
+    return make_tensor_from_chandle(any.v_ptr, c_dlpack_to_pyobject)
diff --git a/ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h 
b/ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
index c7d847b857..87b426829d 100644
--- a/ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
+++ b/ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
@@ -44,8 +44,7 @@
  * \note We use void* to avoid dependency on Python.h so this specific type is
  *       not dependent on Python.h and can be copied to dlpack.h
  */
-typedef int (*DLPackPyObjectExporter)(void* py_obj, DLManagedTensorVersioned** 
out,
-                                      void** env_stream);
+typedef int (*DLPackFromPyObject)(void* py_obj, DLManagedTensorVersioned** 
out, void** env_stream);
 /*!
  * \brief C-style function pointer to speed convert a DLManagedTensorVersioned 
to a PyObject Tensor.
  * \param tensor The DLManagedTensorVersioned to convert.
@@ -54,7 +53,7 @@ typedef int (*DLPackPyObjectExporter)(void* py_obj, 
DLManagedTensorVersioned** o
  * \note We use void* to avoid dependency on Python.h so this specific type is
  *       not dependent on Python.h and can be copied to dlpack.h
  */
-typedef int (*DLPackPyObjectImporter)(DLManagedTensorVersioned* tensor, void** 
py_obj_out);
+typedef int (*DLPackToPyObject)(DLManagedTensorVersioned* tensor, void** 
py_obj_out);
 
 
///--------------------------------------------------------------------------------
 /// We deliberately designed the data structure and function to be C-style
@@ -82,7 +81,7 @@ struct TVMFFIPyCallContext {
   /*! \brief the number of temporary arguments */
   int num_temp_py_objects = 0;
  /*! \brief the DLPack importer, if any */
-  DLPackPyObjectImporter c_dlpack_importer{nullptr};
+  DLPackToPyObject c_dlpack_to_pyobject{nullptr};
   /*! \brief the DLPack allocator, if any */
   DLPackTensorAllocator c_dlpack_tensor_allocator{nullptr};
 };
@@ -102,11 +101,11 @@ struct TVMFFIPyArgSetter {
   /*!
   * \brief Optional DLPack exporter for setters that leverage the DLPack 
protocol.
    */
-  DLPackPyObjectExporter c_dlpack_exporter{nullptr};
+  DLPackFromPyObject c_dlpack_from_pyobject{nullptr};
   /*!
   * \brief Optional DLPack importer for setters that leverage the DLPack 
protocol.
    */
-  DLPackPyObjectImporter c_dlpack_importer{nullptr};
+  DLPackToPyObject c_dlpack_to_pyobject{nullptr};
   /*!
   * \brief Optional DLPack allocator for setters that leverage the DLPack 
protocol.
    */
@@ -273,7 +272,7 @@ class TVMFFIPyCallManager {
    */
   int Call(TVMFFIPyArgSetterFactory setter_factory, void* func_handle, 
PyObject* py_arg_tuple,
            TVMFFIAny* result, int* c_api_ret_code, bool release_gil,
-           DLPackPyObjectImporter* optional_out_dlpack_importer) {
+           DLPackToPyObject* optional_out_dlpack_importer) {
     int64_t num_args = PyTuple_Size(py_arg_tuple);
     if (num_args == -1) return -1;
     try {
@@ -321,8 +320,8 @@ class TVMFFIPyCallManager {
         c_api_ret_code[0] = TVMFFIEnvSetTensorAllocator(prev_tensor_allocator, 
0, nullptr);
         if (c_api_ret_code[0] != 0) return 0;
       }
-      if (optional_out_dlpack_importer != nullptr && ctx.c_dlpack_importer != 
nullptr) {
-        *optional_out_dlpack_importer = ctx.c_dlpack_importer;
+      if (optional_out_dlpack_importer != nullptr && ctx.c_dlpack_to_pyobject 
!= nullptr) {
+        *optional_out_dlpack_importer = ctx.c_dlpack_to_pyobject;
       }
       return 0;
     } catch (const std::exception& ex) {
@@ -430,7 +429,7 @@ class TVMFFIPyCallManager {
 inline int TVMFFIPyFuncCall(TVMFFIPyArgSetterFactory setter_factory, void* 
func_handle,
                             PyObject* py_arg_tuple, TVMFFIAny* result, int* 
c_api_ret_code,
                             bool release_gil = true,
-                            DLPackPyObjectImporter* out_dlpack_importer = 
nullptr) {
+                            DLPackToPyObject* out_dlpack_importer = nullptr) {
   return TVMFFIPyCallManager::ThreadLocal()->Call(setter_factory, func_handle, 
py_arg_tuple, result,
                                                   c_api_ret_code, release_gil, 
out_dlpack_importer);
 }

Reply via email to