This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm-ffi.git


The following commit(s) were added to refs/heads/main by this push:
     new 9829dec  chore: cleanup old DLPack related defs (#129)
9829dec is described below

commit 9829dec97e4c3f394955c0b4d90102356b52d285
Author: Tianqi Chen <[email protected]>
AuthorDate: Wed Oct 15 12:24:37 2025 -0400

    chore: cleanup old DLPack related defs (#129)
    
    They are no longer needed as we migrate to the latest version.
---
 include/tvm/ffi/c_api.h                        | 18 ------------------
 include/tvm/ffi/container/tensor.h             |  4 ++--
 include/tvm/ffi/extra/c_env_api.h              |  8 ++++----
 python/tvm_ffi/cython/base.pxi                 | 12 ------------
 python/tvm_ffi/cython/tvm_ffi_python_helpers.h | 25 +------------------------
 rust/tvm-ffi-macros/src/utils.rs               |  2 +-
 rust/tvm-ffi-sys/src/c_env_api.rs              |  8 ++++----
 src/ffi/extra/env_context.cc                   | 25 +++++++++++++------------
 tests/cpp/test_tensor.cc                       | 21 +++++++++++----------
 9 files changed, 36 insertions(+), 87 deletions(-)
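The change is a rename plus the removal of locally duplicated declarations: the DLPackTensorAllocator typedef in c_api.h (and the DLPackFromPyObject / DLPackToPyObject helper typedefs in the Cython layer) are dropped, and all call sites now refer to DLPackManagedTensorAllocator, which is assumed to be supplied by the updated dlpack.h that the removed TODO pointed at. For reference, a sketch of the allocator signature after the rename, inferred from the removed C typedef and the Rust binding in this diff (not an authoritative copy of dlpack.h):

    // Sketch only: same parameter list as the removed local DLPackTensorAllocator
    // typedef, now assumed to be declared (renamed) by the updated dlpack.h.
    typedef int (*DLPackManagedTensorAllocator)(
        DLTensor* prototype, DLManagedTensorVersioned** out, void* error_ctx,
        void (*SetError)(void* error_ctx, const char* kind, const char* message));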

diff --git a/include/tvm/ffi/c_api.h b/include/tvm/ffi/c_api.h
index 88d91f8..ef2f70e 100644
--- a/include/tvm/ffi/c_api.h
+++ b/include/tvm/ffi/c_api.h
@@ -60,24 +60,6 @@
 extern "C" {
 #endif
 
-// TODO(tqchen): remove this once dlpack.h is updated
-typedef struct DLManagedTensorVersioned DLManagedTensorVersioned;
-
-/*
- * \brief C-style Allocator that allocates memory for a DLPack tensor.
- * \param prototype The prototype DLTensor to offer details about device and shape.
- * \param out The output DLManagedTensorVersioned.
- * \param error_ctx The context to set the error.
- * \param SetError The function to set the error.
- * \return 0 on success, -1 on failure.
- *         call SetError(error_ctx, kind, message) to set the error kind and message.
- * \note Error propagation via SetError.
- */
-typedef int (*DLPackTensorAllocator)(                                          //
-    DLTensor* prototype, DLManagedTensorVersioned** out, void* error_ctx,      //
-    void (*SetError)(void* error_ctx, const char* kind, const char* message)   //
-);
-
 #ifdef __cplusplus
 enum TVMFFITypeIndex : int32_t {
 #else
diff --git a/include/tvm/ffi/container/tensor.h b/include/tvm/ffi/container/tensor.h
index d99a79a..00cc402 100644
--- a/include/tvm/ffi/container/tensor.h
+++ b/include/tvm/ffi/container/tensor.h
@@ -359,7 +359,7 @@ class Tensor : public ObjectRef {
         std::forward<ExtraArgs>(extra_args)...));
   }
   /*!
-   * \brief Create a Tensor from a DLPackTensorAllocator
+   * \brief Create a Tensor from a DLPackManagedTensorAllocator
    *
    * This function can be used together with TVMFFIEnvSetTensorAllocator
    * in the extra/c_env_api.h to create Tensor from the thread-local
@@ -378,7 +378,7 @@ class Tensor : public ObjectRef {
    * \param device The device of the Tensor.
    * \return The created Tensor.
    */
-  static Tensor FromDLPackAlloc(DLPackTensorAllocator allocator, ffi::ShapeView shape,
+  static Tensor FromDLPackAlloc(DLPackManagedTensorAllocator allocator, ffi::ShapeView shape,
                                 DLDataType dtype, DLDevice device) {
     if (allocator == nullptr) {
       TVM_FFI_THROW(RuntimeError)
diff --git a/include/tvm/ffi/extra/c_env_api.h b/include/tvm/ffi/extra/c_env_api.h
index 25291be..8276825 100644
--- a/include/tvm/ffi/extra/c_env_api.h
+++ b/include/tvm/ffi/extra/c_env_api.h
@@ -70,9 +70,9 @@ TVM_FFI_DLL TVMFFIStreamHandle TVMFFIEnvGetStream(int32_t device_type, int32_t d
  * \param opt_out_original_allocator Output original TLS allocator if the address is not nullptr.
  * \return 0 when success, nonzero when failure happens
  */
-TVM_FFI_DLL int TVMFFIEnvSetTensorAllocator(DLPackTensorAllocator allocator,
-                                            int write_to_global_context,
-                                            DLPackTensorAllocator* opt_out_original_allocator);
+TVM_FFI_DLL int TVMFFIEnvSetTensorAllocator(
+    DLPackManagedTensorAllocator allocator, int write_to_global_context,
+    DLPackManagedTensorAllocator* opt_out_original_allocator);
 
 /*!
  * \brief FFI function get the current DLPack allocator stored in context.
@@ -82,7 +82,7 @@ TVM_FFI_DLL int TVMFFIEnvSetTensorAllocator(DLPackTensorAllocator allocator,
  *
  * \return The current DLPack allocator.
  */
-TVM_FFI_DLL DLPackTensorAllocator TVMFFIEnvGetTensorAllocator();
+TVM_FFI_DLL DLPackManagedTensorAllocator TVMFFIEnvGetTensorAllocator();
 
 /*!
  * \brief Check if there are any signals raised in the surrounding env.
diff --git a/python/tvm_ffi/cython/base.pxi b/python/tvm_ffi/cython/base.pxi
index d7decda..1106901 100644
--- a/python/tvm_ffi/cython/base.pxi
+++ b/python/tvm_ffi/cython/base.pxi
@@ -313,18 +313,6 @@ def _env_get_current_stream(int device_type, int device_id):
 
 cdef extern from "tvm_ffi_python_helpers.h":
     # no need to expose fields of the call context setter data structure
-    ctypedef int (*DLPackFromPyObject)(
-        void* py_obj, DLManagedTensorVersioned** out, TVMFFIStreamHandle* env_stream
-    ) except -1
-
-    ctypedef int (*DLPackToPyObject)(
-        DLManagedTensorVersioned* tensor, void** py_obj_out
-    ) except -1
-    ctypedef int (*DLPackTensorAllocator)(
-        DLTensor* prototype, DLManagedTensorVersioned** out, void* error_ctx,
-        void (*SetError)(void* error_ctx, const char* kind, const char* message)
-    ) except -1
-
     ctypedef struct TVMFFIPyCallContext:
         int device_type
         int device_id
diff --git a/python/tvm_ffi/cython/tvm_ffi_python_helpers.h b/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
index da2404b..bfd5d60 100644
--- a/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
+++ b/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
@@ -41,29 +41,6 @@
 #include <iostream>
 #include <unordered_map>
 
-//----------------------------------------------------------
-// Extra support for DLPack
-//----------------------------------------------------------
-/*!
- * \brief C-style function pointer to speed convert a PyObject Tensor to a DLManagedTensorVersioned.
- * \param py_obj The Python object to convert, this should be PyObject*
- * \param out The output DLManagedTensorVersioned.
- * \param env_stream Outputs the current context stream of the device provided by the tensor.
- * \return 0 on success, -1 on failure. PyError should be set if -1 is returned.
- * \note We use void* to avoid dependency on Python.h so this specific type is
- *       not dependent on Python.h and can be copied to dlpack.h
- */
-typedef int (*DLPackFromPyObject)(void* py_obj, DLManagedTensorVersioned** out, void** env_stream);
-/*!
- * \brief C-style function pointer to speed convert a DLManagedTensorVersioned to a PyObject Tensor.
- * \param tensor The DLManagedTensorVersioned to convert.
- * \param py_obj_out The output Python object.
- * \return 0 on success, -1 on failure. PyError should be set if -1 is returned.
- * \note We use void* to avoid dependency on Python.h so this specific type is
- *       not dependent on Python.h and can be copied to dlpack.h
- */
-typedef int (*DLPackToPyObject)(DLManagedTensorVersioned* tensor, void** py_obj_out);
-
 ///--------------------------------------------------------------------------------
 /// We deliberately designed the data structure and function to be C-style
 //  prefixed with TVMFFIPy so they can be easily invoked through Cython.
@@ -284,7 +261,7 @@ class TVMFFIPyCallManager {
         if (SetArgument(setter_factory, &ctx, py_arg, c_arg) != 0) return -1;
       }
       TVMFFIStreamHandle prev_stream = nullptr;
-      DLPackTensorAllocator prev_tensor_allocator = nullptr;
+      DLPackManagedTensorAllocator prev_tensor_allocator = nullptr;
       // setup stream context if needed
       if (ctx.device_type != -1) {
         c_api_ret_code[0] =
diff --git a/rust/tvm-ffi-macros/src/utils.rs b/rust/tvm-ffi-macros/src/utils.rs
index 5b6f4c1..da86534 100644
--- a/rust/tvm-ffi-macros/src/utils.rs
+++ b/rust/tvm-ffi-macros/src/utils.rs
@@ -58,7 +58,7 @@ pub(crate) fn attr_to_str(attr: &syn::Attribute) -> syn::LitStr {
             lit: syn::Lit::Str(s),
             ..
         })) => s,
-        Ok(m) => panic!("Expected a string literal, got"),
+        Ok(_m) => panic!("Expected a string literal, got"),
         Err(e) => panic!("{}", e),
     }
 }
diff --git a/rust/tvm-ffi-sys/src/c_env_api.rs b/rust/tvm-ffi-sys/src/c_env_api.rs
index 98b02ce..e0b9306 100644
--- a/rust/tvm-ffi-sys/src/c_env_api.rs
+++ b/rust/tvm-ffi-sys/src/c_env_api.rs
@@ -36,7 +36,7 @@ use crate::dlpack::DLTensor;
 pub type TVMFFIStreamHandle = *mut c_void;
 
 /// DLPack tensor allocator function type
-pub type DLPackTensorAllocator = unsafe extern "C" fn(
+pub type DLPackManagedTensorAllocator = unsafe extern "C" fn(
     prototype: *mut DLTensor,
     out: *mut *mut c_void, // DLManagedTensorVersioned**
     error_ctx: *mut c_void,
@@ -54,12 +54,12 @@ unsafe extern "C" {
     pub fn TVMFFIEnvGetStream(device_type: i32, device_id: i32) -> TVMFFIStreamHandle;
 
     pub fn TVMFFIEnvSetTensorAllocator(
-        allocator: DLPackTensorAllocator,
+        allocator: DLPackManagedTensorAllocator,
         write_to_global_context: i32,
-        opt_out_original_allocator: *mut DLPackTensorAllocator,
+        opt_out_original_allocator: *mut DLPackManagedTensorAllocator,
     ) -> i32;
 
-    pub fn TVMFFIEnvGetTensorAllocator() -> DLPackTensorAllocator;
+    pub fn TVMFFIEnvGetTensorAllocator() -> DLPackManagedTensorAllocator;
 
     pub fn TVMFFIEnvCheckSignals() -> i32;
 
diff --git a/src/ffi/extra/env_context.cc b/src/ffi/extra/env_context.cc
index 30f9270..cb68b53 100644
--- a/src/ffi/extra/env_context.cc
+++ b/src/ffi/extra/env_context.cc
@@ -54,15 +54,16 @@ class EnvContext {
     return nullptr;
   }
 
-  DLPackTensorAllocator GetDLPackTensorAllocator() {
+  DLPackManagedTensorAllocator GetDLPackManagedTensorAllocator() {
     if (dlpack_allocator_ != nullptr) {
       return dlpack_allocator_;
     }
     return GlobalTensorAllocator();
   }
 
-  void SetDLPackTensorAllocator(DLPackTensorAllocator allocator, int write_to_global_context,
-                                DLPackTensorAllocator* opt_out_original_allocator) {
+  void SetDLPackManagedTensorAllocator(DLPackManagedTensorAllocator allocator,
+                                       int write_to_global_context,
+                                       DLPackManagedTensorAllocator* opt_out_original_allocator) {
     dlpack_allocator_ = allocator;
     if (write_to_global_context != 0) {
       GlobalTensorAllocator() = allocator;
@@ -80,12 +81,12 @@ class EnvContext {
 
  private:
   // use static function to avoid static initialization order issue
-  static DLPackTensorAllocator& GlobalTensorAllocator() {  // NOLINT(*)
-    static DLPackTensorAllocator allocator = nullptr;
+  static DLPackManagedTensorAllocator& GlobalTensorAllocator() {  // NOLINT(*)
+    static DLPackManagedTensorAllocator allocator = nullptr;
     return allocator;
   }
   std::vector<std::vector<TVMFFIStreamHandle>> stream_table_;
-  DLPackTensorAllocator dlpack_allocator_ = nullptr;
+  DLPackManagedTensorAllocator dlpack_allocator_ = nullptr;
 };
 
 }  // namespace ffi
@@ -105,16 +106,16 @@ TVMFFIStreamHandle TVMFFIEnvGetStream(int32_t device_type, int32_t device_id) {
   TVM_FFI_LOG_EXCEPTION_CALL_END(TVMFFIEnvGetStream);
 }
 
-int TVMFFIEnvSetTensorAllocator(DLPackTensorAllocator allocator, int write_to_global_context,
-                                DLPackTensorAllocator* opt_out_original_allocator) {
+int TVMFFIEnvSetTensorAllocator(DLPackManagedTensorAllocator allocator, int write_to_global_context,
+                                DLPackManagedTensorAllocator* opt_out_original_allocator) {
   TVM_FFI_SAFE_CALL_BEGIN();
-  tvm::ffi::EnvContext::ThreadLocal()->SetDLPackTensorAllocator(allocator, write_to_global_context,
-                                                                opt_out_original_allocator);
+  tvm::ffi::EnvContext::ThreadLocal()->SetDLPackManagedTensorAllocator(
+      allocator, write_to_global_context, opt_out_original_allocator);
   TVM_FFI_SAFE_CALL_END();
 }
 
-DLPackTensorAllocator TVMFFIEnvGetTensorAllocator() {
+DLPackManagedTensorAllocator TVMFFIEnvGetTensorAllocator() {
   TVM_FFI_LOG_EXCEPTION_CALL_BEGIN();
-  return tvm::ffi::EnvContext::ThreadLocal()->GetDLPackTensorAllocator();
+  return tvm::ffi::EnvContext::ThreadLocal()->GetDLPackManagedTensorAllocator();
   TVM_FFI_LOG_EXCEPTION_CALL_END(TVMFFIEnvGetTensorAllocator);
 }
diff --git a/tests/cpp/test_tensor.cc b/tests/cpp/test_tensor.cc
index 1c45e8a..60d9a9a 100644
--- a/tests/cpp/test_tensor.cc
+++ b/tests/cpp/test_tensor.cc
@@ -32,20 +32,21 @@ inline Tensor Empty(const Shape& shape, DLDataType dtype, DLDevice device) {
   return Tensor::FromNDAlloc(CPUNDAlloc(), shape, dtype, device);
 }
 
-int TestDLPackTensorAllocator(DLTensor* prototype, DLManagedTensorVersioned** out, void* error_ctx,
-                              void (*SetError)(void* error_ctx, const char* kind,
-                                               const char* message)) {
+int TestDLPackManagedTensorAllocator(DLTensor* prototype, DLManagedTensorVersioned** out,
+                                     void* error_ctx,
+                                     void (*SetError)(void* error_ctx, const char* kind,
+                                                      const char* message)) {
   Shape shape(prototype->shape, prototype->shape + prototype->ndim);
   Tensor nd = Empty(shape, prototype->dtype, prototype->device);
   *out = nd.ToDLPackVersioned();
   return 0;
 }
 
-int TestDLPackTensorAllocatorError(DLTensor* prototype, DLManagedTensorVersioned** out,
-                                   void* error_ctx,
-                                   void (*SetError)(void* error_ctx, const char* kind,
-                                                    const char* message)) {
-  SetError(error_ctx, "RuntimeError", "TestDLPackTensorAllocatorError");
+int TestDLPackManagedTensorAllocatorError(DLTensor* prototype, DLManagedTensorVersioned** out,
+                                          void* error_ctx,
+                                          void (*SetError)(void* error_ctx, const char* kind,
+                                                           const char* message)) {
+  SetError(error_ctx, "RuntimeError", "TestDLPackManagedTensorAllocatorError");
   return -1;
 }
 
@@ -138,7 +139,7 @@ TEST(Tensor, DLPackVersioned) {
 
 TEST(Tensor, DLPackAlloc) {
   // Test successful allocation
-  Tensor tensor = Tensor::FromDLPackAlloc(TestDLPackTensorAllocator, {1, 2, 3},
+  Tensor tensor = Tensor::FromDLPackAlloc(TestDLPackManagedTensorAllocator, {1, 2, 3},
                                           DLDataType({kDLFloat, 32, 1}), DLDevice({kDLCPU, 0}));
   EXPECT_EQ(tensor.use_count(), 1);
   EXPECT_EQ(tensor.shape().size(), 3);
@@ -157,7 +158,7 @@ TEST(Tensor, DLPackAllocError) {
   // Test error handling in DLPackAlloc
   EXPECT_THROW(
       {
-        Tensor::FromDLPackAlloc(TestDLPackTensorAllocatorError, {1, 2, 3},
+        Tensor::FromDLPackAlloc(TestDLPackManagedTensorAllocatorError, {1, 2, 3},
                                DLDataType({kDLFloat, 32, 1}), DLDevice({kDLCPU, 0}));
       },
       tvm::ffi::Error);
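
As the updated tensor.h comment and the tests above suggest, the renamed allocator is installed through TVMFFIEnvSetTensorAllocator and consumed by Tensor::FromDLPackAlloc. A rough C++ sketch of that flow, using only the signatures visible in this diff (MyAllocator, Example, and the tvm::ffi::Tensor qualification are illustrative assumptions, not repository code):

    #include <tvm/ffi/container/tensor.h>
    #include <tvm/ffi/extra/c_env_api.h>

    // Assumed to be defined elsewhere with the DLPackManagedTensorAllocator
    // signature (compare TestDLPackManagedTensorAllocator in tests/cpp/test_tensor.cc).
    int MyAllocator(DLTensor* prototype, DLManagedTensorVersioned** out, void* error_ctx,
                    void (*SetError)(void* error_ctx, const char* kind, const char* message));

    void Example() {
      // Install MyAllocator as the thread-local tensor allocator, remembering the previous one.
      DLPackManagedTensorAllocator previous = nullptr;
      TVMFFIEnvSetTensorAllocator(MyAllocator, /*write_to_global_context=*/0, &previous);

      // Allocate a 1x2x3 float32 CPU tensor through whatever allocator is current,
      // mirroring the call pattern in TEST(Tensor, DLPackAlloc) above.
      tvm::ffi::Tensor t = tvm::ffi::Tensor::FromDLPackAlloc(
          TVMFFIEnvGetTensorAllocator(), {1, 2, 3},
          DLDataType({kDLFloat, 32, 1}), DLDevice({kDLCPU, 0}));

      // Restore the previous allocator if there was one.
      if (previous != nullptr) {
        TVMFFIEnvSetTensorAllocator(previous, /*write_to_global_context=*/0, nullptr);
      }
    }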
