This is an automated email from the ASF dual-hosted git repository.
ruihangl pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 173bb23274 [Runtime][Builtin] Handle mismatched type on argument #0
when calling Builtin Runtime Operators (#18837)
173bb23274 is described below
commit 173bb2327453503f95103fbbac736a8fb2780dee
Author: Nguyen Duy Loc <[email protected]>
AuthorDate: Sun Mar 1 12:55:05 2026 +0700
[Runtime][Builtin] Handle mismatched type on argument #0 when calling
Builtin Runtime Operators (#18837)
This PR handles a mismatched type on argument #0 when calling Builtin
Runtime Operators (e.g. `vm.builtin.shape_of`, `vm.builtin.reshape`, ...),
where `ffi.Tensor` was expected but `DLTensor*` was given.
### Summary
- Handle a mismatched type on argument #0 when calling Builtin runtime
operators (expected `ffi.Tensor` but got `DLTensor*`).
### Description
- Builtin runtime operators currently accept `Tensor` as argument #0,
but in some cases argument #0 is a `DLTensor*`.
- This raises the exception "Mismatched type on argument #0 ... Expected
`ffi.Tensor` but got `DLTensor*`".
### Resolve
- Change parameter #0 to `ffi::Any` to cover all cases.
- Use `try_cast` to convert data types.
- Handle the following input types: `Tensor` or `DLTensor*`.
### Reference
- This PR is related to #18546
- Fixes: #18824
---
src/runtime/vm/builtin.cc | 36 ++++++++++++++++++++++++++++++++----
1 file changed, 32 insertions(+), 4 deletions(-)
diff --git a/src/runtime/vm/builtin.cc b/src/runtime/vm/builtin.cc
index 35cc261e4d..2f1f89d2e5 100644
--- a/src/runtime/vm/builtin.cc
+++ b/src/runtime/vm/builtin.cc
@@ -526,11 +526,39 @@ TVM_FFI_STATIC_INIT_BLOCK() {
TVM_FFI_STATIC_INIT_BLOCK() {
namespace refl = tvm::ffi::reflection;
refl::GlobalDef()
- .def_method("vm.builtin.shape_of", [](Tensor data) -> ffi::Shape {
return data.Shape(); })
+ .def_method("vm.builtin.shape_of",
+ [](ffi::Any any) -> ffi::Shape {
+ if (auto opt_tensor = any.try_cast<Tensor>()) {
+ return opt_tensor.value().Shape();
+ } else if (auto opt_dltensor = any.try_cast<DLTensor*>()) {
+ DLTensor* ptr = opt_dltensor.value();
+ return ffi::Shape(ptr->shape, ptr->shape + ptr->ndim);
+ } else {
+ TVM_FFI_THROW(TypeError)
+ << "vm.builtin.shape_of expects a Tensor or
DLTensor*, but get "
+ << any.GetTypeKey();
+ }
+ })
.def("vm.builtin.copy", [](ffi::Any a) -> ffi::Any { return a; })
- .def(
- "vm.builtin.reshape",
- [](Tensor data, ffi::Shape new_shape) { return
data.CreateView(new_shape, data->dtype); })
+ .def("vm.builtin.reshape",
+ [](ffi::Any any, ffi::Shape new_shape) {
+ if (auto opt_tensor = any.try_cast<Tensor>()) {
+ Tensor data = opt_tensor.value();
+ return data.CreateView(new_shape, data->dtype);
+ } else if (auto opt_dltensor = any.try_cast<DLTensor*>()) {
+ DLTensor* ptr = opt_dltensor.value();
+ auto tmp = std::make_unique<DLManagedTensor>();
+ tmp->dl_tensor = *ptr;
+ tmp->manager_ctx = nullptr;
+ tmp->deleter = nullptr;
+ Tensor data = Tensor::FromDLPack(tmp.release());
+ return data.CreateView(new_shape, data->dtype);
+ } else {
+ TVM_FFI_THROW(TypeError)
+ << "vm.builtin.reshape expects a Tensor or DLTensor*, but
get "
+ << any.GetTypeKey();
+ }
+ })
.def("vm.builtin.null_value", []() -> std::nullptr_t { return nullptr; })
.def_packed("vm.builtin.to_device", [](ffi::PackedArgs args, ffi::Any*
rv) {
Tensor data = args[0].cast<Tensor>();