Cookiee235 opened a new issue, #17235:
URL: https://github.com/apache/tvm/issues/17235

   ### Actual behavior
   ```
   Traceback (most recent call last):
     File "/share_container/optfuzz/res/bugs/simple/obj_int.py", line 59, in <module>
       compile_mod(mod, input_0)
     File "/share_container/optfuzz/res/bugs/simple/obj_int.py", line 56, in compile_mod
       mod_outputs = vm['main'](*inputs)
                     ^^^^^^^^^^^^^^^^^^^
     File "/software/tvm/python/tvm/_ffi/_ctypes/packed_func.py", line 239, in __call__
       raise_last_ffi_error()
     File "/software/tvm/python/tvm/_ffi/base.py", line 481, in raise_last_ffi_error
       raise py_err
   tvm.error.InternalError: Traceback (most recent call last):
     13: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::relax_vm::VirtualMachineImpl::_LookupFunction(tvm::runtime::String const&)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
     12: tvm::runtime::relax_vm::VirtualMachineImpl::InvokeClosurePacked(tvm::runtime::ObjectRef const&, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
     11: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::relax_vm::VirtualMachineImpl::GetClosureInternal(tvm::runtime::String const&, bool)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
     10: tvm::runtime::relax_vm::VirtualMachineImpl::InvokeBytecode(long, std::vector<tvm::runtime::TVMRetValue, std::allocator<tvm::runtime::TVMRetValue> > const&)
     9: tvm::runtime::relax_vm::VirtualMachineImpl::RunLoop()
     8: tvm::runtime::relax_vm::VirtualMachineImpl::RunInstrCall(tvm::runtime::relax_vm::VMFrame*, tvm::runtime::relax_vm::Instruction)
     7: tvm::runtime::relax_vm::VirtualMachineImpl::InvokeClosurePacked(tvm::runtime::ObjectRef const&, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
     6: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::relax_vm::VirtualMachineImpl::GetClosureInternal(tvm::runtime::String const&, bool)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
     5: tvm::runtime::relax_vm::VirtualMachineImpl::InvokeBytecode(long, std::vector<tvm::runtime::TVMRetValue, std::allocator<tvm::runtime::TVMRetValue> > const&)
     4: tvm::runtime::relax_vm::VirtualMachineImpl::RunLoop()
     3: tvm::runtime::relax_vm::VirtualMachineImpl::RunInstrCall(tvm::runtime::relax_vm::VMFrame*, tvm::runtime::relax_vm::Instruction)
     2: tvm::runtime::relax_vm::VirtualMachineImpl::InvokeClosurePacked(tvm::runtime::ObjectRef const&, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
     1: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::relax_vm::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#11}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
     0: tvm::runtime::ObjectRef tvm::runtime::TVMPODValue_::AsObjectRef<tvm::runtime::ObjectRef>() const
     File "/software/tvm/include/tvm/runtime/packed_func.h", line 2080
   InternalError: Check failed: type_code_ == kTVMObjectHandle (0 vs. 8) : expected Object but got int
   ```
   
   ### Steps to reproduce
   ```
   import tvm
   from tvm import relax
   import numpy as np
   from tvm.script import ir as I
   from tvm.script import tir as T
   from tvm.script import relax as R

   @I.ir_module
   class Module:
       @T.prim_func(private=True)
       def add1(C: T.Buffer((T.int64(16), T.int64(16)), "float32"), B: T.Buffer((T.int64(16), T.int64(16)), "float32"), T_add: T.Buffer((T.int64(16), T.int64(16)), "float32")):
           T.func_attr({"tir.noalias": T.bool(True)})
           # with T.block("root"):
           for ax0, ax1 in T.grid(T.int64(16), T.int64(16)):
               with T.block("T_add"):
                   v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1])
                   T.reads(C[v_ax0, v_ax1], B[v_ax0, v_ax1])
                   T.writes(T_add[v_ax0, v_ax1])
                   T_add[v_ax0, v_ax1] = C[v_ax0, v_ax1] + B[v_ax0, v_ax1]

       @T.prim_func(private=True)
       def multiply(A: T.Buffer((T.int64(16), T.int64(16)), "float32"), T_multiply: T.Buffer((T.int64(16), T.int64(16)), "float32")):
           T.func_attr({"tir.noalias": T.bool(True)})
           # with T.block("root"):
           for ax0, ax1 in T.grid(T.int64(16), T.int64(16)):
               with T.block("T_multiply"):
                   v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1])
                   T.reads(A[v_ax0, v_ax1])
                   T.writes(T_multiply[v_ax0, v_ax1])
                   T_multiply[v_ax0, v_ax1] = A[v_ax0, v_ax1] * T.float32(2)

       @R.function
       def transform_params(A: R.Tensor((16, 16), dtype="float32"), B: R.Tensor((16, 16), dtype="float32")) -> R.Tuple(R.Tensor((16, 16), dtype="float32"), R.Tensor((16, 16), dtype="float32"), R.Prim(value=42), R.Tensor((), dtype="float16")):
           cls = Module
           C = R.call_tir(cls.multiply, (A,), out_sinfo=R.Tensor((16, 16), dtype="float32"))
           D = R.call_tir(cls.add1, (C, B), out_sinfo=R.Tensor((16, 16), dtype="float32"))
           return (C, D, R.prim_value(42), R.const(17.5, "float16"))

       @R.function
       def main(para0: R.Tensor((16, 16), dtype="float32")) -> R.Tuple(R.Tensor((16, 16), dtype="float32"), R.Tensor((16, 16), dtype="float32"), R.Prim(value=42), R.Tensor((), dtype="float16")):
           cls = Module
           with R.dataflow():
               res: R.Tuple(R.Tensor((16, 16), dtype="float32"), R.Tensor((16, 16), dtype="float32"), R.Prim(value=42), R.Tensor((), dtype="float16")) = cls.transform_params(para0, para0)
               R.output(res)
           return res


   mod = Module
   mod = tvm.relax.transform.LegalizeOps()(mod)

   def compile_mod(mod, *inputs):
       mod = relax.transform.FuseTIR()(mod)
       mod = relax.transform.LambdaLift()(mod)
       ex = relax.build(mod, target='llvm')
       vm = relax.VirtualMachine(ex, tvm.cpu())
       mod_outputs = vm['main'](*inputs)

   input_0 = tvm.nd.array(np.random.randint(10, size=[16, 16]).astype('float32'))
   compile_mod(mod, input_0)
   ```
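
   The innermost frame is `tvm::runtime::TVMPODValue_::AsObjectRef` (packed_func.h, line 2080): the VM receives a plain int (type code 0) where it expects an object handle (type code 8). That presumably corresponds to the `R.prim_value(42)` element of the tuple that `main` forwards from `transform_params`. Below is a further-minimized, untested sketch of that suspected trigger; it drops the TIR PrimFuncs and keeps only the cross-function Relax call returning a prim value, so it may or may not hit the same check.

   ```
   # Untested minimization sketch (assumption: the failure is triggered by the
   # R.prim_value element returned through the nested Relax call, not by the TIR parts).
   import tvm
   from tvm import relax
   import numpy as np
   from tvm.script import ir as I
   from tvm.script import relax as R

   @I.ir_module
   class MinModule:
       @R.function
       def transform_params(A: R.Tensor((16, 16), dtype="float32")) -> R.Tuple(R.Tensor((16, 16), dtype="float32"), R.Prim(value=42)):
           # Return the input tensor together with a compile-time prim value.
           return (A, R.prim_value(42))

       @R.function
       def main(para0: R.Tensor((16, 16), dtype="float32")) -> R.Tuple(R.Tensor((16, 16), dtype="float32"), R.Prim(value=42)):
           cls = MinModule
           with R.dataflow():
               res: R.Tuple(R.Tensor((16, 16), dtype="float32"), R.Prim(value=42)) = cls.transform_params(para0)
               R.output(res)
           return res

   ex = relax.build(MinModule, target="llvm")
   vm = relax.VirtualMachine(ex, tvm.cpu())
   vm["main"](tvm.nd.array(np.random.rand(16, 16).astype("float32")))
   ```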
   
   cc @Lunderberg @junrushao 

