MPolaris opened a new issue, #18350:
URL: https://github.com/apache/tvm/issues/18350

   I built a very simple model with only one Linear layer, and I hit the error "Did you forget to bind?" when converting it to TVM through ONNX.
   Have I made a mistake somewhere in the workflow?
   Python script used to export and build the model:
   ```python
   # import torch
   # import torch.nn as nn
   # class MyModule(nn.Module):
   #     def __init__(self):
   #         super().__init__()
   #         self.linear = nn.Linear(8, 16)
   
   #     def forward(self, x):
   #         return self.linear(x)
       
   # model = MyModule()
   # torch.onnx.export(model, torch.randn(1, 8), "model.onnx", opset_version=13, input_names=["input"], output_names=["output"])
   import onnx
   import numpy as np
   import tvm
   from tvm import relax
   from tvm.relax.frontend.onnx import from_onnx
   print(tvm.cuda().exist)
   onnx_model = onnx.load("/home/xiaohe/codes/run_tvm/model.onnx")
   target = "llvm"
   
   shape_dict = {"input": (1, 8)}
   mod = from_onnx(onnx_model, shape_dict, keep_params_in_input=True)
   del onnx_model
   
   mod, params = relax.frontend.detach_params(mod)
   mod = relax.transform.BundleModelParams()(mod)
   target = tvm.target.Target("cuda") # llvm is ok
   with target:
       lib = relax.build(mod, target=target)
   lib.export_library("/home/xiaohe/codes/run_tvm/model.so")
   ```
   How TVM itself was built from source:
   ```bash
   cd tvm
   rm -rf build && mkdir build && cd build
   cp ../cmake/config.cmake .
   
   echo "set(USE_LLVM \"llvm-config --ignore-libllvm --link-static\")" >> 
config.cmake
   echo "set(HIDE_PRIVATE_SYMBOLS ON)" >> config.cmake
   echo "set(USE_CUDA   ON)" >> config.cmake
   
   cmake .. && cmake --build . --parallel $(nproc)
   ```
   Complete error message:
   ```txt
   Traceback (most recent call last):
     File "/home/codes/run_tvm/gen_onnx.py", line 36, in <module>
       lib = relax.build(mod, target=target)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
     File "/home/codes/tvm/python/tvm/relax/vm_build.py", line 259, in build
       return _vmlink(
              ^^^^^^^^
     File "/home/codes/tvm/python/tvm/relax/vm_build.py", line 154, in _vmlink
       lib = tvm.tir.build(tir_mod, target=target, pipeline=tir_pipeline)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
     File "/home/codes/tvm/python/tvm/tir/build.py", line 173, in build
       mod = pipeline(mod)
             ^^^^^^^^^^^^^
     File "/home/codes/tvm/python/tvm/ir/transform.py", line 238, in __call__
       return _ffi_transform_api.RunPass(self, mod)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
     File "tvm/_ffi/_cython/packed_func.pxi", line 339, in 
tvm._ffi._cy3.core.PackedFuncBase.__call__
     File "tvm/_ffi/_cython/packed_func.pxi", line 270, in 
tvm._ffi._cy3.core.FuncCall
     File "tvm/_ffi/_cython/packed_func.pxi", line 259, in 
tvm._ffi._cy3.core.FuncCall3
     File "tvm/_ffi/_cython/base.pxi", line 185, in 
tvm._ffi._cy3.core.CHECK_CALL
     File "/home/codes/tvm/python/tvm/_ffi/base.py", line 468, in 
raise_last_ffi_error
       raise py_err
     File "tvm/_ffi/_cython/packed_func.pxi", line 56, in 
tvm._ffi._cy3.core.tvm_callback
     File "/home/codes/tvm/python/tvm/tir/pipeline.py", line 122, in _pipeline
       mod = tvm.ir.transform.Sequential(passes)(mod)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
     File "/home/codes/tvm/python/tvm/ir/transform.py", line 238, in __call__
       return _ffi_transform_api.RunPass(self, mod)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
     File "tvm/_ffi/_cython/packed_func.pxi", line 339, in 
tvm._ffi._cy3.core.PackedFuncBase.__call__
     File "tvm/_ffi/_cython/packed_func.pxi", line 270, in 
tvm._ffi._cy3.core.FuncCall
     File "tvm/_ffi/_cython/packed_func.pxi", line 259, in 
tvm._ffi._cy3.core.FuncCall3
     File "tvm/_ffi/_cython/base.pxi", line 185, in 
tvm._ffi._cy3.core.CHECK_CALL
     File "/home/codes/tvm/src/tir/analysis/verify_memory.cc", line 203, in 
operator()
       LOG(FATAL) << "RuntimeError: Memory verification failed with the 
following errors:\n"
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^
   tvm._ffi.base.TVMError: Traceback (most recent call last):
     0: operator()
           at /home/codes/tvm/src/tir/analysis/verify_memory.cc:203
     Did you forget to bind?
       Variable `lv` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.
       Variable `input` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.
       Variable `matmul` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.
       Variable `matmul` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.
       Variable `matmul` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.
     File "/home/codes/tvm/src/tir/analysis/verify_memory.cc", line 203
   RuntimeError: Memory verification failed with the following errors:
   # from tvm.script import tir as T
   
   @T.prim_func
    def matmul(input: T.Buffer((T.int64(1), T.int64(8)), "float32"), lv: T.Buffer((T.int64(8), T.int64(16)), "float32"), matmul: T.Buffer((T.int64(1), T.int64(16)), "float32")):
        T.func_attr({"target": T.target({"arch": "sm_61", "host": {"keys": ["cpu"], "kind": "llvm", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "max_num_threads": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)})
       for i1, k in T.grid(16, 8):
           matmul_1 = T.Buffer((T.int64(16),), data=matmul.data)
           if k == 0:
               matmul_1[i1] = T.float32(0.0)
           input_1 = T.Buffer((T.int64(8),), data=input.data)
           lv_1 = T.Buffer((T.int64(128),), data=lv.data)
           matmul_1[i1] = matmul_1[i1] + input_1[k] * lv_1[k * 16 + i1]
   ```
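
   Is an explicit GPU scheduling step required before `relax.build` when targeting CUDA? The sketch below is what I would try next (untested on my side; it assumes `tvm.dlight`'s default GPU schedules are the intended way to give the generated TIR functions thread bindings):
   ```python
   import tvm
   from tvm import dlight as dl
   from tvm import relax

   # `mod` is the Relax module produced by from_onnx() above.
   target = tvm.target.Target("cuda")
   with target:
       # Attach a fallback GPU schedule so each TIR function gets thread bindings.
       mod = dl.ApplyDefaultSchedule(dl.gpu.Fallback())(mod)
   lib = relax.build(mod, target=target)
   ```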

