Cookiee235 opened a new issue, #17248:
URL: https://github.com/apache/tvm/issues/17248

   
   ### Actual behavior
   Given the same inputs, inference with the model compiled for LLVM (CPU) is roughly **1x~14x faster** than inference with the same model compiled for CUDA.
   Does this huge time-cost difference between LLVM and CUDA indicate a performance bug?
   
   ### Steps to reproduce
   ```
   import tvm
   from tvm import relax
   import numpy as np
   import time
   from tvm.script import ir as I
   from tvm.script import tir as T
   from tvm.script import relax as R
   
   @I.ir_module
   class Module:
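       # Four small element-wise TIR kernels (cast, less, multiply, negate) plus a Relax
       # `main` function that chains them together via R.call_tir.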
       @T.prim_func(private=True)
       def cast(v0_0: T.Buffer((T.int64(1),), "int32"), compute: T.Buffer((T.int64(1),), "float64")):
           T.func_attr({"tir.noalias": T.bool(True)})
           # with T.block("root"):
           for i0 in range(T.int64(1)):
               with T.block("compute"):
                   v_i0 = T.axis.spatial(T.int64(1), i0)
                   T.reads(v0_0[v_i0])
                   T.writes(compute[v_i0])
                   compute[v_i0] = T.Cast("float64", v0_0[v_i0])
   
       @T.prim_func(private=True)
       def less(v1_0: T.Buffer((T.int64(42),), "int32"), v1_0_1: T.Buffer((T.int64(42),), "int32"), T_less: T.Buffer((T.int64(42),), "bool")):
           T.func_attr({"tir.noalias": T.bool(True)})
           # with T.block("root"):
           for ax0 in range(T.int64(42)):
               with T.block("T_less"):
                   v_ax0 = T.axis.spatial(T.int64(42), ax0)
                   T.reads(v1_0[v_ax0], v1_0_1[v_ax0])
                   T.writes(T_less[v_ax0])
                   T_less[v_ax0] = v1_0[v_ax0] < v1_0_1[v_ax0]
   
       @T.prim_func(private=True)
       def multiply(v1_0: T.Buffer((T.int64(42),), "int32"), lv: T.Buffer((T.int64(1),), "int32"), T_multiply: T.Buffer((T.int64(42),), "int32")):
           T.func_attr({"tir.noalias": T.bool(True)})
           # with T.block("root"):
           for ax0 in range(T.int64(42)):
               with T.block("T_multiply"):
                   v_ax0 = T.axis.spatial(T.int64(42), ax0)
                   T.reads(v1_0[v_ax0], lv[T.int64(0)])
                   T.writes(T_multiply[v_ax0])
                   T_multiply[v_ax0] = v1_0[v_ax0] * lv[T.int64(0)]
   
       @T.prim_func(private=True)
       def tir_negative(v0_0: T.Buffer((T.int64(1),), "int32"), compute: T.Buffer((T.int64(1),), "int32")):
           T.func_attr({"tir.noalias": T.bool(True)})
           # with T.block("root"):
           for i0 in range(T.int64(1)):
               with T.block("compute"):
                   v_i0 = T.axis.spatial(T.int64(1), i0)
                   T.reads(v0_0[v_i0])
                   T.writes(compute[v_i0])
                   compute[v_i0] = v0_0[v_i0] * -1
   
       @R.function
       def main(v0_0: R.Tensor((1,), dtype="int32"), v1_0: R.Tensor((42,), dtype="int32")) -> R.Tuple(R.Tensor((1,), dtype="float64"), R.Tensor((42,), dtype="bool"), R.Tensor((42,), dtype="int32")):
           R.func_attr({"num_input": 2})
           cls = Module
           with R.dataflow():
               lv = R.call_tir(cls.tir_negative, (v0_0,), out_sinfo=R.Tensor((1,), dtype="int32"))
               lv1 = R.call_tir(cls.cast, (v0_0,), out_sinfo=R.Tensor((1,), dtype="float64"))
               lv2 = R.call_tir(cls.less, (v1_0, v1_0), out_sinfo=R.Tensor((42,), dtype="bool"))
               lv3 = R.call_tir(cls.multiply, (v1_0, lv), out_sinfo=R.Tensor((42,), dtype="int32"))
               gv: R.Tuple(R.Tensor((1,), dtype="float64"), R.Tensor((42,), dtype="bool"), R.Tensor((42,), dtype="int32")) = lv1, lv2, lv3
               R.output(gv)
           return gv
   
   mod = Module
   
   def compile_mod(mod, func_name, target, *inputs):
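       # Build `mod` for the requested target, create a VM on the matching device,
       # and return the wall-clock time of a single call to `func_name`.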
       if target == 'llvm':
           ex = relax.build(mod, target='llvm')
           vm = relax.VirtualMachine(ex, tvm.cpu())
       else:
           with tvm.target.Target("cuda"):
               mod = tvm.tir.transform.DefaultGPUSchedule()(mod)
           ex = relax.build(mod, target='cuda')
           vm = relax.VirtualMachine(ex, tvm.cuda())
   
       start_time = time.perf_counter()
       mod_outputs = vm[func_name](*inputs)
       end_time = time.perf_counter()
       infer_time = end_time - start_time
       return infer_time
   
   input_0 = tvm.nd.array(np.random.randint(10, size=[1]).astype('int32'))
   input_1 = tvm.nd.array(np.random.randint(10, size=[42]).astype('int32'))
   infer_time1 = compile_mod(mod, 'main', 'llvm', input_0, input_1)
   infer_time2 = compile_mod(mod, 'main', 'cuda', tvm.nd.array(input_0, tvm.cuda()), tvm.nd.array(input_1, tvm.cuda()))
   
   print(f"Potential performance bug: CUDA inference is {(infer_time2 - infer_time1) / infer_time1:.2f}x slower than LLVM.")
   
   ```
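   
   Note that the script above times only a single call, so the CUDA measurement also includes one-time costs such as CUDA context creation and module loading. Below is a minimal timing sketch (a hypothetical `measure` helper, assuming a `vm` built as in `compile_mod` above and its device `dev`, e.g. `tvm.cuda()`) that adds warm-up runs, synchronizes the device around the timed region, and averages over several repeats:
   
   ```
   def measure(vm, dev, func_name, *inputs, warmup=3, repeat=10):
       # Warm-up runs exclude one-time costs (context setup, first kernel launch).
       for _ in range(warmup):
           vm[func_name](*inputs)
       dev.sync()  # make sure all queued device work has finished before starting the clock
       start = time.perf_counter()
       for _ in range(repeat):
           vm[func_name](*inputs)
       dev.sync()  # wait for outstanding kernels to complete before stopping the clock
       return (time.perf_counter() - start) / repeat
   ```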
   
   cc @Lunderberg @junrushao 
   

