This is an automated email from the ASF dual-hosted git repository.

tlopex pushed a commit to branch tut15
in repository https://gitbox.apache.org/repos/asf/tvm.git

commit 444a5ae0b811f73a9c0df10fbc78bc5c6109665f
Author: Shushi Hong <[email protected]>
AuthorDate: Mon Dec 1 22:41:35 2025 -0500

    Fix output extraction from GPU virtual machine
    
    Apply DefaultGPUSchedule to the module before compiling for CUDA, and
    update output extraction from the virtual machine to return the first
    element as a NumPy array.
---
 docs/how_to/tutorials/e2e_opt_model.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/docs/how_to/tutorials/e2e_opt_model.py 
b/docs/how_to/tutorials/e2e_opt_model.py
index 9f89e744a3..b9812516cb 100644
--- a/docs/how_to/tutorials/e2e_opt_model.py
+++ b/docs/how_to/tutorials/e2e_opt_model.py
@@ -113,12 +113,14 @@ if not IS_IN_CI:
 # We skip this step in the CI environment.
 
 if not IS_IN_CI:
+    with target:
+        mod = tvm.tir.transform.DefaultGPUSchedule()(mod)
     ex = tvm.compile(mod, target="cuda")
     dev = tvm.device("cuda", 0)
     vm = relax.VirtualMachine(ex, dev)
     # Need to allocate data and params on GPU device
     gpu_data = tvm.runtime.tensor(np.random.rand(1, 3, 224, 
224).astype("float32"), dev)
     gpu_params = [tvm.runtime.tensor(p, dev) for p in params["main"]]
-    gpu_out = vm["main"](gpu_data, *gpu_params).numpy()
+    gpu_out = vm["main"](gpu_data, *gpu_params)[0].numpy()
 
     print(gpu_out.shape)

Reply via email to