This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch s-tir-s0
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/s-tir-s0 by this push:
     new ce8a7d0dbe fix all dlight tests
ce8a7d0dbe is described below

commit ce8a7d0dbeac0811d50c42c103cab0c9baf9446b
Author: tqchen <[email protected]>
AuthorDate: Wed Feb 4 13:59:16 2026 -0500

    fix all dlight tests
---
 python/tvm/dlight/cpu/gemv.py           | 2 +-
 python/tvm/dlight/gpu/fallback.py       | 2 +-
 python/tvm/dlight/gpu/gemv.py           | 2 +-
 python/tvm/dlight/gpu/low_batch_gemv.py | 2 +-
 python/tvm/dlight/gpu/matmul.py         | 8 ++++----
 python/tvm/dlight/gpu/reduction.py      | 2 +-
 python/tvm/dlight/gpu/transpose.py      | 2 +-
 7 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/python/tvm/dlight/cpu/gemv.py b/python/tvm/dlight/cpu/gemv.py
index 5f6321ebea..2f149d223d 100644
--- a/python/tvm/dlight/cpu/gemv.py
+++ b/python/tvm/dlight/cpu/gemv.py
@@ -37,7 +37,7 @@ class GEMV(CPUScheduleRule):
     ) -> Union[None, s_tir.Schedule, List[s_tir.Schedule]]:
        if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
             return None
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         block_infos = normalize_prim_func(sch)
         block_infos = try_inline_contiguous_spatial(sch, block_infos)
         if block_infos is None:
diff --git a/python/tvm/dlight/gpu/fallback.py b/python/tvm/dlight/gpu/fallback.py
index e027039700..725c005c1a 100644
--- a/python/tvm/dlight/gpu/fallback.py
+++ b/python/tvm/dlight/gpu/fallback.py
@@ -43,7 +43,7 @@ class Fallback(GPUScheduleRule):
             return None
         max_threads_per_block = base.max_threads_per_block(target)
 
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         block_infos = normalize_prim_func(sch)
 
         if block_infos is None:
diff --git a/python/tvm/dlight/gpu/gemv.py b/python/tvm/dlight/gpu/gemv.py
index dbde3f5921..78f1fd67e2 100644
--- a/python/tvm/dlight/gpu/gemv.py
+++ b/python/tvm/dlight/gpu/gemv.py
@@ -43,7 +43,7 @@ class GEMV(GPUScheduleRule):
     ) -> Union[None, s_tir.Schedule, List[s_tir.Schedule]]:
        if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
             return None
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         block_infos = normalize_prim_func(sch)
         block_infos = try_inline_contiguous_spatial(sch, block_infos)
         if block_infos is None:
diff --git a/python/tvm/dlight/gpu/low_batch_gemv.py b/python/tvm/dlight/gpu/low_batch_gemv.py
index d62ce4cc3a..931005e7a1 100644
--- a/python/tvm/dlight/gpu/low_batch_gemv.py
+++ b/python/tvm/dlight/gpu/low_batch_gemv.py
@@ -200,7 +200,7 @@ class LowBatchGEMV(GPUScheduleRule):
     ) -> Union[None, s_tir.Schedule, List[s_tir.Schedule]]:
        if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
             return None
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         block_infos = normalize_prim_func(sch)
         if block_infos is None:
             return None
diff --git a/python/tvm/dlight/gpu/matmul.py b/python/tvm/dlight/gpu/matmul.py
index 1142bdd8d7..223b216ed6 100644
--- a/python/tvm/dlight/gpu/matmul.py
+++ b/python/tvm/dlight/gpu/matmul.py
@@ -357,7 +357,7 @@ class MetalMatmul(GPUScheduleRule):
 
        if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
             return None
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         root_block = get_root_block(sch)
         blocks = sch.get_child_blocks(root_block)
 
@@ -498,7 +498,7 @@ class MatmulTensorization(GPUScheduleRule):
 
        if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
             return None
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         root_block = get_root_block(sch)
         blocks = sch.get_child_blocks(root_block)
 
@@ -719,7 +719,7 @@ class MatmulInt8Tensorization(GPUScheduleRule):
 
        if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
             return None
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         root_block = get_root_block(sch)
         blocks = sch.get_child_blocks(root_block)
 
@@ -969,7 +969,7 @@ class Matmul(GPUScheduleRule):
     ) -> Optional[s_tir.Schedule]:
        if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
             return None
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         config = self.get_configs(target)
         root_block = get_root_block(sch)
         blocks = sch.get_child_blocks(root_block)
diff --git a/python/tvm/dlight/gpu/reduction.py b/python/tvm/dlight/gpu/reduction.py
index f4055b75d5..e7a94da3d3 100644
--- a/python/tvm/dlight/gpu/reduction.py
+++ b/python/tvm/dlight/gpu/reduction.py
@@ -62,7 +62,7 @@ class Reduction(GPUScheduleRule):
     ) -> Union[None, s_tir.Schedule, List[s_tir.Schedule]]:
        if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
             return None
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         block_infos = normalize_prim_func(sch)
         if block_infos is None:
             return None
diff --git a/python/tvm/dlight/gpu/transpose.py b/python/tvm/dlight/gpu/transpose.py
index 12e878bc44..ace214fde9 100644
--- a/python/tvm/dlight/gpu/transpose.py
+++ b/python/tvm/dlight/gpu/transpose.py
@@ -64,7 +64,7 @@ class Transpose(GPUScheduleRule):
             unroll_depth = 64
         len_vec = 4
 
-        sch = tvm.s_tir.Schedule(func)
+        sch = s_tir.Schedule(func)
         blocks = normalize_prim_func(sch)
         transpose_block_idx = -1
         for idx, block in reversed(list(enumerate(blocks))):

Reply via email to