This is an automated email from the ASF dual-hosted git repository.
tqchen pushed a commit to branch s-tir-s0
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/s-tir-s0 by this push:
new 89c20a10dc fixes
89c20a10dc is described below
commit 89c20a10dcde6bae3f55021904a078022512aa2e
Author: tqchen <[email protected]>
AuthorDate: Wed Feb 4 16:07:23 2026 -0500
fixes
---
python/tvm/relax/transform/legalize_ops/manipulate.py | 2 +-
tests/python/codegen/test_target_codegen_vulkan.py | 4 ++--
.../test_meta_schedule_feature_extractor_per_store_feature.py | 2 +-
tests/python/meta_schedule/test_meta_schedule_trace_apply.py | 3 ++-
tests/python/te/test_te_create_primfunc.py | 2 +-
5 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/python/tvm/relax/transform/legalize_ops/manipulate.py b/python/tvm/relax/transform/legalize_ops/manipulate.py
index c35bc7f571..b1743aaf6d 100644
--- a/python/tvm/relax/transform/legalize_ops/manipulate.py
+++ b/python/tvm/relax/transform/legalize_ops/manipulate.py
@@ -19,7 +19,7 @@
from typing import Optional
import tvm
-from tvm import topi, tir, relax, te
+from tvm import topi, tir, s_tir, relax, te
from tvm.relax.op.base import call_tir
from tvm.relax.struct_info import TensorStructInfo
from tvm.relax.utils import gen_call_tir_inputs
diff --git a/tests/python/codegen/test_target_codegen_vulkan.py b/tests/python/codegen/test_target_codegen_vulkan.py
index 68403189b1..73d6597d10 100644
--- a/tests/python/codegen/test_target_codegen_vulkan.py
+++ b/tests/python/codegen/test_target_codegen_vulkan.py
@@ -29,7 +29,7 @@ import tvm.testing
from tvm import te, tir
from tvm.topi.math import cast
from tvm.script import tir as T, ir as I
-from tvm.tir import TensorIntrin, IntImm, Cast, Schedule
+from tvm.tir import TensorIntrin, IntImm, Cast
from tvm.s_tir.tensor_intrin.cuda import (
WMMA_LOAD_16x16x16_F16_A_INTRIN,
WMMA_LOAD_16x16x16_F16_B_INTRIN,
@@ -463,7 +463,7 @@ def test_cooperative_matrix(out_dtype):
M, N, K = 16, 16, 32
func = get_matmul(M, N, K, out_dtype)
- sch = Schedule(func)
+ sch = tvm.s_tir.Schedule(func)
block = sch.get_sblock("compute")
i, j, k = sch.get_loops(block)
diff --git a/tests/python/meta_schedule/test_meta_schedule_feature_extractor_per_store_feature.py b/tests/python/meta_schedule/test_meta_schedule_feature_extractor_per_store_feature.py
index ac5a0d61f2..9aaea370b0 100644
--- a/tests/python/meta_schedule/test_meta_schedule_feature_extractor_per_store_feature.py
+++ b/tests/python/meta_schedule/test_meta_schedule_feature_extractor_per_store_feature.py
@@ -23,7 +23,7 @@ import tvm
import tvm.testing
from numpy.testing import assert_allclose
from tvm import meta_schedule as ms
-from tvm import te, tir
+from tvm import te, tir, s_tir
from tvm.script import tir as T
N_FEATURES = 164
diff --git a/tests/python/meta_schedule/test_meta_schedule_trace_apply.py b/tests/python/meta_schedule/test_meta_schedule_trace_apply.py
index d83f0a9a3e..fb8d8e29f3 100644
--- a/tests/python/meta_schedule/test_meta_schedule_trace_apply.py
+++ b/tests/python/meta_schedule/test_meta_schedule_trace_apply.py
@@ -21,7 +21,8 @@ import tvm.testing
from tvm.script import tir as T
from tvm.target import Target
from tvm.target.codegen import llvm_lookup_intrinsic_id
-from tvm.s_tir import Schedule, floordiv, floormod
+from tvm.tir import floordiv, floormod
+from tvm.s_tir import Schedule
from tvm.s_tir.tensor_intrin.cuda import *
from tvm.s_tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN as VNNI_INTRIN
diff --git a/tests/python/te/test_te_create_primfunc.py b/tests/python/te/test_te_create_primfunc.py
index b57dc40a2f..9f1938c67b 100644
--- a/tests/python/te/test_te_create_primfunc.py
+++ b/tests/python/te/test_te_create_primfunc.py
@@ -18,7 +18,7 @@
import numpy as np
import tvm
import tvm.testing
-from tvm import te, tir, topi
+from tvm import te, tir, s_tir, topi
from tvm.script import tir as T
import pytest