This is an automated email from the ASF dual-hosted git repository.

echuraev pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 11f2253b9c  Restore "pytest.mark.gpu" for RELAX tests (#16741)
11f2253b9c is described below

commit 11f2253b9cc22ff354e7f13df2d5a55feae01259
Author: apeskov <pesko...@gmail.com>
AuthorDate: Tue Apr 23 11:22:55 2024 +0300

    Restore "pytest.mark.gpu" for RELAX tests (#16741)

    * [TEST] Mark RELAX GPU tests with pytest.mark.gpu

    A missing pytest.mark.gpu prevents tests from launching in CI.

    Signed-off-by: Alexander Peskov <alexander.pes...@deelvin.com>

    * fix

    Signed-off-by: Alexander Peskov <alexander.pes...@deelvin.com>

    * Check fp8 compute capability

    Signed-off-by: Alexander Peskov <alexander.pes...@deelvin.com>

    * fix func signature

    Signed-off-by: Alexander Peskov <alexander.pes...@deelvin.com>

    * lint

    Signed-off-by: Alexander Peskov <alexander.pes...@deelvin.com>

    ---------

    Signed-off-by: Alexander Peskov <alexander.pes...@deelvin.com>
    Co-authored-by: Alexander Peskov <alexander.pes...@deelvin.com>
---
 tests/python/relax/test_codegen_cublas.py         | 10 ++--------
 tests/python/relax/test_codegen_cudnn.py          |  9 +--------
 tests/python/relax/test_codegen_cutlass.py        |  9 +--------
 tests/python/relax/test_codegen_tensorrt.py       | 13 +++++++++++--
 tests/python/relax/test_contrib_vllm.py           |  2 +-
 tests/python/relax/test_transform_codegen_pass.py | 10 ++++++----
 6 files changed, 22 insertions(+), 31 deletions(-)

diff --git a/tests/python/relax/test_codegen_cublas.py b/tests/python/relax/test_codegen_cublas.py
index 4f357626b8..ea0861467f 100644
--- a/tests/python/relax/test_codegen_cublas.py
+++ b/tests/python/relax/test_codegen_cublas.py
@@ -36,14 +36,7 @@ def reset_seed():
     np.random.seed(0)
 
 
-has_cublas = tvm.get_global_func("relax.ext.cublas", True)
-
-cublas_enabled = pytest.mark.skipif(
-    not has_cublas,
-    reason="CUBLAS not enabled.",
-)
-
-pytestmark = [cublas_enabled]
+pytestmark = tvm.testing.requires_cublas.marks()
 
 
 def build_and_run(mod, inputs_np, target, legalize=False, cuda_graph=False):
@@ -231,6 +224,7 @@ def test_matmul_igemm_offload(
     tvm.testing.assert_allclose(out, ref, rtol=1e-2, atol=1e-2)
 
 
+@tvm.testing.requires_cuda_compute_version(9)
 @pytest.mark.skipif(ml_dtypes is None, reason="requires ml_dtypes to be installed")
 @pytest.mark.parametrize(
     "x_shape, y_shape, transpose_y, out_dtype",
diff --git a/tests/python/relax/test_codegen_cudnn.py b/tests/python/relax/test_codegen_cudnn.py
index c913559232..f342705878 100644
--- a/tests/python/relax/test_codegen_cudnn.py
+++ b/tests/python/relax/test_codegen_cudnn.py
@@ -34,14 +34,7 @@ def reset_seed():
     np.random.seed(0)
 
 
-has_cudnn = tvm.get_global_func("relax.ext.cudnn", True)
-
-cudnn_enabled = pytest.mark.skipif(
-    not has_cudnn,
-    reason="cuDNN not enabled.",
-)
-
-pytestmark = [cudnn_enabled]
+pytestmark = tvm.testing.requires_cudnn.marks()
 
 
 _activation_table = {
diff --git a/tests/python/relax/test_codegen_cutlass.py b/tests/python/relax/test_codegen_cutlass.py
index fced7a84a8..57f47ca6e6 100644
--- a/tests/python/relax/test_codegen_cutlass.py
+++ b/tests/python/relax/test_codegen_cutlass.py
@@ -75,14 +75,7 @@ class Conv2dx2:
         return conv2
 
 
-has_cutlass = tvm.get_global_func("relax.ext.cutlass", True)
-
-cutlass_enabled = pytest.mark.skipif(
-    not has_cutlass,
-    reason="CUTLASS not enabled.",
-)
-
-pytestmark = [cutlass_enabled]
+pytestmark = tvm.testing.requires_cutlass.marks()
 
 
 def build_and_run(mod, inputs_np, target, legalize=True, cuda_graph=False):
diff --git a/tests/python/relax/test_codegen_tensorrt.py b/tests/python/relax/test_codegen_tensorrt.py
index 23dc7d887f..009bb24c63 100644
--- a/tests/python/relax/test_codegen_tensorrt.py
+++ b/tests/python/relax/test_codegen_tensorrt.py
@@ -43,13 +43,22 @@ class Conv2dResidualBlock:
 
 
 has_tensorrt = tvm.get_global_func("relax.ext.tensorrt", True)
+env_checker_runtime = tvm.get_global_func("relax.is_tensorrt_runtime_enabled", True)
 
-tensorrt_enabled = pytest.mark.skipif(
+requires_tensorrt_codegen = pytest.mark.skipif(
     not has_tensorrt,
     reason="TENSORRT not enabled.",
 )
 
-pytestmark = [tensorrt_enabled]
+requires_tensorrt_runtime = pytest.mark.skipif(
+    not env_checker_runtime or not env_checker_runtime(),
+    reason="TensorRT runtime not available",
+)
+
+pytestmark = [
+    requires_tensorrt_codegen,
+    requires_tensorrt_runtime,
+] + tvm.testing.requires_cuda.marks()
 
 
 def build_and_run(mod, inputs_np, target, legalize=False):
diff --git a/tests/python/relax/test_contrib_vllm.py b/tests/python/relax/test_contrib_vllm.py
index dd2149e572..f3c4839133 100644
--- a/tests/python/relax/test_contrib_vllm.py
+++ b/tests/python/relax/test_contrib_vllm.py
@@ -32,7 +32,7 @@ vllm_enabled = pytest.mark.skipif(
     reason="VLLM not enabled.",
 )
 
-pytestmark = [vllm_enabled]
+pytestmark = [vllm_enabled] + tvm.testing.requires_cuda.marks()
 
 
 def build_and_run(mod, inputs_np, target, legalize=True):
diff --git a/tests/python/relax/test_transform_codegen_pass.py b/tests/python/relax/test_transform_codegen_pass.py
index 560bd3bc0b..6e78a67fd0 100644
--- a/tests/python/relax/test_transform_codegen_pass.py
+++ b/tests/python/relax/test_transform_codegen_pass.py
@@ -30,17 +30,17 @@ from tvm.relax.dpl import is_op, wildcard
 env_checker_codegen = tvm.get_global_func("relax.ext.tensorrt", True)
 env_checker_runtime = tvm.get_global_func("relax.is_tensorrt_runtime_enabled", True)
 
-has_tensorrt_codegen = pytest.mark.skipif(
+requires_tensorrt_codegen = pytest.mark.skipif(
     not env_checker_codegen,
     reason="TensorRT codegen not available",
 )
-has_tensorrt_runtime = pytest.mark.skipif(
+requires_tensorrt_runtime = pytest.mark.skipif(
     not env_checker_runtime or not env_checker_runtime(),
     reason="TensorRT runtime not available",
 )
 
 # Global variable in pytest that applies markers to all tests.
-pytestmark = [has_tensorrt_codegen, has_tensorrt_runtime]
+pytestmark = [requires_tensorrt_codegen] + tvm.testing.requires_cuda.marks()
 
 # Target gpu
 target_str = "nvidia/nvidia-t4"
@@ -117,6 +117,7 @@ entry_func_name = tvm.testing.parameter("main", "func")
 
 
 @tvm.testing.requires_gpu
+@requires_tensorrt_runtime
 def test_tensorrt_only(entry_func_name):
     mod, inputs, expected = setup_test()
 
@@ -146,6 +147,7 @@ def test_tensorrt_only(entry_func_name):
 
 
 @tvm.testing.requires_gpu
+@requires_tensorrt_runtime
 def test_mix_use_tensorrt_and_tvm():
     mod, inputs, expected = setup_test()
 
@@ -367,7 +369,7 @@ def test_no_op_for_call_to_tir():
     @tvm.script.ir_module
     class Before:
         @R.function
-        def main(x: R.Tensor):
+        def main(x: R.Tensor([4], "int64")):
             R.func_attr({"relax.force_pure": True})
             _ = Before.shape_func(x)
             return x
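
Editor's note: the recurring change in this diff is replacing per-file,
hand-rolled pytest.mark.skipif guards with the shared tvm.testing feature
markers. Per the commit message, the point is that marks() also attaches
pytest.mark.gpu, which the hand-rolled skipif variants lacked, so
marker-based CI test selection never launched those tests. Below is a
minimal sketch of the resulting test-module preamble; the module and test
names are hypothetical, while the marker APIs (requires_cublas.marks(),
requires_cuda_compute_version) are the ones used in the diff above.

    # Hypothetical module: tests/python/relax/test_codegen_example.py
    import tvm
    import tvm.testing

    # One line replaces the has_cublas/pytest.mark.skipif boilerplate and,
    # unlike it, also carries pytest.mark.gpu, so a CI stage that selects
    # tests by marker (e.g. "pytest -m gpu") actually runs this module.
    pytestmark = tvm.testing.requires_cublas.marks()


    # fp8 tests additionally need CUDA compute capability 9.x, mirroring
    # the guard this commit adds in test_codegen_cublas.py.
    @tvm.testing.requires_cuda_compute_version(9)
    def test_fp8_matmul_offload():  # placeholder body for illustration
        ...

On a machine without cuBLAS, the skip conditions bundled in the marks
still apply, so the module degrades to skipped tests rather than failures.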