This is an automated email from the ASF dual-hosted git repository.

masahi pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 436c17f885 [HEXAGON][TOPI] Adjust schedules so vector loads/stores longer than 64 elements are not generated at the LLVM level; this works around an instruction selection issue in the current version of LLVM for Hexagon (#12471)
436c17f885 is described below

commit 436c17f88527406fa1b014b2431609aa217dee48
Author: arangasa <76030063+arang...@users.noreply.github.com>
AuthorDate: Thu Aug 18 14:25:44 2022 +0530

    [HEXAGON][TOPI] Adjust schedules so vector loads/stores longer than 64 elements are not generated at the LLVM level. This is a workaround for an instruction selection issue in the current version of LLVM for Hexagon (#12471)
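
    A minimal sketch (not part of this patch) of the scheduling pattern the change applies: the shapes, the hypothetical "CastF16F32" compute, and the stand-in split factor of 1024 below are assumptions for illustration; only the extra split-by-64 followed by vectorize mirrors the edit in cast.py.

        import tvm
        from tvm import te, tir

        # Hypothetical element-wise f16 -> f32 cast, mirroring the compute in cast.py.
        A = te.placeholder((128, 2048), name="A", dtype="float16")
        B = te.compute((128, 2048), lambda i, j: A[i, j].astype("float32"), name="CastF16F32")

        sch = tir.Schedule(te.create_prim_func([A, B]))
        _, c_orig = sch.get_loops(sch.get_block("CastF16F32"))
        # Existing outer split (1024 stands in for c_split_factor).
        _, c_inner = sch.split(c_orig, [None, 1024])
        # Extra split added by this patch: cap the vectorized extent at 64 lanes,
        # so LLVM never sees a vector load/store longer than 64 elements.
        _, c_inner_inner = sch.split(c_inner, [None, 64])
        sch.vectorize(c_inner_inner)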
---
 python/tvm/topi/hexagon/slice_ops/cast.py                 | 6 ++++--
 tests/python/contrib/test_hexagon/topi/test_cast_slice.py | 4 ++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/python/tvm/topi/hexagon/slice_ops/cast.py b/python/tvm/topi/hexagon/slice_ops/cast.py
index b4984763e0..ac2e4c32e3 100644
--- a/python/tvm/topi/hexagon/slice_ops/cast.py
+++ b/python/tvm/topi/hexagon/slice_ops/cast.py
@@ -68,9 +68,10 @@ def cast_f16_f32_stir_schedule_nc(func, in_layout, out_layout, c_split_factor):
     block_name = "CastF16F32"
     _, c_orig = sch.get_loops(sch.get_block(block_name))
     _, c_inner = sch.split(c_orig, [None, c_split_factor])
+    _, c_inner_inner = sch.split(c_inner, [None, 64])
     sch.transform_layout(block_name, "A", in_layout)
     sch.transform_layout(block_name, block_name, out_layout)
-    sch.vectorize(c_inner)
+    sch.vectorize(c_inner_inner)
     return sch
 
 
@@ -122,9 +123,10 @@ def cast_f32_f16_stir_schedule_nc(func, in_layout, out_layout, c_split_factor):
     block_name = "CastF32F16"
     _, c_orig = sch.get_loops(sch.get_block(block_name))
     _, c_inner = sch.split(c_orig, [None, c_split_factor])
+    _, c_inner_inner = sch.split(c_inner, [None, 64])
     sch.transform_layout(block_name, "A", in_layout)
     sch.transform_layout(block_name, block_name, out_layout)
-    sch.vectorize(c_inner)
+    sch.vectorize(c_inner_inner)
     return sch
 
 
diff --git a/tests/python/contrib/test_hexagon/topi/test_cast_slice.py b/tests/python/contrib/test_hexagon/topi/test_cast_slice.py
index 30ea4c94b8..6569ce36bb 100644
--- a/tests/python/contrib/test_hexagon/topi/test_cast_slice.py
+++ b/tests/python/contrib/test_hexagon/topi/test_cast_slice.py
@@ -75,7 +75,7 @@ class TestCastF16F32Slice2d:
         """
         if hexagon_session._launcher._serial_number != "simulator":
             pytest.skip(msg="Due to https://github.com/apache/tvm/issues/11957")
-        target_hexagon = tvm.target.hexagon("v68")
+        target_hexagon = tvm.target.hexagon("v69")
         target = tvm.target.Target(target_hexagon, host=target_hexagon)
         cast_input = te.placeholder(input_shape, name="A", dtype=dtype)
         cast_output = sl.cast_f16_f32_compute(cast_input)
@@ -161,7 +161,7 @@ class TestCastF32F16Slice2d:
         if hexagon_session._launcher._serial_number != "simulator":
             pytest.skip(msg="Due to https://github.com/apache/tvm/issues/11957")
 
-        target_hexagon = tvm.target.hexagon("v68")
+        target_hexagon = tvm.target.hexagon("v69")
         target = tvm.target.Target(target_hexagon, host=target_hexagon)
         cast_input = te.placeholder(input_shape, name="A", dtype=dtype)
         cast_output = sl.cast_f32_f16_compute(cast_input)
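
A hedged usage sketch, continuing the schedule `sch` from the example above: compile it for the v69 target that the updated tests now use (this assumes a TVM build with the Hexagon LLVM backend enabled; `lib` is a hypothetical name).

    import tvm

    target_hexagon = tvm.target.hexagon("v69")
    target = tvm.target.Target(target_hexagon, host=target_hexagon)
    # Build the scheduled module for Hexagon v69.
    lib = tvm.build(sch.mod, target=target)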
