nverke commented on code in PR #13352:
URL: https://github.com/apache/tvm/pull/13352#discussion_r1025811521


##########
tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py:
##########
@@ -359,18 +429,115 @@ def test_packed_8x8x32_resnet50(hexagon_launcher):
             params=params,
         )
 
-    with hexagon_launcher.start_session() as session:
-        graph_mod = session.get_executor_from_factory(hexagon_lowered)
-        graph_mod.set_input(input_name, inp.copy())
-        graph_mod.run()
-        hexagon_output = graph_mod.get_output(0).numpy()
+    evaluate_mod(hexagon_launcher, hexagon_lowered, llvm_lowered, input_name, inp)
 
-        llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
-        llvm_graph_mod.set_input(input_name, inp.copy())
-        llvm_graph_mod.run()
-        ref_result = llvm_graph_mod.get_output(0).numpy()
 
-        np.testing.assert_allclose(ref_result, hexagon_output, atol=1e-4, rtol=1e-5)
+def _schedule_async_dma_conv2d():
+    """Manually schedule a conv2d block, created from TE compute op via 
CreatePrimFunc,
+    using 8x8x32 packed layout.
+    """
+
+    def schedule_fn(sch, conv2d_block: Optional[BlockRV] = None) -> bool:
+        if conv2d_block is None:
+            if has_block(sch, "conv2d_NCHWc_int8"):
+                conv2d_block = sch.get_block("conv2d_NCHWc_int8")
+            else:
+                return False
+
+        assert "conv2d_NCHWc_int8" in 
sch.get(conv2d_block).annotations["schedule_rule"]
+
+        # Apply scheduling
+
+        post_blocks = sch.get_consumers(conv2d_block)
+        if len(post_blocks) > 0:
+            # Fuse all intermediate post ops into the last op.
+            # This is equivalent to the traverse_inline function used in TE schedules.
+            while True:
+                next_post_blocks = []
+                for post_block in post_blocks:
+                    next_consumers = sch.get_consumers(post_block)
+                    if len(next_consumers) > 0:
+                        sch.compute_inline(post_block)
+                    next_post_blocks += next_consumers
+                if len(next_post_blocks) == 0:
+                    assert len(post_blocks) == 1
+                    outer_block = post_blocks[0]
+                    break
+                post_blocks = next_post_blocks
+        else:
+            outer_block = conv2d_block
+
+        # Move the conv2d mma into the injective post mma compute block
+        if outer_block != conv2d_block:
+            loops = sch.get_loops(outer_block)
+            # Compute at the second loop for pipelining.
+            sch.compute_at(conv2d_block, loops[1], preserve_unit_loops=True)
+
+        # Add cache for input and output for copying data to vtcm.
+        input_a_cache = sch.cache_read(conv2d_block, 0, "global.vtcm")
+        sch.compute_at(input_a_cache, sch.get_loops(conv2d_block)[1])
+        sch.fuse(*sch.get_loops(input_a_cache)[2:])
+
+        input_b_cache = sch.cache_read(conv2d_block, 1, "global.vtcm")
+        sch.compute_at(input_b_cache, sch.get_loops(conv2d_block)[1])
+        sch.fuse(*sch.get_loops(input_b_cache)[2:])
+
+        output_cache_write = sch.cache_write(conv2d_block, 0, "global.vtcm")
+        sch.fuse(*sch.get_loops(output_cache_write)[2:])
+
+        conv2d_loops = sch.get_loops(block=conv2d_block)
+        o_c, k_h, k_w, x_0, x_1, i_c = conv2d_loops[-6:]
+        ic_o, ic_i = sch.split(loop=i_c, factors=[None, 4], preserve_unit_iters=True)
+        oc_o, oc_i = sch.split(loop=o_c, factors=[None, 32], preserve_unit_iters=True)
+        sch.reorder(oc_o, k_h, k_w, x_0, x_1, ic_o, oc_i, ic_i)
+        new_loops = sch.get_loops(block=conv2d_block)
+        sch.parallel(new_loops[4])
+        sch.unroll(new_loops[5])
+        # TODO(nverke): Add compute optimizations here.
+        sch.blockize(loop=oc_i)

Review Comment:
   For some reason tensorization breaks when removing this...
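
   A minimal sketch (not lines from this PR) of the tensorization step the blockize() above is setting up: the new block isolates the oc_i x ic_i body so its iteration domain can be pattern-matched against Hexagon's vrmpy intrinsic. The intrinsic name comes from tvm.tir.tensor_intrin.hexagon, and tensorizing the blockized block (rather than the loop) is an assumption of this sketch.

       from tvm.tir.tensor_intrin.hexagon import VRMPY_u8u8i32_INTRIN

       # Wrap the oc_i/ic_i subtree in its own block so the tensorize
       # pattern-matcher only sees the 32x4 inner computation ...
       compute_block = sch.blockize(loop=oc_i)
       # ... then replace that block's implementation with the vrmpy intrinsic.
       sch.tensorize(compute_block, VRMPY_u8u8i32_INTRIN)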


