masahi commented on a change in pull request #7154:
URL: https://github.com/apache/tvm/pull/7154#discussion_r548183138



##########
File path: tests/python/frontend/pytorch/test_object_detection.py
##########
@@ -102,38 +105,55 @@ def test_detection_models():
     scripted_model = generate_jit_model(1)
     mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
 
-    with tvm.transform.PassContext(opt_level=3, disabled_pass=["FoldScaleAxis"]):
-        vm_exec = relay.vm.compile(mod, target=target, params=params)
+    def compile_and_run_vm(mod, params, data_np):
+        with tvm.transform.PassContext(opt_level=3, disabled_pass=["FoldScaleAxis"]):
+            vm_exec = relay.vm.compile(mod, target=target, params=params)
 
-    ctx = tvm.cpu()
-    vm = VirtualMachine(vm_exec, ctx)
-    data = process_image(img)
-    pt_res = scripted_model(data)
-    data = data.detach().numpy()
-    vm.set_input("main", **{input_name: data})
-    tvm_res = vm.run()
+        ctx = tvm.context(target, 0)
+        vm = VirtualMachine(vm_exec, ctx)
+        vm.set_input("main", **{input_name: data_np})
+        return vm.run()
 
+    data = process_image(img)
+    data_np = data.detach().numpy()
+    tvm_res = compile_and_run_vm(mod, params, data_np)
     # Note: due to accumulated numerical error, we can't directly compare results
     # with pytorch output. Some boxes might have a tiny difference in score,
     # and the order can become different. We just measure how many valid boxes
     # there are for the input image.
+    pt_res = scripted_model(data)
     pt_scores = pt_res[1].detach().numpy().tolist()
     tvm_scores = tvm_res[1].asnumpy().tolist()
-    num_pt_valid_scores = num_tvm_valid_scores = 0
 
-    for score in pt_scores:
-        if score >= score_threshold:
-            num_pt_valid_scores += 1
-        else:
-            break
+    def count_valid_scores(scores):
+        num_valid_scores = 0
+        for score in scores:
+            if score >= score_threshold:
+                num_valid_scores += 1
+            else:
+                return num_valid_scores
+        return num_valid_scores
 
-    for score in tvm_scores:
-        if score >= score_threshold:
-            num_tvm_valid_scores += 1
-        else:
-            break
+    num_pt_valid_scores = count_valid_scores(pt_scores)
+    num_tvm_valid_scores = count_valid_scores(tvm_scores)
 
     assert num_pt_valid_scores == num_tvm_valid_scores, (
         "Output mismatch: Under score threshold {}, Pytorch has {} valid "
         "boxes while TVM has {}.".format(score_threshold, num_pt_valid_scores, 
num_tvm_valid_scores)
     )
+
+    before = mod["main"]
+    after = rewrite(NMSRewrite(), before)
+    # TODO(masahi): Is there a better way to test if the desired rewrite has happened?

Review comment:
       Yes, the problem is that the model is huge, so manually creating the reference is not possible. And programmatically creating a reference would require the same pattern match & rewrite that I want to test :)



