AndrewZhaoLuo commented on a change in pull request #9017:
URL: https://github.com/apache/tvm/pull/9017#discussion_r709416154



##########
File path: python/tvm/relay/frontend/onnx.py
##########
@@ -654,6 +676,40 @@ def _impl_v1(cls, inputs, attr, params):
         )
 
 
+class QLinearGlobalAveragePool(OnnxOpConverter):
+    "Operator converter for QLinearGlobalAveragePool from Microsoft 
onnxruntime contrib opset."
+
+    @classmethod
+    def _impl_v1(cls, inputs, attr, params):
+        rank = len(infer_shape(inputs[0]))
+
+        x_scale = get_scalar(inputs[1], params)
+        x_zero_point = get_scalar(inputs[2], params, dtype="int32")
+        y_scale = fold_constant(get_scalar(inputs[3], params))
+        y_zero_point = get_scalar(inputs[4], params, dtype="int32")
+
+        input_dtype = infer_type(inputs[0]).checked_type.dtype
+
+        # Onnxruntime documentation does not mention that this global avg_pool should follow the

Review comment:
      I'm fine with this for now, but it should be marked with a TODO, since I believe the actual implementation does not do dq -> pool -> q:
   
https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/mlas/lib/qlgavgpool.cpp
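
For reference, the converter's current approach (as I read the hunk above) is roughly the following, a sketch only, with the suggested TODO added; get_scalar, fold_constant, infer_type, _op, and _qnn are the helpers/aliases python/tvm/relay/frontend/onnx.py already imports, and only the 2D case is shown:

```python
@classmethod
def _impl_v1(cls, inputs, attr, params):
    # Scales and zero points come in as initializers; same extraction as the hunk above.
    x_scale = get_scalar(inputs[1], params)
    x_zero_point = get_scalar(inputs[2], params, dtype="int32")
    y_scale = fold_constant(get_scalar(inputs[3], params))
    y_zero_point = get_scalar(inputs[4], params, dtype="int32")
    input_dtype = infer_type(inputs[0]).checked_type.dtype

    # TODO: onnxruntime's qlgavgpool.cpp appears to average directly on the
    # quantized values; this dequantize -> pool -> quantize round-trip only
    # approximates that kernel.
    x = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point)
    pooled = _op.nn.global_avg_pool2d(x)
    return _qnn.op.quantize(pooled, y_scale, y_zero_point, out_dtype=input_dtype)
```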

##########
File path: python/tvm/relay/frontend/onnx.py
##########
@@ -351,6 +366,13 @@ class AveragePool(Pool):
     name = "avg_pool"
 
 
+class QLinearAveragePool(Pool):

Review comment:
      I think composition rather than subclassing would be a cleaner solution. Right now, all the code that handles the quantized and non-quantized cases is in the same place, which makes it a bit harder to read. Please separate it.
   
   You could, for example, refactor the Pool implementation into a new class method such as _run_calculation(...) and call it from QLinearAveragePool.
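
Roughly what I have in mind, as a sketch only: _run_calculation is a placeholder name, and get_scalar, fold_constant, infer_type, and _qnn are the helpers this file already uses.

```python
class Pool(OnnxOpConverter):
    """Float pooling only; the shared lowering lives in a reusable helper."""

    name = ""

    @classmethod
    def _run_calculation(cls, inputs, attr, params):
        # Placeholder: move the existing float pooling body (padding handling,
        # AttrCvt call, etc.) here unchanged.
        ...

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return cls._run_calculation(inputs, attr, params)


class QLinearAveragePool(Pool):
    """Quantized variant composed from the shared float helper."""

    name = "avg_pool"

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        x_scale = get_scalar(inputs[1], params)
        x_zero_point = get_scalar(inputs[2], params, dtype="int32")
        y_scale = fold_constant(get_scalar(inputs[3], params))
        y_zero_point = get_scalar(inputs[4], params, dtype="int32")
        input_dtype = infer_type(inputs[0]).checked_type.dtype

        # All quantization handling stays here, so Pool itself remains float-only.
        float_node = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point)
        out = cls._run_calculation([float_node], attr, params)
        return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype)
```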

##########
File path: tests/python/frontend/onnx/test_forward.py
##########
@@ -3056,6 +3056,152 @@ def verify_global_pooling(x_shape, mode):
         verify_global_pooling([4, 1, 2, 6, 4], mode)
 
 
+@tvm.testing.parametrize_targets
+def test_qlinear_average_pool(target, dev):
+    def verify_qlinear_average_pool(
+        x_shape, kernel_shape, strides, pads, out_shape, auto_pad="NOTSET"
+    ):
+        input_nodes = [
+            helper.make_tensor_value_info("X", TensorProto.FLOAT, 
list(x_shape)),
+        ]
+
+        output_nodes = [
+            helper.make_tensor_value_info("Y", TensorProto.FLOAT, 
list(out_shape)),
+        ]
+
+        input_names = ["X"]
+
+        node = helper.make_node(
+            "AveragePool",

Review comment:
       Should these be QLinear Nodes?
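
If the intent is to exercise the contrib op directly rather than letting ORT's quantizer rewrite the float AveragePool, the node could be built explicitly, something like the sketch below; the scale/zero-point input names are made up and the com.microsoft attribute set is an assumption.

```python
# Hypothetical: construct the com.microsoft contrib node explicitly instead of
# relying on quantize_and_verify_with_ort to rewrite a float AveragePool.
node = helper.make_node(
    "QLinearAveragePool",
    inputs=["X", "x_scale", "x_zero_point", "y_scale", "y_zero_point"],
    outputs=["Y"],
    kernel_shape=kernel_shape,
    strides=strides,
    domain="com.microsoft",
)
```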

##########
File path: python/tvm/relay/frontend/onnx.py
##########
@@ -3794,12 +3850,14 @@ def _get_convert_map(opset):
         "Xor": Renamer("logical_xor"),
         # defs/nn
         "AveragePool": AveragePool.get_converter(opset),
+        "QLinearAveragePool": QLinearAveragePool.get_converter(opset),

Review comment:
      There's a quantization section further down in _get_convert_map; these entries should be moved there.
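
Something along these lines; the neighbouring entries are only illustrative of the existing quantization block in _get_convert_map.

```python
        # Quantization
        "QuantizeLinear": QuantizeLinear.get_converter(opset),
        "DequantizeLinear": DequantizeLinear.get_converter(opset),
        "QLinearConv": QLinearConv.get_converter(opset),
        "QLinearAveragePool": QLinearAveragePool.get_converter(opset),
        "QLinearGlobalAveragePool": QLinearGlobalAveragePool.get_converter(opset),
```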

##########
File path: tests/python/frontend/onnx/test_forward.py
##########
@@ -3056,6 +3056,152 @@ def verify_global_pooling(x_shape, mode):
         verify_global_pooling([4, 1, 2, 6, 4], mode)
 
 
+@tvm.testing.parametrize_targets
+def test_qlinear_average_pool(target, dev):
+    def verify_qlinear_average_pool(
+        x_shape, kernel_shape, strides, pads, out_shape, auto_pad="NOTSET"
+    ):
+        input_nodes = [
+            helper.make_tensor_value_info("X", TensorProto.FLOAT, 
list(x_shape)),
+        ]
+
+        output_nodes = [
+            helper.make_tensor_value_info("Y", TensorProto.FLOAT, 
list(out_shape)),
+        ]
+
+        input_names = ["X"]
+
+        node = helper.make_node(
+            "AveragePool",
+            inputs=input_names,
+            outputs=["Y"],
+            kernel_shape=kernel_shape,
+            strides=strides,
+        )
+
+        if pads is None:
+            pad_attr = helper.make_attribute("auto_pad", auto_pad)
+        else:
+            pad_attr = helper.make_attribute("pads", pads)
+        node.attribute.append(pad_attr)
+
+        graph = helper.make_graph(
+            [node],
+            "qlinear_average_pool_test",
+            inputs=input_nodes,
+            outputs=output_nodes,
+        )
+
+        model = helper.make_model(graph, producer_name="qlinear_average_pool_Test")
+        quantize_and_verify_with_ort(model, input_names, [x_shape], target, dev)
+
+    # Pool1D
+    verify_qlinear_average_pool(
+        x_shape=[1, 1, 32],
+        kernel_shape=[3],
+        strides=[1],
+        pads=[1, 1],
+        out_shape=[1, 1, 32],
+    )
+    # Pool2D
+    verify_qlinear_average_pool(
+        x_shape=[1, 1, 32, 32],
+        kernel_shape=[3, 3],
+        strides=[1, 1],
+        pads=[1, 1, 1, 1],
+        out_shape=[1, 1, 32, 32],
+    )
+
+    # Pool1D with stride
+    verify_qlinear_average_pool(
+        x_shape=[1, 1, 32],
+        kernel_shape=[3],
+        strides=[2],
+        pads=[1, 1],
+        out_shape=[1, 1, 16],
+    )
+    # Pool2D with stride
+    verify_qlinear_average_pool(
+        x_shape=[1, 1, 32, 32],
+        kernel_shape=[3, 3],
+        strides=[2, 2],
+        pads=[1, 1, 1, 1],
+        out_shape=[1, 1, 16, 16],
+    )
+
+    # Pool1D with stride and autopadding
+    verify_qlinear_average_pool(
+        x_shape=[1, 1, 32],
+        kernel_shape=[3],
+        strides=[2],
+        pads=None,
+        out_shape=[1, 1, 16],
+        auto_pad="SAME_UPPER",
+    )
+    # Pool2D with stride and autopadding
+    verify_qlinear_average_pool(
+        x_shape=[1, 1, 32, 32],
+        kernel_shape=[3, 3],
+        strides=[2, 2],
+        pads=None,
+        out_shape=[1, 1, 16, 16],
+        auto_pad="SAME_UPPER",
+    )
+
+    # Pool3D with stride
+    verify_qlinear_average_pool(
+        x_shape=[1, 1, 32, 32, 32],
+        kernel_shape=[3, 3, 3],
+        strides=[2, 2, 2],
+        pads=[1, 1, 1, 1, 1, 1],
+        out_shape=[1, 1, 16, 16, 16],
+    )
+
+    # Pool3D with stride and autopadding
+    verify_qlinear_average_pool(
+        x_shape=[1, 1, 32, 32, 32],
+        kernel_shape=[3, 3, 3],
+        strides=[2, 2, 2],
+        pads=None,
+        out_shape=[1, 1, 16, 16, 16],
+        auto_pad="SAME_UPPER",
+    )
+
+
+@tvm.testing.parametrize_targets
+def test_qlinear_global_average_pool(target, dev):
+    def verify_qlinear_global_average_pool(x_shape):
+        out_shape = x_shape[:2] + [1] * (len(x_shape) - 2)
+
+        node_type = "GlobalAveragePool"
+
+        input_names = ["X"]
+
+        pool_node = helper.make_node(node_type, inputs=input_names, outputs=["Y"])

Review comment:
       Should these be QLinear Nodes?
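
As with the AveragePool test above, if the contrib op should be exercised directly here, the node could be built as a com.microsoft node; a sketch, with made-up scale/zero-point input names and an assumed attribute set:

```python
# Hypothetical: build com.microsoft QLinearGlobalAveragePool directly rather
# than quantizing a float GlobalAveragePool through onnxruntime.
pool_node = helper.make_node(
    "QLinearGlobalAveragePool",
    inputs=["X", "x_scale", "x_zero_point", "y_scale", "y_zero_point"],
    outputs=["Y"],
    channels_last=0,
    domain="com.microsoft",
)
```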



