This is an automated email from the ASF dual-hosted git repository.

lukhut pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 1222398342 [TFLite] Support quantized GREATER op in TFLite frontend (#12754)
1222398342 is described below

commit 12223983422868bbbc5444f66d175aeb9318b71f
Author: Dhruv Chauhan <89972057+dchauhan-...@users.noreply.github.com>
AuthorDate: Mon Sep 12 21:03:56 2022 +0100

    [TFLite] Support quantized GREATER op in TFLite frontend (#12754)
    
    Support conversion of the quantized GREATER operation, as part of issue #9187.
    Continuation of #11519.
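
    For illustration only (not part of the original commit message): a minimal
    sketch of the conversion path this patch enables, assuming a pre-quantized
    model file "greater.tflite" with uint8 inputs named "a" and "b" of shape
    (3, 6). The file name, tensor names, shapes, and dtypes are hypothetical.

        import tflite  # TFLite flatbuffer Python bindings
        from tvm import relay

        with open("greater.tflite", "rb") as f:
            buf = f.read()
        tfl_model = tflite.Model.GetRootAsModel(buf, 0)

        # Before this patch, a quantized GREATER raised tvm.error.OpNotImplemented.
        # It now lowers to a boolean-valued relay greater on the uint8 inputs
        # (no output quantization parameters are involved).
        mod, params = relay.frontend.from_tflite(
            tfl_model,
            shape_dict={"a": (3, 6), "b": (3, 6)},
            dtype_dict={"a": "uint8", "b": "uint8"},
        )
        print(mod)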
---
 python/tvm/relay/frontend/tflite.py          | 19 ++++++-----
 tests/python/frontend/tflite/test_forward.py | 49 +++++++++++++++-------------
 2 files changed, 37 insertions(+), 31 deletions(-)

diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index c38191b389..6c68230e0e 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -1291,7 +1291,13 @@ class OperatorConverter(object):
 
         return out
 
-    def _convert_elemwise(self, relay_op, op, ignore_qnn_params=False):
+    def _convert_elemwise(
+        self,
+        relay_op,
+        op,
+        ignore_qnn_params=False,
+        comparison_op=False,
+    ):
         """Generic method to Convert TFLite elemwise"""
         try:
             from tflite.AddOptions import AddOptions
@@ -1316,7 +1322,7 @@ class OperatorConverter(object):
 
         # TFLite format demands equal scale and zero_point tuple parameters for some operations
         # to allow us to use non-quantized operation instead of quantized if ignore_qnn_params=True
-        if ignore_qnn_params:
+        if ignore_qnn_params and not comparison_op:
             assert (
                 lhs_tensor.qnn_params
                 and self.has_same_qnn_params(lhs_tensor, output_tensor)
@@ -1431,12 +1437,7 @@ class OperatorConverter(object):
 
     def convert_greater(self, op):
         """Convert TFLite GREATER"""
-        # Check if the input tensor is quantized, call QNN op
-        if self.is_quantized(op):
-            raise tvm.error.OpNotImplemented(
-                "TFlite quantized GREATER operator is not supported yet."
-            )
-        return self._convert_elemwise(_op.greater, op)
+        return self._convert_elemwise(_op.greater, op, self.is_quantized(op), comparison_op=True)
 
     def convert_squared_difference(self, op):
         """Convert TFLite SQUARED DIFFERENCE"""
@@ -1475,7 +1476,7 @@ class OperatorConverter(object):
 
     def convert_equal(self, op):
         """Convert TFLite EQUAL"""
-        return self._convert_elemwise(_op.equal, op, self.is_quantized(op))
+        return self._convert_elemwise(_op.equal, op, self.is_quantized(op), comparison_op=True)
 
     def convert_not_equal(self, op):
         """Convert TFLite NOT_EQUAL"""
diff --git a/tests/python/frontend/tflite/test_forward.py b/tests/python/frontend/tflite/test_forward.py
index 7267b72548..18045b8e83 100644
--- a/tests/python/frontend/tflite/test_forward.py
+++ b/tests/python/frontend/tflite/test_forward.py
@@ -2254,6 +2254,7 @@ def _test_elemwise(
     quantized=False,
     qnn_op=None,
     same_qnn_params=False,
+    comparison_op=False,
 ):
     """One iteration of elemwise"""
 
@@ -2298,7 +2299,7 @@ def _test_elemwise(
                 if x[0] is not None
             }
 
-            if math_op is math_ops.equal:
+            if comparison_op:
                 out = math_op(inq_data[0], inq_data[1])
                 out = with_fused_activation_function(out, fused_activation_function)
 
@@ -2307,6 +2308,9 @@ def _test_elemwise(
                     [x + ":0" for x in input_range.keys()],
                     [x[1] for x in zip(in_data, inq_data) if x[0] is not None],
                     [out],
+                    quantized=True,
+                    input_range=input_range,
+                    experimental_new_converter=same_qnn_params,
                 )
             else:
                 out = math_op(inq_data[0], inq_data[1])
@@ -2314,6 +2318,7 @@ def _test_elemwise(
                 out = tf.quantization.fake_quant_with_min_max_args(
                     out, min=out_min, max=out_max, name="out"
                 )
+
                 # Note same_qnn_params uses experimental_new_converter as toco failed
                 compare_tflite_with_tvm(
                     [x[1] for x in zip(in_data, data) if x[0] is not None],
@@ -2440,9 +2445,17 @@ def _test_minimum(data, fused_activation_function=None, quantized=False, qnn_op=
 # -------
 
 
-def _test_greater(data):
+def _test_greater(data, fused_activation_function=None, quantized=False, qnn_op=None):
     """One iteration of greater"""
-    return _test_elemwise(math_ops.greater, data)
+    return _test_elemwise(
+        math_ops.greater,
+        data,
+        fused_activation_function,
+        quantized,
+        qnn_op,
+        same_qnn_params=True,
+        comparison_op=True,
+    )
 
 
 #######################################################################
@@ -2489,6 +2502,7 @@ def _test_equal(data, fused_activation_function=None, quantized=False, qnn_op=No
         quantized,
         qnn_op,
         same_qnn_params=True,
+        comparison_op=True,
     )
 
 
@@ -2555,25 +2569,14 @@ def _test_forward_elemwise(testop):
 
 
 def _test_forward_elemwise_quantized(testop):
-    if testop is not _test_equal:
-        testop(
-            [
-                np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
-                np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
-            ],
-            quantized=True,
-            qnn_op=testop,
-        )
-    else:
-        # no need for fake_quant to hold tensors in float32 until conversion
-        testop(
-            [
-                np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.float32),
-                np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.float32),
-            ],
-            quantized=True,
-            qnn_op=testop,
-        )
+    testop(
+        [
+            np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
+            np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
+        ],
+        quantized=True,
+        qnn_op=testop,
+    )
 
 
 def _test_elemwise_qnn_out_range(qnn_op):
@@ -2585,6 +2588,7 @@ def _test_elemwise_qnn_out_range(qnn_op):
         _test_maximum: (-112, 111),
         _test_minimum: (-128, 127),
         _test_equal: (-150, 150),
+        _test_greater: (-150, 150),
     }
 
     return qnn_out_range[qnn_op]
@@ -2615,6 +2619,7 @@ def test_all_elemwise():
     _test_forward_elemwise(_test_minimum)
     _test_forward_elemwise_quantized(_test_minimum)
     _test_forward_elemwise(_test_greater)
+    _test_forward_elemwise_quantized(_test_greater)
     _test_forward_elemwise(_test_squared_difference)
     _test_forward_elemwise(_test_greater_equal)
     _test_forward_elemwise(_test_less)
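
A note on why the patch can skip the matching-quantization assertion for
comparison operators (an illustrative sketch, not from the commit): with a
shared scale and zero point, affine quantization q = round(x / s) + z is
monotonic, so comparing the raw quantized values agrees with comparing the
real values (up to rounding ties), and the boolean output carries no
quantization parameters of its own. A small NumPy check with made-up numbers:

    import numpy as np

    s, z = 0.5, 10  # shared scale and zero point (assumed for both inputs)
    x1 = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    x2 = np.array([2.0, 2.0, 2.0], dtype=np.float32)
    q1 = np.round(x1 / s).astype(np.int32) + z
    q2 = np.round(x2 / s).astype(np.int32) + z

    # greater commutes with the (monotonic) quantization map
    assert np.array_equal(x1 > x2, q1 > q2)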
