cconvey commented on code in PR #13569:
URL: https://github.com/apache/tvm/pull/13569#discussion_r1083318682


##########
tests/python/contrib/test_hexagon/test_call_tir.py:
##########
@@ -0,0 +1,248 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+""" Various tests related to the (WIP) support for having
+one PrimFunc call another PrimFunc within the same IRModule.
+"""
+
+from typing import List
+import pytest
+import numpy as np
+
+import tvm
+import tvm.testing
+import tvm.script
+from tvm.script import tir as T
+
+from tvm.contrib.hexagon.session import Session
+from tvm.contrib.hexagon import allocate_hexagon_array
+from .infrastructure import get_hexagon_target
+
+# NOTE(cconvey): These pylint warnings should be re-enabled as TVM's pylint configuration matures.
+# pylint: disable=missing-function-docstring,no-self-argument,invalid-name
+# pylint: disable=redefined-outer-name,missing-class-docstring
+
+# --------------------------------------------------------------------------------------------------
+# Test parameters
+# --------------------------------------------------------------------------------------------------
+
+# The shape of the original (unsplit) tensors.
+# We assume that each shape describes a non-empty 2D tensor.
+# (tvm.testing.parameter generates one test variant per listed shape.)
+original_shape = tvm.testing.parameter(
+    # degenerate cases: smallest shapes along each axis...
+    [1, 1],
+    [1, 2],
+    [2, 1],
+    [2, 2],
+    # arbitrary, provided for variety
+    [5, 3],
+    [3, 5],
+)
+
+# This dtype is arbitrary, but it must match the dtype that's hardcoded into the
+# callee's function signature.  E.g., 'a_data: T.Ptr[T.int8]'.
+#
+# Hopefully over time we'll find a way to soften this limitation, at least for
+# some approaches to PrimFunc-to-PrimFunc calls.
+dtype = tvm.testing.parameter("int8")
+
+# --------------------------------------------------------------------------------------------------
+# Helper functions / definitions...
+# --------------------------------------------------------------------------------------------------
+
+# Target used when compiling the PrimFuncs in these tests (Hexagon v69).
+HEXAGON_TARGET_ = get_hexagon_target("v69")
+
+# NOTE(review): presumably the name of the IRModule's entry-point PrimFunc
+# that the tests invoke — confirm against the rest of this file.
+ENTRY_PRIMFUNC_NAME_ = "main"
+
+
def get_reference_input_tensor_(shape: list, dtype: str) -> np.ndarray:
    """Return a deterministic 2D tensor of the given shape and dtype.

    Integer dtypes are filled with consecutive values starting at the dtype's
    minimum (overflow is acceptable for integers because wrap-around is
    well-behaved and well-understood).  Float dtypes are filled with evenly
    spaced values spanning roughly half the representable range, so the test
    data never requires well-defined floating-point-overflow behavior.

    Raises:
        AssertionError: if `shape` is not 2D or `dtype` is not an
            integer/float kind.
    """
    assert len(shape) == 2

    np_dtype = np.dtype(dtype)
    num_elements = shape[0] * shape[1]

    if np_dtype.kind in ["i", "u"]:
        # Consecutive values starting at the dtype's minimum; computed in
        # int64 so the intermediate arithmetic never overflows, then cast
        # (C-style wrap-around) to the target dtype.
        min_value = np.iinfo(np_dtype).min
        values = min_value + np.arange(num_elements, dtype=np.int64)

    elif np_dtype.kind == "f":
        # NOTE: For simplicity, we avoid test data that require
        # well-defined behavior on floating-point overflow.
        # But it may be reasonable to test that in the future.
        min_value = np.finfo(np_dtype).min
        max_value = np.finfo(np_dtype).max

        min_input_value = min_value / 2.0 + 1
        max_input_value = max_value / 2.0 - 2
        delta = (max_input_value - min_input_value) / num_elements

        values = min_input_value + delta * np.arange(num_elements, dtype=np.float64)

    else:
        assert False, f"Unexpected data type: {np_dtype}"

    return values.astype(np_dtype).reshape(shape)
+
+
+def get_reference_output_tensor_(shape: list, dtype: str) -> np.array:

Review Comment:
   I think there's a good case for converting `get_reference_input_tensor_` 
into a fixture eventually, if other unit tests start using similar logic for 
populating their input tensors.
   
   `get_reference_output_tensor_` is specific to this unit test, because it 
makes a specific assumption about the intended behavior of the code being 
tested.
   
   I can see a case for converting `get_reference_input_tensor_` and 
`get_reference_output_tensor_` into fixtures for _performance_ reasons 
(caching), but I don't think we're anywhere near that inflection point.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to