areusch commented on code in PR #12614: URL: https://github.com/apache/tvm/pull/12614#discussion_r957628068
########## tests/python/contrib/test_csinn/test_conv2d.py: ########## @@ -0,0 +1,158 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""SHL integration conv2d tests.""" + +import numpy as np + +import tvm +from tvm import relay + +from infrastructure import ( + skip_runtime_test, + build_and_run, + verify, +) +from infrastructure import Device + + +def _get_model( + shape, + kernel_h, + kernel_w, + padding, + strides, + dilation, + groups, + dtype, + channels, + var_names, + has_bias=False, +): + """Return a model and any parameters it may have""" + a = relay.var(next(var_names), shape=shape, dtype=dtype) + if len(padding) == 2: + padding = (padding[0], padding[1], padding[0], padding[1]) + shape = (shape[0], shape[1], shape[2] + padding[0] * 2, shape[3] + padding[1] * 2) + + weight_shape = (channels, shape[1] // groups, kernel_h, kernel_w) + w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype)) + weights = relay.const(w, dtype) + out = relay.nn.conv2d( + a, + weights, + kernel_size=(kernel_h, kernel_w), + data_layout="NCHW", + dilation=dilation, + strides=strides, + padding=padding, + groups=groups, + channels=channels, + out_dtype=dtype, + ) + params = {"w": w} + if has_bias: + bias_shape = weight_shape[0] 
+ b = tvm.nd.array(np.random.uniform(-128, 127, bias_shape).astype(dtype)) + biasc = relay.const(b, dtype) + out = relay.nn.bias_add(out, biasc, axis=1) + params["b"] = b + return out, params + + +def test_conv2d(): + Device.load("test_config.json") + + if skip_runtime_test(): + return + + device = Device() + np.random.seed(0) + + dtype = "float32" + trials = [ Review Comment: suggest using tvm.testing.parameter for these ########## tests/scripts/task_riscv_microtvm.sh: ########## @@ -22,6 +22,13 @@ source tests/scripts/setup-pytest-env.sh make cython3 -# NOTE: this exists to ensure some tests run on RISC-V image. Without it, Jenkins reports a configuration error. -# This line can be removed when RISC-V tests are added. -run_pytest ctypes riscv-platform-minimal-test-0 tests/python/all-platform-minimal-test +#riscv gcc +export PATH=/opt/csi-nn2/tools/gcc-toolchain/bin:$PATH +#riscv qemu +export PATH=/opt/csi-nn2/tools/qemu/bin:$PATH + +# run qemu tvm_rpc +nohup qemu-riscv64 -cpu c906fdv -L /opt/csi-nn2/tools/gcc-toolchain/sysroot/ ./build-c906/tvm_rpc server --host=127.0.0.1 --port=9090 & Review Comment: what do you think about launching this from a pytest [fixture](https://docs.pytest.org/en/6.2.x/fixture.html)? ########## tests/python/contrib/test_csinn/test_conv2d.py: ########## @@ -0,0 +1,158 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""SHL integration conv2d tests.""" + +import numpy as np + +import tvm +from tvm import relay + +from infrastructure import ( + skip_runtime_test, + build_and_run, + verify, +) +from infrastructure import Device + + +def _get_model( + shape, + kernel_h, + kernel_w, + padding, + strides, + dilation, + groups, + dtype, + channels, + var_names, + has_bias=False, +): + """Return a model and any parameters it may have""" + a = relay.var(next(var_names), shape=shape, dtype=dtype) + if len(padding) == 2: + padding = (padding[0], padding[1], padding[0], padding[1]) + shape = (shape[0], shape[1], shape[2] + padding[0] * 2, shape[3] + padding[1] * 2) + + weight_shape = (channels, shape[1] // groups, kernel_h, kernel_w) + w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype)) + weights = relay.const(w, dtype) + out = relay.nn.conv2d( + a, + weights, + kernel_size=(kernel_h, kernel_w), + data_layout="NCHW", + dilation=dilation, + strides=strides, + padding=padding, + groups=groups, + channels=channels, + out_dtype=dtype, + ) + params = {"w": w} + if has_bias: + bias_shape = weight_shape[0] + b = tvm.nd.array(np.random.uniform(-128, 127, bias_shape).astype(dtype)) + biasc = relay.const(b, dtype) + out = relay.nn.bias_add(out, biasc, axis=1) + params["b"] = b + return out, params + + +def test_conv2d(): + Device.load("test_config.json") + + if skip_runtime_test(): + return + + device = Device() + np.random.seed(0) + + dtype = "float32" + trials = [ + # Normal convolution + [2, 2, (1, 1), (1, 1), (1, 1), 4, (14, 10, 10), False, False], + [2, 1, (2, 
2), (1, 1), (1, 1), 7, (16, 12, 15), False, False], + [2, 2, (1, 1), (1, 1), (1, 1), 4, (14, 10, 10), True, False], + [3, 3, (1, 1), (1, 1), (1, 1), 16, (16, 12, 15), False, False], + [5, 5, (1, 1), (2, 2), (1, 1), 4, (14, 10, 10), False, False], + [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), False, False], + [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), True, False], + [5, 5, (1, 1), (2, 2), (1, 1), 4, (14, 10, 10), False, False], + [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), False, False], + [3, 3, (1, 1), (2, 2), (1, 1), 16, (14, 10, 10), True, False], + # # Depth-wise convolution + [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), False, True], + [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), True, True], + [3, 3, (2, 2), (2, 2), (1, 1), 14, (14, 10, 10), False, True], + [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), False, True], + [3, 3, (1, 1), (2, 2), (1, 1), 14, (14, 10, 10), True, True], + ] + cc = 0 + for ( + kernel_h, + kernel_w, + pad, + stride, + dilation, + out_channels, + shape, + has_bias, + is_depthwise, + ) in trials: + cc += 1 + shape = (1, *shape) + if is_depthwise: + groups = shape[1] + else: + groups = 1 + outputs = [] + inputs = { + "a": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype)), + } + + func, params = _get_model( + shape, + kernel_h, + kernel_w, + pad, + stride, + dilation, + groups, + dtype, + out_channels, + iter(inputs), + has_bias, + ) + for csinn in [False, True]: + outputs.append(build_and_run(func, inputs, 1, params, device, enable_csinn=csinn)[0]) + + config = { + "shape": shape, + "groups": groups, + "kernel size": (kernel_h, kernel_w), + "padding": pad, + "stride": stride, + "dilation": dilation, + "out channels": out_channels, + "has bias": has_bias, + } + verify(outputs, atol=0.002, rtol=0.01, config=config) + + +if __name__ == "__main__": + test_conv2d() Review Comment: suggest tvm.testing.main() ########## tests/python/contrib/test_csinn/infrastructure.py: ########## @@ -0,0 +1,262 @@ +# 
Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from itertools import zip_longest, combinations +import json +import os +import warnings + +import numpy as np + +import tvm +import tvm.testing +from tvm import relay +from tvm import rpc +from tvm.contrib import graph_executor +from tvm.relay.op.contrib import csinn +from tvm.contrib import utils +from tvm.autotvm.measure import request_remote + + +class Device: + """ + Configuration for CSINN tests. + + Check tests/python/contrib/test_csinn/ for the presence of a test_config.json file. + This file can be used to override the default configuration here which will attempt to run the riscv + Compute Library runtime tests locally if the runtime is available. Changing the configuration + will allow these runtime tests to be offloaded to a remote riscv device via a tracker for example. + + Notes + ----- + The test configuration will be loaded once when the class is created. If the configuration + changes between tests, any changes will not be picked up. + + Parameters + ---------- + device : RPCSession + Allows tests to connect to and use remote device. + + Attributes + ---------- + connection_type : str + Details the type of RPC connection to use.
Options: + local - Use the local device, + tracker - Connect to a tracker to request a remote device, + remote - Connect to a remote device directly. + host : str + Specify IP address or hostname of remote target. + port : int + Specify port number of remote target. + target : str + The compilation target. + device_key : str + The device key of the remote target. Use when connecting to a remote device via a tracker. + cross_compile : str + Specify path to cross compiler to use when connecting a remote device from a non-riscv platform. + """ + + connection_type = "local" + host = "127.0.0.1" + port = 9090 + target = "llvm" + device_key = "" + cross_compile = "" + + def __init__(self): + """Keep remote device for lifetime of object.""" + self.device = self._get_remote() + + @classmethod + def _get_remote(cls): + """Get a remote (or local) device to use for testing.""" + if cls.connection_type == "tracker": + device = request_remote(cls.device_key, cls.host, cls.port, timeout=1000) + elif cls.connection_type == "remote": + device = rpc.connect(cls.host, cls.port) + elif cls.connection_type == "local": + device = rpc.LocalSession() + else: + raise ValueError( + "connection_type in test_config.json should be one of: " "local, tracker, remote." + ) + + return device + + @classmethod + def load(cls, file_name): + """Load test config + + Load the test configuration by looking for file_name relative + to the test_csinn directory. 
+ """ + location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + config_file = os.path.join(location, file_name) + if not os.path.exists(config_file): + warnings.warn("Config file doesn't exist, resuming CSINN tests with default config.") + return + with open(config_file, mode="r") as config: + test_config = json.load(config) + + cls.connection_type = test_config["connection_type"] + cls.host = test_config["host"] + cls.port = test_config["port"] + cls.target = test_config["target"] + cls.device_key = test_config.get("device_key") or "" + cls.cross_compile = test_config.get("cross_compile") or "" + + +def get_cpu_op_count(mod): + """Traverse graph counting ops offloaded to TVM.""" + + class Counter(tvm.relay.ExprVisitor): + def __init__(self): + super().__init__() + self.count = 0 + + def visit_call(self, call): + if isinstance(call.op, tvm.ir.Op): + self.count += 1 + + super().visit_call(call) + + c = Counter() + c.visit(mod["main"]) + return c.count + + +def skip_runtime_test(): + """Skip test if it requires the runtime and it's not present.""" + # CSINN codegen not present. 
+ if not tvm.get_global_func("relay.ext.csinn", True): + print("Skip because CSINN codegen is not available.") + return True + + # Remote device is in use or CSINN runtime not present + # Note: Ensure that the device config has been loaded before this check + if not Device.connection_type != "local" and not csinn.is_csinn_runtime_enabled(): + print("Skip because runtime isn't present or a remote device isn't being used.") + return True + + +def skip_codegen_test(): + """Skip test if it requires the CSINN codegen and it's not present.""" + if not tvm.get_global_func("relay.ext.csinn", True): + print("Skip because CSINN codegen is not available.") + return True + + +def build_module(mod, target, params=None, enable_csinn=True, tvm_ops=0, csinn_partitions=1): + """Build module with option to build for CSINN.""" + if isinstance(mod, tvm.relay.expr.Call): + mod = tvm.IRModule.from_expr(mod) + with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): + if enable_csinn: + mod = csinn.partition_for_csinn(mod, params) + tvm_op_count = get_cpu_op_count(mod) + assert tvm_op_count == tvm_ops, "Got {} TVM operators, expected {}".format( + tvm_op_count, tvm_ops + ) + partition_count = 0 + for global_var in mod.get_global_vars(): + if "csinn" in global_var.name_hint: + partition_count += 1 + + assert ( + csinn_partitions == partition_count + ), "Got {} CSINN partitions, expected {}".format(partition_count, csinn_partitions) + relay.backend.te_compiler.get().clear() Review Comment: did you find this necessary? ########## ci/jenkins/Build.groovy.j2: ########## @@ -259,6 +259,40 @@ stage('Build') { Utils.markStageSkippedForConditional('BUILD: RISC-V') } }, + 'BUILD: CSINN2-x86': { Review Comment: ah sorry i was hoping I did all the Jenkinsfile changes for you--these are a bit harder to do as a non-committer. suggest to split these changes into a different PR. 
Jenkins has a security feature where it won't parse the Jenkinsfile modified by a PR (instead it just uses the one at `main`). also: You might be able to run both of these builds on a single node (not necessary, but might be simpler with stash/unstash files). -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
