lhutton1 commented on a change in pull request #10801:
URL: https://github.com/apache/tvm/pull/10801#discussion_r838490715



##########
File path: python/tvm/relay/op/contrib/arm_compute_lib.py
##########
@@ -286,6 +286,7 @@ def _func_wrapper(expr):
 
 
 _register_external_op_helper("reshape")
+_register_external_op_helper("concatenate")

Review comment:
       Could we check a couple of conditions before offloading (something along the lines of the sketch below):
   - The input and output data types are equal
   - The concatenation axis is in (0, 1, 2, 3)
   
   _These conditions come from: https://arm-software.github.io/ComputeLibrary/v22.02/classarm__compute_1_1_n_e_concatenate_layer.xhtml#a97752e7404ffd5623774b26709fb4bc0_
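   
   A rough sketch of what such a predicate could look like, following the `register_op_attr` pattern used by the other checks in this file (instead of the plain `_register_external_op_helper` call). The exact attribute/type accesses are assumptions on my part, so treat it as a starting point rather than a drop-in:
   ```
   @tvm.ir.register_op_attr("concatenate", "target.arm_compute_lib")
   def concatenate(expr):
       """Check if the external ACL codegen should be used for concatenate."""
       attrs, args = expr.attrs, expr.args
       # NEConcatenateLayer only supports a concatenation axis of 0-3
       # (negative axes would need normalising first).
       if int(attrs.axis) not in (0, 1, 2, 3):
           return False
       # All input data types must match the output data type.
       input_dtypes = [field.dtype for field in args[0].checked_type.fields]
       output_dtype = expr.checked_type.dtype
       if any(dtype != output_dtype for dtype in input_dtypes):
           return False
       return True
   ```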

##########
File path: tests/python/contrib/test_arm_compute_lib/test_concatenate.py
##########
@@ -0,0 +1,126 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Arm Compute Library integration space_to_batch_nd tests."""

Review comment:
       nit: s/space_to_batch_nd/concatenate

##########
File path: src/runtime/contrib/arm_compute_lib/acl_runtime.cc
##########
@@ -510,6 +539,30 @@ class ACLRuntime : public JSONRuntimeBase {
     layer->function = f;
   }
 
+  /*!
+   * \brief Create a Concatenate layer.
+   *
+   * \param layer The ACL layer to build. Containing inputs, outputs and the ACL function.
+   * \param node The JSON representation of the operator.
+   */
+  void CreateConcatenateLayer(CachedLayer* layer, const JSONGraphNode& node) {
+    std::vector<std::string> axis = node.GetAttr<std::vector<std::string>>("axis");
+    std::vector<const arm_compute::ITensor*> inputs;
+    for (auto input : node.GetInputs()) {
+      layer->inputs.push_back(MakeACLTensorFromJSONEntry(input, nullptr, nullptr, false));
+      layer->json_inputid_to_layer_inputid[std::pair<uint32_t, uint32_t>(input.id_, input.index_)] =
+          layer->inputs.size() - 1;
+    }
+    for (size_t i = 0; i < layer->inputs.size(); i++) {
+      inputs.push_back(&layer->inputs[i]);
+    }
+    layer->outputs.push_back(MakeACLTensorFromJSONNode(node));
+    int dimNum = layer->inputs[0].info()->num_dimensions();
+    auto function = std::make_shared<arm_compute::NEConcatenateLayer>();
+    function->configure(inputs, &layer->outputs[0], dimNum - std::stoi(axis[0]) - 1);

Review comment:
       It might be worth adding a comment to explain why the axis here is different from the Relay attribute (presumably because ACL stores tensor dimensions in reverse order relative to the NHWC layout in the JSON graph, so the Relay axis has to be mirrored).

##########
File path: tests/python/contrib/test_arm_compute_lib/test_concatenate.py
##########
@@ -0,0 +1,126 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Arm Compute Library integration space_to_batch_nd tests."""
+
+import numpy as np
+
+import tvm
+from tvm import relay
+from tvm import testing
+
+from test_arm_compute_lib.infrastructure import (
+    skip_runtime_test,
+    skip_codegen_test,
+    build_and_run,
+    verify,
+    verify_codegen,
+)
+from test_arm_compute_lib.infrastructure import Device
+
+
+def _get_model(input_shape_a, input_shape_b, input_shape_c, axis, dtype, var_names):
+    """Return a model and any parameters it may have."""
+    a = relay.var(next(var_names), shape=input_shape_a, dtype=dtype)
+    b = relay.var(next(var_names), shape=input_shape_b, dtype=dtype)
+    c = relay.var(next(var_names), shape=input_shape_c, dtype=dtype)
+    out = relay.concatenate([a, b, c], axis)
+    return out
+
+
+def _get_expected_codegen(input_shape_a, input_shape_b, input_shape_c, axis, dtype):
+    node = {
+        "op": "kernel",
+        "name": "concatenate",
+        "inputs": [
+            [0, 0, 0],
+            [0, 1, 0],
+            [0, 2, 0],
+        ],
+        "attrs": {
+            "num_outputs": "1",
+            "num_inputs": "3",
+            "dtype": [[dtype]],
+            "axis": [[str(axis)]],
+            "shape": [[[3, 234, 234, 256]]],
+        },
+    }
+
+    input = {
+        "op": "input",
+        "name": "",
+        "attrs": {
+            "shape": [[input_shape_a, input_shape_b, input_shape_c]],
+            "dtype": [[dtype, dtype, dtype]],
+        },
+    }
+
+    return [input, node]
+
+
+def test_concatenate():
+    Device.load("test_config.json")
+
+    if skip_runtime_test():
+        return
+
+    device = Device()
+    np.random.seed(0)
+
+    for input_shape_a, input_shape_b, input_shape_c, axis in [
+        ([1, 234, 234, 256], [1, 234, 234, 256], [1, 234, 234, 256], 0),

Review comment:
       Is it possible to test concatenation of different shapes as well? e.g.
   ```
   [(3, 2, 1), (3, 1, 1)] with axis 1
   ```
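   
   For instance, with the three-input `_get_model` in this test, an extra case along those lines could be (purely illustrative shapes):
   ```
   ([1, 2, 3, 4], [1, 2, 2, 4], [1, 2, 1, 4], 2),
   ```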

##########
File path: src/runtime/contrib/arm_compute_lib/acl_runtime.cc
##########
@@ -93,10 +94,19 @@ class ACLRuntime : public JSONRuntimeBase {
   void Run() override {
     for (size_t i = 0; i < input_nodes_.size(); ++i) {
       auto nid = input_nodes_[i];
-      uint32_t eid = EntryID(nid, 0);
       if (nodes_[nid].GetOpType() == "input") {
-        void* data = data_entry_[eid]->data;
-        CheckACLError(layer_.inputs[i].allocator()->import_memory(data));
+        for (uint32_t index = 0; index < nodes_[nid].GetNumOutput(); index++) {

Review comment:
       Would it be possible to rename `index` to something more meaningful, e.g. `eid_idx`? And similarly for `i` above, e.g. `nid_idx`, just to help avoid confusion.

##########
File path: src/runtime/contrib/arm_compute_lib/acl_runtime.cc
##########
@@ -166,6 +178,7 @@ class ACLRuntime : public JSONRuntimeBase {
     std::shared_ptr<arm_compute::IFunction> function;
     std::vector<arm_compute::Tensor> inputs;
     std::vector<arm_compute::Tensor> outputs;
+    std::map<std::pair<uint32_t, uint32_t>, uint32_t> json_inputid_to_layer_inputid;

Review comment:
       Perhaps add a comment to explain what this map is for, similar to the commit message :) For example, noting that it maps a JSON (node id, output index) pair to the position of the corresponding tensor in `layer->inputs`. It would also be worth stating that using it is optional (i.e. only when an operator uses the eid index).

##########
File path: tests/python/contrib/test_arm_compute_lib/test_concatenate.py
##########
@@ -0,0 +1,126 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Arm Compute Library integration space_to_batch_nd tests."""
+
+import numpy as np
+
+import tvm
+from tvm import relay
+from tvm import testing
+
+from test_arm_compute_lib.infrastructure import (
+    skip_runtime_test,
+    skip_codegen_test,
+    build_and_run,
+    verify,
+    verify_codegen,
+)
+from test_arm_compute_lib.infrastructure import Device
+
+
+def _get_model(input_shape_a, input_shape_b, input_shape_c, axis, dtype, var_names):
+    """Return a model and any parameters it may have."""
+    a = relay.var(next(var_names), shape=input_shape_a, dtype=dtype)
+    b = relay.var(next(var_names), shape=input_shape_b, dtype=dtype)
+    c = relay.var(next(var_names), shape=input_shape_c, dtype=dtype)
+    out = relay.concatenate([a, b, c], axis)
+    return out
+
+
+def _get_expected_codegen(input_shape_a, input_shape_b, input_shape_c, axis, dtype):
+    node = {
+        "op": "kernel",
+        "name": "concatenate",
+        "inputs": [
+            [0, 0, 0],
+            [0, 1, 0],
+            [0, 2, 0],
+        ],
+        "attrs": {
+            "num_outputs": "1",
+            "num_inputs": "3",
+            "dtype": [[dtype]],
+            "axis": [[str(axis)]],
+            "shape": [[[3, 234, 234, 256]]],
+        },
+    }
+
+    input = {
+        "op": "input",
+        "name": "",
+        "attrs": {
+            "shape": [[input_shape_a, input_shape_b, input_shape_c]],
+            "dtype": [[dtype, dtype, dtype]],
+        },
+    }
+
+    return [input, node]
+
+
+def test_concatenate():
+    Device.load("test_config.json")
+
+    if skip_runtime_test():
+        return
+
+    device = Device()
+    np.random.seed(0)
+
+    for input_shape_a, input_shape_b, input_shape_c, axis in [
+        ([1, 234, 234, 256], [1, 234, 234, 256], [1, 234, 234, 256], 0),
+    ]:
+        dtype = "int32"

Review comment:
       I'm slightly surprised to see `int32` working, as it's not listed in the supported data types here: https://arm-software.github.io/ComputeLibrary/v22.02/classarm__compute_1_1_n_e_concatenate_layer.xhtml#a97752e7404ffd5623774b26709fb4bc0. Could we test `float32` and `uint8`?
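   
   For example, the dtype could be folded into the parametrization above, e.g. (rough sketch reusing the shapes from the existing case):
   ```
   for input_shape_a, input_shape_b, input_shape_c, axis, dtype in [
       ([1, 234, 234, 256], [1, 234, 234, 256], [1, 234, 234, 256], 0, "float32"),
       ([1, 234, 234, 256], [1, 234, 234, 256], [1, 234, 234, 256], 0, "uint8"),
   ]:
   ```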




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

