guberti commented on code in PR #12969:
URL: https://github.com/apache/tvm/pull/12969#discussion_r988325948


##########
python/tvm/topi/arm_cpu/mprofile/dsp/tensordot_conv2ds.py:
##########
@@ -0,0 +1,276 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Implementations of several conv2d variations, all tensorized using 
tensordot and optimized for
+Cortex-M DSP. Currently contains a standard conv2d and depthwise conv2d 
implementation, but could be
+extended to add a grouped conv2d operator. Due to the way we tensorize, this 
schedule ONLY works
+when the data and kernel layouts are NCHWxc and OIHWxi respectively, where x 
is the number of
+input channels divided by the number of groups."""
+
+import random
+import string
+from typing import Union, Tuple
+
+from tvm import te
+from tvm.tir import indexdiv, indexmod
+from tvm.topi.utils import traverse_inline
+from tvm.topi.nn.pad import pad
+
+from .micro_kernel.tensordot import (
+    make_intrin_tensordot,
+    tensordot_impl,
+)
+
+
+def _unpack_2d_argument(argument: Union[int, Tuple]) -> Tuple:
+    if isinstance(argument, int):
+        return (argument, argument)
+    assert len(argument) == 2
+    return argument
+
+
+def _check_no_dilation(dilation: Union[int, Tuple]) -> None:
+    """Takes a dilation argument as an integer or tuple, and makes sure both 
dimensions are 1.
+    Dilation prevents us from using DSP instructions, so this schedule can't 
work (aside from the
+    niche case where dilation_h == stride_h and dilation_w == stride_w, which 
is rare enough we
+    probably don't need to support it)."""
+
+    dilation_h, dilation_w = _unpack_2d_argument(dilation)
+    assert dilation_h == dilation_w == 1
+
+
+def _unpack_padding(padding: Tuple) -> Tuple:
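+    """Normalizes a padding tuple, given either as ((pad_up, pad_down), (pad_left, pad_right)) or
+    already flat, into the flat form (pad_up, pad_left, pad_down, pad_right)."""
+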
+    assert isinstance(padding, tuple)
+    if len(padding) == 2:
+        (pad_up, pad_down), (pad_left, pad_right) = padding
+    else:
+        pad_up, pad_left, pad_down, pad_right = padding
+    return pad_up, pad_left, pad_down, pad_right
+
+
+def _pad_if_needed(data: te.tensor.Tensor, layout: str, padding: Tuple) -> te.tensor.Tensor:
+    """Performs padding on a te.tensor.Tensor object if necessary. If padding = (0, 0, 0, 0), the
+    input tensor is returned unmodified. We only care about tuples here - "VALID" and "SAME" padding
+    will be converted by the TFLite importer if present."""
+
+    if not any(padding):
+        return data
+    pad_up, pad_left, pad_down, pad_right = padding
+
+    # We want to pad the "H" and "W" columns, and their position depends on the layout
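+    # For example, layout = "NCHW" with padding = (1, 2, 3, 4) gives
+    # pad_before = [0, 0, 1, 2] and pad_after = [0, 0, 3, 4]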
+    pad_before, pad_after = [0, 0, 0, 0], [0, 0, 0, 0]
+    pad_before[layout.index("H")] = pad_up
+    pad_before[layout.index("W")] = pad_left
+    pad_after[layout.index("H")] = pad_down
+    pad_after[layout.index("W")] = pad_right
+    return pad(data, pad_before, pad_after, name="padded_data")
+
+
+def _compute_output_dim(data_dim, kernel_dim, pad_before, pad_after, stride) -> int:
+    return (data_dim - kernel_dim + pad_before + pad_after) // stride + 1

Review Comment:
   This exact logic is copied verbatim into a lot of other places in the TVM codebase ([1](https://github.com/apache/tvm/blob/2e257f037681766f0bf31f40a62b81691bbcbc8e/python/tvm/topi/arm_cpu/mprofile/dsp/conv2d.py#L77-L78), [2](https://github.com/apache/tvm/blob/2e257f037681766f0bf31f40a62b81691bbcbc8e/python/tvm/topi/arm_cpu/mprofile/dsp/depthwise_conv2d.py#L101-L102), [3](https://github.com/apache/tvm/blob/2e257f037681766f0bf31f40a62b81691bbcbc8e/python/tvm/topi/arm_cpu/conv2d.py#L159-L160), [4](https://github.com/apache/tvm/blob/2e257f037681766f0bf31f40a62b81691bbcbc8e/python/tvm/topi/arm_cpu/conv2d_gemm.py#L91-L92), [5](https://github.com/apache/tvm/blob/2e257f037681766f0bf31f40a62b81691bbcbc8e/python/tvm/topi/arm_cpu/conv2d_int8.py#L74-L75) examples in just `python/tvm/topi/arm_cpu`). I think there is a broader discussion to be had about having general helper functions for common parts of the `conv2d` operation, but I think that's OOS for this PR and will likely have to be redone anyway when moving to the metascheduler. For now, I've just added a docstring to this function.
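
   For reference, the duplicated arithmetic is just the standard conv2d output-size formula. A minimal sketch of what a shared helper could look like (the name `conv2d_output_dim` here is hypothetical, not an existing TVM utility):

   ```python
   def conv2d_output_dim(data_dim: int, kernel_dim: int, pad_before: int, pad_after: int, stride: int) -> int:
       """Output length along one spatial axis of a conv2d.

       For example, a 32-wide axis with a 3-wide kernel, one pixel of padding
       on each side, and stride 2 gives (32 - 3 + 1 + 1) // 2 + 1 = 16.
       """
       return (data_dim - kernel_dim + pad_before + pad_after) // stride + 1


   assert conv2d_output_dim(32, 3, 1, 1, 2) == 16
   assert conv2d_output_dim(28, 5, 0, 0, 1) == 24
   ```

   Dilation is left out of the sketch on purpose, since this schedule asserts it away anyway.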


