This is an automated email from the ASF dual-hosted git repository.

masahi pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 8e2382eea5 [Bugfix] Conv3Dtranspose default kernel layout should be IODHW (#14340)
8e2382eea5 is described below

commit 8e2382eea52bcead6e6b4835fb0e9381e38d76cd
Author: Jang Yeongsang <122958878+rebel-jan...@users.noreply.github.com>
AuthorDate: Thu Mar 30 20:58:26 2023 +0900

    [Bugfix] Conv3Dtranspose default kernel layout should be IODHW (#14340)
    
    * fix conv3Dtranspose kernel layout
    
    * fix typo
    
    * Fix conv3dtranspose frontend relay converter for pytorch, keras
    
    * Add conv3dtranspose convert layout pass
    
    * Fix conv3dtranspose frontend relay converter keras
    
    * lint
    
    * fix dnnl test for conv3d transpose
    
    * fix tensorflow relay converter for conv3d_transpose op
    
    * fix dnnl runtime
    
    * fix topi conv3d_transpose legalize
---
 include/tvm/relay/attrs/nn.h                  | 12 +++----
 python/tvm/relay/frontend/keras.py            | 11 ++++---
 python/tvm/relay/frontend/pytorch.py          |  3 ++
 python/tvm/relay/frontend/tensorflow_ops.py   |  5 ++-
 python/tvm/relay/op/nn/_nn.py                 | 45 +++++++++++++++++++++++++++
 python/tvm/relay/op/nn/nn.py                  |  2 +-
 python/tvm/topi/nn/conv3d_transpose.py        | 11 +++----
 src/relay/op/nn/convolution.cc                | 18 ++++++-----
 src/runtime/contrib/dnnl/dnnl_json_runtime.cc |  7 +----
 tests/python/contrib/test_dnnl.py             |  2 +-
 10 files changed, 83 insertions(+), 33 deletions(-)
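
For orientation, here is a minimal sketch of what the new default looks like from the Relay Python API (the shapes below are illustrative assumptions, not taken from the patch):

    import tvm
    from tvm import relay

    # With this patch, conv3d_transpose defaults to kernel_layout="IODHW":
    # weights are laid out (in_channels, out_channels, depth, height, width).
    data = relay.var("data", shape=(1, 8, 4, 4, 4), dtype="float32")       # NCDHW
    weight = relay.var("weight", shape=(8, 16, 3, 3, 3), dtype="float32")  # IODHW
    out = relay.nn.conv3d_transpose(data, weight, channels=16, kernel_size=(3, 3, 3))
    mod = tvm.IRModule.from_expr(relay.Function([data, weight], out))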

diff --git a/include/tvm/relay/attrs/nn.h b/include/tvm/relay/attrs/nn.h
index 5ffc4711ca..b918482a55 100644
--- a/include/tvm/relay/attrs/nn.h
+++ b/include/tvm/relay/attrs/nn.h
@@ -430,10 +430,10 @@ struct Conv3DTransposeAttrs : public tvm::AttrsNode<Conv3DTransposeAttrs> {
            "dimensions respectively. Convolution is applied on the 'D', 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
-        .set_default("OIDHW")
+        .set_default("IODHW")
        .describe(
-            "Dimension ordering of data and weight. Can be 'OIDHW', 'OIDHW16o16i', etc."
-            "'O', 'I', 'D', 'H', 'W' stands for num_filter, input_channel, depth, height, and width"
+            "Dimension ordering of data and weight. Can be 'IODHW', 'IODHW16i16o', etc."
+            "'I', 'O', 'D', 'H', 'W' stands for input_channel, num_filter, depth, height, and width"
             "dimensions respectively.");
     TVM_ATTR_FIELD(out_layout)
         .set_default("")
@@ -588,10 +588,10 @@ struct Conv2DTransposeAttrs : public tvm::AttrsNode<Conv2DTransposeAttrs> {
            "dimensions respectively. Convolution is applied on the 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
-        .set_default("OIHW")
+        .set_default("IOHW")
        .describe(
-            "Dimension ordering of data and weight. Can be 'OIHW', 'OIHW16o16i', etc."
-            "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
+            "Dimension ordering of data and weight. Can be 'IOHW', 'OIHW16o16i', etc."
+            "'I', 'O', 'H', 'W' stands for input_channel, num_filter, height, and width"
             "dimensions respectively.");
     TVM_ATTR_FIELD(out_layout)
         .set_default("")
diff --git a/python/tvm/relay/frontend/keras.py b/python/tvm/relay/frontend/keras.py
index 8c26efffb4..a5531e2ac7 100644
--- a/python/tvm/relay/frontend/keras.py
+++ b/python/tvm/relay/frontend/keras.py
@@ -450,6 +450,7 @@ def _convert_convolution(inexpr, keras_layer, etab, data_layout, input_shape=Non
 
 def _convert_convolution3d(inexpr, keras_layer, etab, data_layout, input_shape=None):
     _check_data_format(keras_layer)
+    is_deconv = type(keras_layer).__name__ == "Conv3DTranspose"
     weightList = keras_layer.get_weights()
     weight = weightList[0]
     if input_shape is None:
@@ -457,20 +458,22 @@ def _convert_convolution3d(inexpr, keras_layer, etab, data_layout, input_shape=N
 
     if data_layout == "NDHWC":
         kernel_layout = "DHWIO"
+        if is_deconv:
+            kernel_layout = "DHWOI"
     else:
         kernel_layout = "OIDHW"
+        if is_deconv:
+            kernel_layout = "IODHW"
         msg = (
             "Kernel layout with {} is not supported for operator Convolution3D 
"
             "in frontend Keras."
         )
         raise tvm.error.OpAttributeUnImplemented(msg.format(data_layout))
 
-    is_deconv = type(keras_layer).__name__ == "Conv3DTranspose"
-
     if is_deconv:
         kernel_d, kernel_h, kernel_w, n_filters, _ = weight.shape
-        if kernel_layout == "OIDHW":
-            weight = weight.transpose([4, 3, 2, 0, 1])
+        if kernel_layout == "IODHW":
+            weight = weight.transpose([4, 3, 0, 1, 2])
     else:
         kernel_d, kernel_h, kernel_w, _, n_filters = weight.shape
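
Keras stores Conv3DTranspose weights as (kernel_d, kernel_h, kernel_w, out_channels, in_channels), i.e. DHWOI, so transpose([4, 3, 0, 1, 2]) is exactly the permutation onto the new IODHW default. A quick numpy sanity check of those axes (shapes are arbitrary):

    import numpy as np

    w = np.empty((3, 5, 7, 16, 8))  # (D, H, W, O, I), as Keras stores it
    assert w.transpose([4, 3, 0, 1, 2]).shape == (8, 16, 3, 5, 7)  # (I, O, D, H, W)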
 
diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 6906daccad..03663bff41 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -1251,6 +1251,9 @@ class PyTorchOpConverter:
         if len(kernel_size) == 3:
             data_layout = "NCDHW"
             kernel_layout = "OIDHW"
+            if use_transpose:
+                # Transposed convolutions have IODHW layout.
+                kernel_layout = "IODHW"
         elif len(kernel_size) == 2:
             data_layout = "NCHW"
             kernel_layout = "OIHW"
diff --git a/python/tvm/relay/frontend/tensorflow_ops.py b/python/tvm/relay/frontend/tensorflow_ops.py
index e8da60a1af..6b3f144619 100644
--- a/python/tvm/relay/frontend/tensorflow_ops.py
+++ b/python/tvm/relay/frontend/tensorflow_ops.py
@@ -693,7 +693,10 @@ def _conv3d(opname):
             raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"]))
 
         if "kernel_layout" not in attr:
-            attr["kernel_layout"] = "DHWIO" if attr["data_format"] == "NDHWC" 
else "OIDHW"
+            if opname == "conv":
+                attr["kernel_layout"] = "DHWIO" if attr["data_format"] == 
"NDHWC" else "OIDHW"
+            elif opname == "conv_transpose":
+                attr["kernel_layout"] = "DHWOI" if attr["data_format"] == 
"NDHWC" else "IODHW"
 
         use_bias = len(inputs) == (3 if opname != "conv_transpose" else 4)
         channel_axis = 1 if attr["data_format"] == "NCDHW" else 4
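
This matches how TensorFlow stores the filter: tf.nn.conv3d_transpose takes it as [depth, height, width, output_channels, in_channels], i.e. DHWOI, so for NDHWC data the kernel passes through unchanged. A small illustrative check (shapes assumed):

    import numpy as np

    f = np.empty((3, 3, 3, 16, 8))  # TF filter: (D, H, W, C_out, C_in) == DHWOI
    # For NCDHW data the converter now requests IODHW instead:
    assert f.transpose((4, 3, 0, 1, 2)).shape == (8, 16, 3, 3, 3)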
diff --git a/python/tvm/relay/op/nn/_nn.py b/python/tvm/relay/op/nn/_nn.py
index 53aec11e58..b93285aed8 100644
--- a/python/tvm/relay/op/nn/_nn.py
+++ b/python/tvm/relay/op/nn/_nn.py
@@ -382,6 +382,51 @@ def convert_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
 reg.register_strategy("nn.conv3d_transpose", strategy.conv3d_transpose_strategy)
 
 
+@reg.register_convert_op_layout("nn.conv3d_transpose")
+def convert_conv3d_transpose(attrs, inputs, tinfos, desired_layouts):
+    """Convert Layout pass registration for conv3d_transpose op.
+
+    Parameters
+    ----------
+    attrs : tvm.ir.Attrs
+        Attributes of current convolution
+    inputs : list of tvm.relay.Expr
+        The args of the Relay expr to be legalized
+    tinfos : list of types
+        List of input and output types
+    desired_layouts : list of layout strings
+        List of layouts defining our desired
+        layout for the data and kernel inputs respectively.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The transformed expr
+    """
+    data, weight = inputs
+    new_attrs = dict(attrs)
+    assert (
+        len(desired_layouts) == 2
+    ), "A desired layout is expected for both of nn.conv3d_transpose's inputs"
+    desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
+    assert desired_data_layout != "default", "Data layout cannot be default"
+    new_attrs["data_layout"] = desired_data_layout
+
+    if desired_kernel_layout != "default":
+        new_attrs["kernel_layout"] = desired_kernel_layout
+        return relay.nn.conv3d_transpose(data, weight, **new_attrs)
+
+    # Handle default kernel layouts
+    if desired_data_layout == "NCDHW":
+        new_attrs["kernel_layout"] = "IODHW"
+        return relay.nn.conv3d_transpose(data, weight, **new_attrs)
+    elif desired_data_layout == "NDHWC":
+        new_attrs["kernel_layout"] = "DHWOI"
+        return relay.nn.conv3d_transpose(data, weight, **new_attrs)
+
+    raise ValueError("Layout %s is not yet supported" % desired_data_layout)
+
+
 @reg.register_legalize("nn.conv3d_transpose")
 def legalize_conv3d_transpose(attrs, inputs, types):
     """Legalize conv3d_transpose op.
diff --git a/python/tvm/relay/op/nn/nn.py b/python/tvm/relay/op/nn/nn.py
index 857e4c3eb9..0a18e08fee 100644
--- a/python/tvm/relay/op/nn/nn.py
+++ b/python/tvm/relay/op/nn/nn.py
@@ -436,7 +436,7 @@ def conv3d_transpose(
     channels=None,
     kernel_size=None,
     data_layout="NCDHW",
-    kernel_layout="OIDHW",
+    kernel_layout="IODHW",
     out_layout="",
     output_padding=(0, 0, 0),
     out_dtype="",
diff --git a/python/tvm/topi/nn/conv3d_transpose.py b/python/tvm/topi/nn/conv3d_transpose.py
index 9f5c01a1fc..2d048f432f 100644
--- a/python/tvm/topi/nn/conv3d_transpose.py
+++ b/python/tvm/topi/nn/conv3d_transpose.py
@@ -147,20 +147,19 @@ def conv3d_transpose_legalize(attrs, inputs, types):
         data, kernel = inputs
         kernel_layout = attrs["kernel_layout"]
         # Convert Kernel layout to IODHW
-        # kernel_layout is different from input kernel layout - IO is swapped
         if kernel_layout == "DHWIO":
             # input kernel layout is swapped to DHWOI
             # output kernel layout will be IODHW
-            kernel = relay.transpose(kernel, axes=(4, 3, 0, 1, 2))
+            kernel = relay.transpose(kernel, axes=(3, 4, 0, 1, 2))
         elif kernel_layout == "DHWOI":
             # input kernel layout is swapped to DHWIO
             # output kernel layout will be IODHW
-            kernel = relay.transpose(kernel, axes=(3, 4, 0, 1, 2))
-        elif kernel_layout == "IODHW":
+            kernel = relay.transpose(kernel, axes=(4, 3, 0, 1, 2))
+        elif kernel_layout == "OIDHW":
             # input kernel layout is swapped to OIDHW
             # output kernel layout will be IODHW
             kernel = relay.transpose(kernel, axes=(1, 0, 2, 3, 4))
-        elif kernel_layout == "OIDHW":
+        elif kernel_layout == "IODHW":
             # input kernel layout is swapped to IODHW
             # output kernel layout will be IODHW
             pass
@@ -172,7 +171,7 @@ def conv3d_transpose_legalize(attrs, inputs, types):
         new_attrs = {k: attrs[k] for k in attrs.keys()}
         new_attrs["data_layout"] = "NCDHW"
         # layout of kernel should be IODHW, but kernel_layout should be swapped - OIDHW
-        new_attrs["kernel_layout"] = "OIDHW"
+        new_attrs["kernel_layout"] = "IODHW"
 
         # Convert data to NCDHW.
         data = relay.transpose(data, axes=(0, 4, 1, 2, 3))
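
The corrected permutations can be sanity-checked directly, since every branch must land on IODHW. A short numpy sketch of the DHWIO and OIDHW cases (arbitrary shapes):

    import numpy as np

    w = np.empty((3, 3, 3, 8, 16))                                 # DHWIO
    assert w.transpose((3, 4, 0, 1, 2)).shape == (8, 16, 3, 3, 3)  # IODHW

    w = np.empty((16, 8, 3, 3, 3))                                 # OIDHW
    assert w.transpose((1, 0, 2, 3, 4)).shape == (8, 16, 3, 3, 3)  # IODHW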
diff --git a/src/relay/op/nn/convolution.cc b/src/relay/op/nn/convolution.cc
index dabb189971..e2e8f12237 100644
--- a/src/relay/op/nn/convolution.cc
+++ b/src/relay/op/nn/convolution.cc
@@ -594,7 +594,7 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
   if (data == nullptr) return false;
 
   static const Layout kNCDHW("NCDHW");
-  static const Layout kOIDHW("OIDHW");
+  static const Layout kIODHW("IODHW");
 
   const Conv3DTransposeAttrs* param = attrs.as<Conv3DTransposeAttrs>();
   ICHECK(param != nullptr);
@@ -606,9 +606,9 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
       << "Conv3d_transpose only support input layouts that are convertible 
from NCDHW."
       << " But got " << in_layout;
 
-  const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIDHW);
+  const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kIODHW);
   ICHECK(trans_kernel_layout.defined())
-      << "Conv3d_transpose only support kernel layouts that are convertible 
from OIDHW."
+      << "Conv3d_transpose only support kernel layouts that are convertible 
from IODHW."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
@@ -651,16 +651,18 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
       ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
              reporter->AssertEQ(param->kernel_size[1], wshape[3]) &&
              reporter->AssertEQ(param->kernel_size[2], wshape[4]))
-          << "Conv3D: shape of weight is inconsistent with kernel_size, "
+          << "Conv3DTransposed: shape of weight is inconsistent with 
kernel_size, "
           << " kernel_size=" << param->kernel_size << " wshape=" << 
Array<IndexExpr>(wshape);
     }
     if (param->channels.defined()) {
-      ICHECK(reporter->AssertEQ(param->channels, wshape[1]))
-          << "Conv3D: shape of weight is inconsistent with channels, "
-          << " channels=" << param->channels << " wshape=" << 
Array<IndexExpr>(wshape);
+      ICHECK(reporter->AssertEQ(indexdiv(param->channels, param->groups), wshape[1]))
+          << "Conv3DTransposed: shape of weight is inconsistent out_channels, "
+          << " out_channels // groups != weight.shape[1] "
+          << " out_channels=" << param->channels << " groups=" << param->groups
+          << " wshape=" << Array<IndexExpr>(wshape);
     }
     if (!dshape_ncdhw[1].as<tir::AnyNode>() && !wshape[0].as<tir::AnyNode>()) {
-      ICHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[0]));
+      ICHECK(reporter->AssertEQ(dshape_ncdhw[1], wshape[0]));
     }
     channels = wshape[1];
     dilated_ksize_d = 1 + (wshape[2] - 1) * param->dilation[0];
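
In shape terms, the revised checks say: for IODHW weights, wshape[0] must equal the data's input channels, and wshape[1] must equal out_channels // groups. An illustrative restatement in Python (not the C++ itself; the numbers are assumptions):

    # data:   (N, C_in, D, H, W)                  -- NCDHW
    # weight: (C_in, C_out // groups, KD, KH, KW) -- IODHW
    N, C_in, C_out, groups = 1, 8, 16, 2
    wshape = (C_in, C_out // groups, 3, 3, 3)
    assert wshape[0] == C_in             # dshape_ncdhw[1] == wshape[0]
    assert wshape[1] == C_out // groups  # indexdiv(channels, groups) == wshape[1]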
diff --git a/src/runtime/contrib/dnnl/dnnl_json_runtime.cc b/src/runtime/contrib/dnnl/dnnl_json_runtime.cc
index ba06d082c4..0cf9764548 100644
--- a/src/runtime/contrib/dnnl/dnnl_json_runtime.cc
+++ b/src/runtime/contrib/dnnl/dnnl_json_runtime.cc
@@ -424,15 +424,10 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
     // Minus one for DNNL representation. No dilation for DNNL is 0, for relay is 1.
     for (auto& d : dilates) d--;
 
-    // TODO(@apeskov): WA. conv3dTranspose uses wrong layout specifier. IO instead of OI.
-    auto wgh_logic_layout = TensorRequisite::DefaultLogicLayoutFor(wgh_layout);
-    if (wgh_logic_layout == "OIDHW") wgh_logic_layout = "IODHW";
-    if (wgh_logic_layout == "GOIDHW") wgh_logic_layout = "GIODHW";
-
     // Take into account provided layout strings
     src_tr = src_tr.TreatAs(src_layout);
     dst_tr = dst_tr.TreatAs(dst_layout);
-    wgh_tr = wgh_tr.TreatAs(wgh_layout, wgh_logic_layout);
+    wgh_tr = wgh_tr.TreatAs(wgh_layout);
 
     // Should support G mixed with O. Like { G*O, I, H, W }
     if (wgh_layout.find("G") == std::string::npos) {
diff --git a/tests/python/contrib/test_dnnl.py b/tests/python/contrib/test_dnnl.py
index f60472c923..c45149fc5f 100644
--- a/tests/python/contrib/test_dnnl.py
+++ b/tests/python/contrib/test_dnnl.py
@@ -486,7 +486,7 @@ def get_conv3d_transpose(
     activation=None,
     dtype="float32",
     data_layout="NCDHW",
-    kernel_layout="OIDHW",
+    kernel_layout="IODHW",
 ):
     x = relay.var("x", shape=(x_shape), dtype=dtype)
     kernel = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
