piiswrong closed pull request #10915: [MXNET-9704] An assertion check for invalid layout
URL: https://github.com/apache/incubator-mxnet/pull/10915
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:


diff --git a/python/mxnet/gluon/nn/conv_layers.py b/python/mxnet/gluon/nn/conv_layers.py
index 87a62bc8c70..7b4a6be9096 100644
--- a/python/mxnet/gluon/nn/conv_layers.py
+++ b/python/mxnet/gluon/nn/conv_layers.py
@@ -196,7 +196,7 @@ class Conv1D(_Conv):
         layers side by side, each seeing half the input channels, and producing
         half the output channels, and both subsequently concatenated.
     layout: str, default 'NCW'
-        Dimension ordering of data and weight. Can be 'NCW', 'NWC', etc.
+        Dimension ordering of data and weight. Only supports 'NCW' layout for now.
         'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
         respectively. Convolution is applied on the 'W' dimension.
     in_channels : int, default 0
@@ -229,6 +229,7 @@ def __init__(self, channels, kernel_size, strides=1, padding=0, dilation=1,
                  groups=1, layout='NCW', activation=None, use_bias=True,
                  weight_initializer=None, bias_initializer='zeros',
                  in_channels=0, **kwargs):
+        assert layout == 'NCW', "Only supports 'NCW' layout for now"
         if isinstance(kernel_size, numeric_types):
             kernel_size = (kernel_size,)
         assert len(kernel_size) == 1, "kernel_size must be a number or a list of 1 ints"
@@ -271,9 +272,9 @@ class Conv2D(_Conv):
         layers side by side, each seeing half the input channels, and producing
         half the output channels, and both subsequently concatenated.
     layout : str, default 'NCHW'
-        Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc.
-        'N', 'C', 'H', 'W' stands for batch, channel, height, and width
-        dimensions respectively. Convolution is applied on the 'H' and
+        Dimension ordering of data and weight. Only supports 'NCHW' and 'NHWC'
+        layout for now. 'N', 'C', 'H', 'W' stands for batch, channel, height,
+        and width dimensions respectively. Convolution is applied on the 'H' and
         'W' dimensions.
     in_channels : int, default 0
         The number of input channels to this layer. If not specified,
@@ -293,12 +294,12 @@ class Conv2D(_Conv):
 
     Inputs:
         - **data**: 4D input tensor with shape
-          `(batch_size, in_channels, height, width)` when `layout` is `NCW`.
+          `(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
           For other layouts shape is permuted accordingly.
 
     Outputs:
         - **out**: 4D output tensor with shape
-          `(batch_size, channels, out_height, out_width)` when `layout` is `NCW`.
+          `(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
           out_height and out_width are calculated as::
 
              out_height = floor((height+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
@@ -308,6 +309,8 @@ def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
                  dilation=(1, 1), groups=1, layout='NCHW',
                  activation=None, use_bias=True, weight_initializer=None,
                  bias_initializer='zeros', in_channels=0, **kwargs):
+        assert layout == 'NCHW' or layout == 'NHWC', \
+            "Only supports 'NCHW' and 'NHWC' layout for now"
         if isinstance(kernel_size, numeric_types):
             kernel_size = (kernel_size,)*2
         assert len(kernel_size) == 2, "kernel_size must be a number or a list of 2 ints"
@@ -350,9 +353,9 @@ class Conv3D(_Conv):
         layers side by side, each seeing half the input channels, and producing
         half the output channels, and both subsequently concatenated.
     layout : str, default 'NCDHW'
-        Dimension ordering of data and weight. Can be 'NCDHW', 'NDHWC', etc.
-        'N', 'C', 'H', 'W', 'D' stands for batch, channel, height, width and
-        depth dimensions respectively. Convolution is applied on the 'D',
+        Dimension ordering of data and weight. Only supports 'NCDHW' and 'NDHWC'
+        layout for now. 'N', 'C', 'H', 'W', 'D' stands for batch, channel, height,
+        width and depth dimensions respectively. Convolution is applied on the 'D',
         'H' and 'W' dimensions.
     in_channels : int, default 0
         The number of input channels to this layer. If not specified,
@@ -372,12 +375,12 @@ class Conv3D(_Conv):
 
     Inputs:
         - **data**: 5D input tensor with shape
-          `(batch_size, in_channels, depth, height, width)` when `layout` is `NCW`.
+          `(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
           For other layouts shape is permuted accordingly.
 
     Outputs:
         - **out**: 5D output tensor with shape
-          `(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCW`.
+          `(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
           out_depth, out_height and out_width are calculated as::
 
              out_depth = floor((depth+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
@@ -388,6 +391,8 @@ def __init__(self, channels, kernel_size, strides=(1, 1, 1), padding=(0, 0, 0),
                  dilation=(1, 1, 1), groups=1, layout='NCDHW', activation=None,
                 use_bias=True, weight_initializer=None, bias_initializer='zeros',
                  in_channels=0, **kwargs):
+        assert layout == 'NCDHW' or layout == 'NDHWC', \
+            "Only supports 'NCDHW' and 'NDHWC' layout for now"
         if isinstance(kernel_size, numeric_types):
             kernel_size = (kernel_size,)*3
         assert len(kernel_size) == 3, "kernel_size must be a number or a list of 3 ints"
@@ -431,7 +436,7 @@ class Conv1DTranspose(_Conv):
         layers side by side, each seeing half the input channels, and producing
         half the output channels, and both subsequently concatenated.
     layout : str, default 'NCW'
-        Dimension ordering of data and weight. Can be 'NCW', 'NWC', etc.
+        Dimension ordering of data and weight. Only supports 'NCW' layout for now.
         'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
         respectively. Convolution is applied on the 'W' dimension.
     in_channels : int, default 0
@@ -464,6 +469,7 @@ def __init__(self, channels, kernel_size, strides=1, padding=0, output_padding=0
                 dilation=1, groups=1, layout='NCW', activation=None, use_bias=True,
                  weight_initializer=None, bias_initializer='zeros',
                  in_channels=0, **kwargs):
+        assert layout == 'NCW', "Only supports 'NCW' layout for now"
         if isinstance(kernel_size, numeric_types):
             kernel_size = (kernel_size,)
         if isinstance(output_padding, numeric_types):
@@ -513,9 +519,9 @@ class Conv2DTranspose(_Conv):
         layers side by side, each seeing half the input channels, and producing
         half the output channels, and both subsequently concatenated.
     layout : str, default 'NCHW'
-        Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc.
-        'N', 'C', 'H', 'W' stands for batch, channel, height, and width
-        dimensions respectively. Convolution is applied on the 'H' and
+        Dimension ordering of data and weight. Only supports 'NCHW' and 'NHWC'
+        layout for now. 'N', 'C', 'H', 'W' stands for batch, channel, height,
+        and width dimensions respectively. Convolution is applied on the 'H' and
         'W' dimensions.
     in_channels : int, default 0
         The number of input channels to this layer. If not specified,
@@ -535,12 +541,12 @@ class Conv2DTranspose(_Conv):
 
     Inputs:
         - **data**: 4D input tensor with shape
-          `(batch_size, in_channels, height, width)` when `layout` is `NCW`.
+          `(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
           For other layouts shape is permuted accordingly.
 
     Outputs:
         - **out**: 4D output tensor with shape
-          `(batch_size, channels, out_height, out_width)` when `layout` is `NCW`.
+          `(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
           out_height and out_width are calculated as::
 
              out_height = (height-1)*strides[0]-2*padding[0]+kernel_size[0]+output_padding[0]
@@ -550,6 +556,8 @@ def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
                 output_padding=(0, 0), dilation=(1, 1), groups=1, layout='NCHW',
                  activation=None, use_bias=True, weight_initializer=None,
                  bias_initializer='zeros', in_channels=0, **kwargs):
+        assert layout == 'NCHW' or layout == 'NHWC', \
+            "Only supports 'NCHW' and 'NHWC' layout for now"
         if isinstance(kernel_size, numeric_types):
             kernel_size = (kernel_size,)*2
         if isinstance(output_padding, numeric_types):
@@ -599,10 +607,10 @@ class Conv3DTranspose(_Conv):
         layers side by side, each seeing half the input channels, and producing
         half the output channels, and both subsequently concatenated.
     layout : str, default 'NCDHW'
-        Dimension ordering of data and weight. Can be 'NCDHW', 'NDHWC', etc.
-        'N', 'C', 'H', 'W', 'D' stands for batch, channel, height, width and
-        depth dimensions respectively. Convolution is applied on the 'D',
-        'H', and 'W' dimensions.
+        Dimension ordering of data and weight. Only supports 'NCDHW' and 'NDHWC'
+        layout for now. 'N', 'C', 'H', 'W', 'D' stands for batch, channel, height,
+        width and depth dimensions respectively. Convolution is applied on the 'D',
+        'H' and 'W' dimensions.
     in_channels : int, default 0
         The number of input channels to this layer. If not specified,
         initialization will be deferred to the first time `forward` is called
@@ -621,12 +629,12 @@ class Conv3DTranspose(_Conv):
 
     Inputs:
         - **data**: 5D input tensor with shape
-          `(batch_size, in_channels, depth, height, width)` when `layout` is `NCW`.
+          `(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
           For other layouts shape is permuted accordingly.
 
     Outputs:
         - **out**: 5D output tensor with shape
-          `(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCW`.
+          `(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
           out_depth, out_height and out_width are calculated as::
 
            out_depth = (depth-1)*strides[0]-2*padding[0]+kernel_size[0]+output_padding[0]
@@ -637,6 +645,8 @@ def __init__(self, channels, kernel_size, strides=(1, 1, 1), padding=(0, 0, 0),
                 output_padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, layout='NCDHW',
                  activation=None, use_bias=True, weight_initializer=None,
                  bias_initializer='zeros', in_channels=0, **kwargs):
+        assert layout == 'NCDHW' or layout == 'NDHWC', \
+            "Only supports 'NCDHW' and 'NDHWC' layout for now"
         if isinstance(kernel_size, numeric_types):
             kernel_size = (kernel_size,)*3
         if isinstance(output_padding, numeric_types):
@@ -694,7 +704,7 @@ class MaxPool1D(_Pooling):
         If padding is non-zero, then the input is implicitly
         zero-padded on both sides for padding number of points.
     layout : str, default 'NCW'
-        Dimension ordering of data and weight. Can be 'NCW', 'NWC', etc.
+        Dimension ordering of data and weight. Only supports 'NCW' layout for now.
         'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
         respectively. Pooling is applied on the W dimension.
     ceil_mode : bool, default False
@@ -716,7 +726,7 @@ class MaxPool1D(_Pooling):
     """
     def __init__(self, pool_size=2, strides=None, padding=0, layout='NCW',
                  ceil_mode=False, **kwargs):
-        assert layout == 'NCW', "Only supports NCW layout for now"
+        assert layout == 'NCW', "Only supports 'NCW' layout for now"
         if isinstance(pool_size, numeric_types):
             pool_size = (pool_size,)
         assert len(pool_size) == 1, "pool_size must be a number or a list of 1 ints"
@@ -739,7 +749,7 @@ class MaxPool2D(_Pooling):
         If padding is non-zero, then the input is implicitly
         zero-padded on both sides for padding number of points.
     layout : str, default 'NCHW'
-        Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc.
+        Dimension ordering of data and weight. Only supports 'NCHW' layout for now.
         'N', 'C', 'H', 'W' stands for batch, channel, height, and width
         dimensions respectively. padding is applied on 'H' and 'W' dimension.
     ceil_mode : bool, default False
@@ -748,12 +758,12 @@ class MaxPool2D(_Pooling):
 
     Inputs:
         - **data**: 4D input tensor with shape
-          `(batch_size, in_channels, height, width)` when `layout` is `NCW`.
+          `(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
           For other layouts shape is permuted accordingly.
 
     Outputs:
         - **out**: 4D output tensor with shape
-          `(batch_size, channels, out_height, out_width)` when `layout` is `NCW`.
+          `(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
           out_height and out_width are calculated as::
 
              out_height = floor((height+2*padding[0]-pool_size[0])/strides[0])+1
@@ -764,7 +774,7 @@ class MaxPool2D(_Pooling):
     """
     def __init__(self, pool_size=(2, 2), strides=None, padding=0, layout='NCHW',
                  ceil_mode=False, **kwargs):
-        assert layout == 'NCHW', "Only supports NCHW layout for now"
+        assert layout == 'NCHW', "Only supports 'NCHW' layout for now"
         if isinstance(pool_size, numeric_types):
             pool_size = (pool_size,)*2
         assert len(pool_size) == 2, "pool_size must be a number or a list of 2 ints"
@@ -787,7 +797,7 @@ class MaxPool3D(_Pooling):
         If padding is non-zero, then the input is implicitly
         zero-padded on both sides for padding number of points.
     layout : str, default 'NCDHW'
-        Dimension ordering of data and weight. Can be 'NCDHW', 'NDHWC', etc.
+        Dimension ordering of data and weight. Only supports 'NCDHW' layout for now.
         'N', 'C', 'H', 'W', 'D' stands for batch, channel, height, width and
         depth dimensions respectively. padding is applied on 'D', 'H' and 'W'
         dimension.
@@ -802,7 +812,7 @@ class MaxPool3D(_Pooling):
 
     Outputs:
         - **out**: 5D output tensor with shape
-          `(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCW`.
+          `(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
           out_depth, out_height and out_width are calculated as::
 
               out_depth = floor((depth+2*padding[0]-pool_size[0])/strides[0])+1
@@ -814,7 +824,7 @@ class MaxPool3D(_Pooling):
     """
     def __init__(self, pool_size=(2, 2, 2), strides=None, padding=0,
                  ceil_mode=False, layout='NCDHW', **kwargs):
-        assert layout == 'NCDHW', "Only supports NCDHW layout for now"
+        assert layout == 'NCDHW', "Only supports 'NCDHW' layout for now"
         if isinstance(pool_size, numeric_types):
             pool_size = (pool_size,)*3
         assert len(pool_size) == 3, "pool_size must be a number or a list of 3 ints"
@@ -836,7 +846,7 @@ class AvgPool1D(_Pooling):
         If padding is non-zero, then the input is implicitly
         zero-padded on both sides for padding number of points.
     layout : str, default 'NCW'
-        Dimension ordering of data and weight. Can be 'NCW', 'NWC', etc.
+        Dimension ordering of data and weight. Only supports 'NCW' layout for now.
         'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
         respectively. padding is applied on 'W' dimension.
     ceil_mode : bool, default False
@@ -858,7 +868,7 @@ class AvgPool1D(_Pooling):
     """
     def __init__(self, pool_size=2, strides=None, padding=0, layout='NCW',
                  ceil_mode=False, **kwargs):
-        assert layout == 'NCW', "Only supports NCW layout for now"
+        assert layout == 'NCW', "Only supports 'NCW' layout for now"
         if isinstance(pool_size, numeric_types):
             pool_size = (pool_size,)
         assert len(pool_size) == 1, "pool_size must be a number or a list of 1 ints"
@@ -880,7 +890,7 @@ class AvgPool2D(_Pooling):
         If padding is non-zero, then the input is implicitly
         zero-padded on both sides for padding number of points.
     layout : str, default 'NCHW'
-        Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc.
+        Dimension ordering of data and weight. Only supports 'NCHW' layout for now.
         'N', 'C', 'H', 'W' stands for batch, channel, height, and width
         dimensions respectively. padding is applied on 'H' and 'W' dimension.
     ceil_mode : bool, default False
@@ -889,12 +899,12 @@ class AvgPool2D(_Pooling):
 
     Inputs:
         - **data**: 4D input tensor with shape
-          `(batch_size, in_channels, height, width)` when `layout` is `NCW`.
+          `(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
           For other layouts shape is permuted accordingly.
 
     Outputs:
         - **out**: 4D output tensor with shape
-          `(batch_size, channels, out_height, out_width)` when `layout` is `NCW`.
+          `(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
           out_height and out_width are calculated as::
 
              out_height = floor((height+2*padding[0]-pool_size[0])/strides[0])+1
@@ -905,7 +915,7 @@ class AvgPool2D(_Pooling):
     """
     def __init__(self, pool_size=(2, 2), strides=None, padding=0,
                  ceil_mode=False, layout='NCHW', **kwargs):
-        assert layout == 'NCHW', "Only supports NCHW layout for now"
+        assert layout == 'NCHW', "Only supports 'NCHW' layout for now"
         if isinstance(pool_size, numeric_types):
             pool_size = (pool_size,)*2
         assert len(pool_size) == 2, "pool_size must be a number or a list of 2 ints"
@@ -937,12 +947,12 @@ class AvgPool3D(_Pooling):
 
     Inputs:
         - **data**: 5D input tensor with shape
-          `(batch_size, in_channels, depth, height, width)` when `layout` is `NCW`.
+          `(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
           For other layouts shape is permuted accordingly.
 
     Outputs:
         - **out**: 5D output tensor with shape
-          `(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCW`.
+          `(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
           out_depth, out_height and out_width are calculated as::
 
               out_depth = floor((depth+2*padding[0]-pool_size[0])/strides[0])+1
@@ -954,7 +964,7 @@ class AvgPool3D(_Pooling):
     """
     def __init__(self, pool_size=(2, 2, 2), strides=None, padding=0,
                  ceil_mode=False, layout='NCDHW', **kwargs):
-        assert layout == 'NCDHW', "Only supports NCDHW layout for now"
+        assert layout == 'NCDHW', "Only supports 'NCDHW' layout for now"
         if isinstance(pool_size, numeric_types):
             pool_size = (pool_size,)*3
         assert len(pool_size) == 3, "pool_size must be a number or a list of 3 ints"
@@ -965,7 +975,7 @@ def __init__(self, pool_size=(2, 2, 2), strides=None, padding=0,
 class GlobalMaxPool1D(_Pooling):
     """Global max pooling operation for temporal data."""
     def __init__(self, layout='NCW', **kwargs):
-        assert layout == 'NCW', "Only supports NCW layout for now"
+        assert layout == 'NCW', "Only supports 'NCW' layout for now"
         super(GlobalMaxPool1D, self).__init__(
             (1,), None, 0, True, True, 'max', **kwargs)
 
@@ -973,7 +983,7 @@ def __init__(self, layout='NCW', **kwargs):
 class GlobalMaxPool2D(_Pooling):
     """Global max pooling operation for spatial data."""
     def __init__(self, layout='NCHW', **kwargs):
-        assert layout == 'NCHW', "Only supports NCHW layout for now"
+        assert layout == 'NCHW', "Only supports 'NCHW' layout for now"
         super(GlobalMaxPool2D, self).__init__(
             (1, 1), None, 0, True, True, 'max', **kwargs)
 
@@ -981,7 +991,7 @@ def __init__(self, layout='NCHW', **kwargs):
 class GlobalMaxPool3D(_Pooling):
     """Global max pooling operation for 3D data."""
     def __init__(self, layout='NCDHW', **kwargs):
-        assert layout == 'NCDHW', "Only supports NCDHW layout for now"
+        assert layout == 'NCDHW', "Only supports 'NCDHW' layout for now"
         super(GlobalMaxPool3D, self).__init__(
             (1, 1, 1), None, 0, True, True, 'max', **kwargs)
 
@@ -989,7 +999,7 @@ def __init__(self, layout='NCDHW', **kwargs):
 class GlobalAvgPool1D(_Pooling):
     """Global average pooling operation for temporal data."""
     def __init__(self, layout='NCW', **kwargs):
-        assert layout == 'NCW', "Only supports NCW layout for now"
+        assert layout == 'NCW', "Only supports 'NCW' layout for now"
         super(GlobalAvgPool1D, self).__init__(
             (1,), None, 0, True, True, 'avg', **kwargs)
 
@@ -997,7 +1007,7 @@ def __init__(self, layout='NCW', **kwargs):
 class GlobalAvgPool2D(_Pooling):
     """Global average pooling operation for spatial data."""
     def __init__(self, layout='NCHW', **kwargs):
-        assert layout == 'NCHW', "Only supports NCHW layout for now"
+        assert layout == 'NCHW', "Only supports 'NCHW' layout for now"
         super(GlobalAvgPool2D, self).__init__(
             (1, 1), None, 0, True, True, 'avg', **kwargs)
 
@@ -1005,7 +1015,7 @@ def __init__(self, layout='NCHW', **kwargs):
 class GlobalAvgPool3D(_Pooling):
     """Global max pooling operation for 3D data."""
     def __init__(self, layout='NCDHW', **kwargs):
-        assert layout == 'NCDHW', "Only supports NCDHW layout for now"
+        assert layout == 'NCDHW', "Only supports 'NCDHW' layout for now"
         super(GlobalAvgPool3D, self).__init__(
             (1, 1, 1), None, 0, True, True, 'avg', **kwargs)
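
For reference, a minimal sketch of how the new checks behave at
construction time (hypothetical usage; assumes a build of MXNet that
includes this patch, and note the assertions only guard the constructor,
so backend support for 'NHWC'/'NDHWC' convolution still depends on how
MXNet was built):

    from mxnet.gluon import nn

    # Conv2D now accepts both layouts permitted by its new assertion.
    nn.Conv2D(channels=16, kernel_size=3, layout='NCHW')  # passes the check
    nn.Conv2D(channels=16, kernel_size=3, layout='NHWC')  # also passes

    # Conv1D only permits 'NCW', so an invalid layout now fails fast at
    # construction instead of surfacing later during bind/forward.
    try:
        nn.Conv1D(channels=16, kernel_size=3, layout='NWC')
    except AssertionError as err:
        print(err)  # Only supports 'NCW' layout for now

The pooling and global-pooling layers already carried equivalent
assertions; this patch only normalizes their messages to quote the
layout name.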
 


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
