[GitHub] sxjscience commented on a change in pull request #10000: fix average pooling kernel size assignment error

2018-03-12 Thread GitBox
sxjscience commented on a change in pull request #10000: fix average pooling 
kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/10000#discussion_r173892671
 
 

 ##
 File path: src/operator/nn/pooling-inl.h
 ##
 @@ -56,11 +56,11 @@ struct PoolingParam : public dmlc::Parameter 
{
 DMLC_DECLARE_FIELD(cudnn_off).set_default(false)
 .describe("Turn off cudnn pooling and use MXNet pooling operator. ");
 
-DMLC_DECLARE_FIELD(kernel)
+DMLC_DECLARE_FIELD(kernel).set_default(TShape())  // add default value here
 .enforce_nonzero()
 .describe("Pooling kernel size: (y, x) or (d, y, x)");
 
-DMLC_DECLARE_FIELD(pool_type)
+DMLC_DECLARE_FIELD(pool_type).set_default(pool_enum::kMaxPooling)  // add 
default pooling method
 .add_enum("max", pool_enum::kMaxPooling)
 .add_enum("avg", pool_enum::kAvgPooling)
 .add_enum("sum", pool_enum::kSumPooling)
 
 Review comment:
   I realized that we need to change the order of the DMLC_DECLARE_FIELD here. 
In the original version, the parameters that do not have default values will be 
set first, and then come the params with default values. So the order will be 
`kernel`, `pool_type`, `global_pool`, `cudnn_off`, ... After we add default 
values to `kernel` and `pool_type`, the order becomes `global_pool`, 
`cudnn_off`, `kernel`, `pool_type`, ... Thus, the way to solve the problem is 
to change the order:
   ```c++
   DMLC_DECLARE_FIELD(kernel).set_default(TShape())  // add default value 
here
   .enforce_nonzero()
   .describe("Pooling kernel size: (y, x) or (d, y, x)");
   
   DMLC_DECLARE_FIELD(pool_type).set_default(pool_enum::kMaxPooling)  // 
add default pooling method
   .add_enum("max", pool_enum::kMaxPooling)
   .add_enum("avg", pool_enum::kAvgPooling)
   .add_enum("sum", pool_enum::kSumPooling)
   .describe("Pooling type to be applied.");
   
   DMLC_DECLARE_FIELD(global_pool).set_default(false)
   .describe("Ignore kernel size, do global pooling based on current input 
feature map. ");
   
   DMLC_DECLARE_FIELD(cudnn_off).set_default(false)
   .describe("Turn off cudnn pooling and use MXNet pooling operator. ");
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: fix average pooling kernel size assignment error

2018-03-07 Thread GitBox
sxjscience commented on a change in pull request #10000: fix average pooling 
kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/10000#discussion_r173062611
 
 

 ##
 File path: tests/python/gpu/test_operator_gpu.py
 ##
 @@ -904,86 +904,87 @@ def test_1d_pooling(pool_type):
 kernel = (4,)
 pad = (2,)
 stride = (2,)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 check_consistency(sym_list, ctx_list)
-
+
 def test_2d_pooling(pool_type):
 data = (2, 3, 20, 20)
 kernel = (4, 4)
 pad = (2, 2)
 stride = (2, 2)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
 sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, 
stride=stride, pool_type=pool_type,
 
 Review comment:
   Sounds good.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: fix average pooling kernel size assignment error

2018-03-07 Thread GitBox
sxjscience commented on a change in pull request #10000: fix average pooling 
kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/10000#discussion_r173059994
 
 

 ##
 File path: tests/python/gpu/test_operator_gpu.py
 ##
 @@ -904,86 +904,87 @@ def test_1d_pooling(pool_type):
 kernel = (4,)
 pad = (2,)
 stride = (2,)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 check_consistency(sym_list, ctx_list)
-
+
 def test_2d_pooling(pool_type):
 data = (2, 3, 20, 20)
 kernel = (4, 4)
 pad = (2, 2)
 stride = (2, 2)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
 sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, 
stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
 sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
 
 Review comment:
   You can do it if you have time. It should be due to the difference between 
Windows and Unix.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: fix average pooling kernel size assignment error

2018-03-06 Thread GitBox
sxjscience commented on a change in pull request #10000: fix average pooling 
kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/10000#discussion_r172729940
 
 

 ##
 File path: tests/python/gpu/test_operator_gpu.py
 ##
 @@ -904,86 +904,87 @@ def test_1d_pooling(pool_type):
 kernel = (4,)
 pad = (2,)
 stride = (2,)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 check_consistency(sym_list, ctx_list)
-
+
 def test_2d_pooling(pool_type):
 data = (2, 3, 20, 20)
 kernel = (4, 4)
 pad = (2, 2)
 stride = (2, 2)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
 sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, 
stride=stride, pool_type=pool_type,
 
 Review comment:
   I see. We should then add back one test case that uses the kernel 
argument.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: fix average pooling kernel size assignment error

2018-03-06 Thread GitBox
sxjscience commented on a change in pull request #10000: fix average pooling 
kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/10000#discussion_r172725993
 
 

 ##
 File path: tests/python/gpu/test_operator_gpu.py
 ##
 @@ -904,86 +904,87 @@ def test_1d_pooling(pool_type):
 kernel = (4,)
 pad = (2,)
 stride = (2,)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 check_consistency(sym_list, ctx_list)
-
+
 def test_2d_pooling(pool_type):
 data = (2, 3, 20, 20)
 kernel = (4, 4)
 pad = (2, 2)
 stride = (2, 2)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
 sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, 
stride=stride, pool_type=pool_type,
 
 Review comment:
   But `kernel` is not used if global_pool=True.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: fix average pooling kernel size assignment error

2018-03-05 Thread GitBox
sxjscience commented on a change in pull request #10000: fix average pooling 
kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/10000#discussion_r172421833
 
 

 ##
 File path: src/operator/nn/pooling-inl.h
 ##
 @@ -178,6 +178,11 @@ class PoolingOp {
 template
 PoolingOp (const PoolingParam ) {
   static thread_local PoolingOp op;
+  // check if filter size assigned correctly
+  if (param.global_pool == false) {
+CHECK_GT(param.kernel.ndim(), 0U)
+<< "A positive number must be assigned as filter size";
 
 Review comment:
   I think we can use "You need to set the kernel size if global pooling is not 
used".


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: fix average pooling kernel size assignment error

2018-03-05 Thread GitBox
sxjscience commented on a change in pull request #10000: fix average pooling 
kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/10000#discussion_r172384242
 
 

 ##
 File path: src/operator/nn/pooling-inl.h
 ##
 @@ -178,6 +178,11 @@ class PoolingOp {
 template
 PoolingOp (const PoolingParam ) {
   static thread_local PoolingOp op;
+  // check if filter size assigned correctly
+  if (param.global_pool == false) {
+CHECK_GT(param.kernel.ndim(), 0U)
+<< "A positive number must be assigned as filter size";
 
 Review comment:
   Need a better error message.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services